def GEPA(*args: Any, **kwargs: Any) -> GEPAType:
    """Shortcut constructor that lazily imports the GEPA implementation.

    Defers the (heavy) ``.gepa`` package import until the algorithm is
    actually instantiated, mirroring the ``APO``/``VERL`` shortcuts.
    """
    from .gepa import GEPA as _GEPA

    return _GEPA(*args, **kwargs)
class LightningGEPACallback:
    """Forward GEPA optimization events to Python's standard logging.

    Implements the hooks of GEPA's ``GEPACallback`` protocol; each hook
    simply emits a log record. The implementation is deliberately
    defensive — any failure inside a hook is caught and logged so that a
    logging problem can never abort an optimization run.

    When W&B experiment tracking is enabled via ``use_wandb``, every
    iteration-end event is additionally mirrored to W&B as progress
    metrics (pareto front aggregate, best candidate score, number of
    candidates), making progress visible at every iteration rather than
    only when a new candidate is accepted.
    """

    def __init__(self, *, use_wandb: bool = False) -> None:
        self._use_wandb = use_wandb

    # -- optimization lifecycle ------------------------------------------

    def on_optimization_start(self, event: Dict[str, Any]) -> None:
        logger.info("GEPA optimization started")

    def on_optimization_end(self, event: Dict[str, Any]) -> None:
        logger.info("GEPA optimization ended")

    def on_iteration_start(self, event: Dict[str, Any]) -> None:
        logger.info("GEPA iteration %s started", event.get("iteration", "?"))

    def on_iteration_end(self, event: Dict[str, Any]) -> None:
        logger.info("GEPA iteration %s ended", event.get("iteration", "?"))
        if self._use_wandb:
            self._log_iteration_metrics(event)

    # -- candidates ------------------------------------------------------

    def on_candidate_selected(self, event: Dict[str, Any]) -> None:
        logger.debug("GEPA candidate selected: %s", event)

    def on_candidate_accepted(self, event: Dict[str, Any]) -> None:
        logger.info("GEPA candidate accepted: %s", event)

    def on_candidate_rejected(self, event: Dict[str, Any]) -> None:
        logger.debug("GEPA candidate rejected: %s", event)

    # -- evaluation ------------------------------------------------------

    def on_evaluation_start(self, event: Dict[str, Any]) -> None:
        logger.debug("GEPA evaluation started")

    def on_evaluation_end(self, event: Dict[str, Any]) -> None:
        logger.debug("GEPA evaluation ended")

    def on_evaluation_skipped(self, event: Dict[str, Any]) -> None:
        logger.debug("GEPA evaluation skipped: %s", event)

    def on_valset_evaluated(self, event: Dict[str, Any]) -> None:
        logger.debug("GEPA valset evaluated: %s", event)

    def on_minibatch_sampled(self, event: Dict[str, Any]) -> None:
        logger.debug("GEPA minibatch sampled: %s", event)

    # -- reflection / proposal -------------------------------------------

    def on_reflective_dataset_built(self, event: Dict[str, Any]) -> None:
        logger.debug("GEPA reflective dataset built")

    def on_proposal_start(self, event: Dict[str, Any]) -> None:
        logger.debug("GEPA proposal started")

    def on_proposal_end(self, event: Dict[str, Any]) -> None:
        logger.debug("GEPA proposal ended")

    # -- merge -----------------------------------------------------------

    def on_merge_attempted(self, event: Dict[str, Any]) -> None:
        logger.debug("GEPA merge attempted")

    def on_merge_accepted(self, event: Dict[str, Any]) -> None:
        logger.info("GEPA merge accepted")

    def on_merge_rejected(self, event: Dict[str, Any]) -> None:
        logger.debug("GEPA merge rejected")

    # -- bookkeeping -----------------------------------------------------

    def on_pareto_front_updated(self, event: Dict[str, Any]) -> None:
        logger.debug("GEPA Pareto front updated: %s", event)

    def on_state_saved(self, event: Dict[str, Any]) -> None:
        logger.debug("GEPA state saved")

    def on_budget_updated(self, event: Dict[str, Any]) -> None:
        logger.info("GEPA budget updated, remaining: %s", event.get("metric_calls_remaining", "?"))

    def on_error(self, event: Dict[str, Any]) -> None:
        logger.error("GEPA error: %s", event.get("exception", "unknown"))

    def _log_iteration_metrics(self, event: Dict[str, Any]) -> None:
        """Mirror per-iteration progress metrics to W&B.

        Reads running totals off the ``GEPAState`` carried in the event so
        that metrics like the pareto front aggregate and the best candidate
        score are emitted on every iteration, not only on acceptance.
        """
        try:
            import wandb

            if wandb.run is None:
                return

            state = event.get("state")
            if state is None:
                return

            step: int = event.get("iteration", 0)

            # Pareto front aggregate: mean of per-example best scores.
            front = list(state.pareto_front_valset.values())
            pareto_agg = sum(front) / len(front) if front else 0.0

            # Best single-candidate aggregate score on the valset.
            candidate_scores = state.program_full_scores_val_set
            best = max(candidate_scores) if candidate_scores else 0.0

            wandb.log(
                {
                    "agl/pareto_front_agg": pareto_agg,
                    "agl/best_candidate_valset_score": best,
                    "agl/num_candidates": len(state.program_candidates),
                    "agl/total_metric_calls": state.total_num_evals,
                    "agl/proposal_accepted": event.get("proposal_accepted", False),
                },
                step=step,
            )
        except Exception as exc:
            logger.debug("Failed to log iteration metrics to W&B: %s", exc)
@dataclass
class GEPAConfig:
    """Configuration for the GEPA evolutionary prompt optimizer.

    Groups GEPA optimizer knobs, rollout execution parameters, and
    reflection LLM configuration into a single configuration object.

    Args:
        max_metric_calls: Maximum number of evaluation calls before stopping.
            Maps to GEPA's budget constraint. ``None`` means unlimited.
        candidate_selection_strategy: Strategy for selecting candidates.
            ``"pareto"`` tracks per-instance best, ``"current_best"`` keeps the
            single highest scorer, ``"epsilon_greedy"`` adds exploration.
        frontier_type: Pareto frontier granularity. ``"instance"`` tracks
            per-example bests; ``"aggregate"`` tracks only the overall best.
        reflection_minibatch_size: Number of examples sampled for each
            reflection step. ``None`` lets GEPA choose automatically.
        module_selector: Strategy for choosing which component to update.
            ``"round_robin"`` cycles through components sequentially.
        seed: Random seed for reproducibility.
        use_merge: Whether to attempt merging top candidates.
        max_merge_invocations: Maximum number of merge attempts when
            ``use_merge`` is enabled.
        skip_perfect_score: Skip further evaluation when a candidate
            achieves perfect score.
        perfect_score: Value considered a perfect score.
        display_progress_bar: Show a progress bar during optimization.
        raise_on_exception: Raise exceptions from GEPA instead of logging.
        rollout_batch_timeout: Maximum seconds to wait for a rollout batch
            to complete before scoring incomplete rollouts as 0.0.
        rollout_poll_interval: Seconds between polling the store for
            rollout completion.
        reflection_model: Model identifier passed to ``litellm.completion``
            for GEPA's reflection/proposal calls. When ``None``, GEPA uses
            its own default model.
    """

    # GEPA optimizer knobs
    max_metric_calls: Optional[int] = None
    candidate_selection_strategy: Literal["pareto", "current_best", "epsilon_greedy"] = "pareto"
    frontier_type: Literal["instance", "aggregate"] = "instance"
    reflection_minibatch_size: Optional[int] = None
    module_selector: str = "round_robin"
    seed: int = 0
    use_merge: bool = False
    max_merge_invocations: int = 5
    skip_perfect_score: bool = True
    perfect_score: float = 1.0
    display_progress_bar: bool = False
    raise_on_exception: bool = True

    # Rollout execution parameters
    rollout_batch_timeout: float = 3600.0
    rollout_poll_interval: float = 2.0

    # Reflection LLM configuration.
    # NOTE: `default_factory=dict` is the idiomatic spelling of the previous
    # `lambda: {}` and gives each instance its own fresh mapping.
    reflection_model: Optional[str] = None
    reflection_model_kwargs: Dict[str, Any] = field(default_factory=dict)
    """Extra keyword arguments forwarded to ``litellm.completion`` for reflection
    calls. Useful for passing authentication parameters such as
    ``azure_ad_token_provider`` for Azure Entra ID."""

    # Experiment tracking
    use_wandb: bool = False
    """Enable Weights & Biases experiment tracking during optimization."""
    wandb_api_key: Optional[str] = None
    """W&B API key. When ``None``, relies on ``WANDB_API_KEY`` env var or prior ``wandb login``."""
    wandb_init_kwargs: Dict[str, Any] = field(default_factory=dict)
    """Extra keyword arguments forwarded to ``wandb.init()`` (e.g. ``project``, ``name``, ``tags``)."""

    # Extra kwargs forwarded to gepa.optimize()
    extra_kwargs: Dict[str, Any] = field(default_factory=dict)
class GEPA(Algorithm):
    """Evolutionary prompt optimizer powered by GEPA's reflective mutation engine.

    GEPA evaluates prompt candidates via batched rollouts, builds reflective
    datasets from execution traces, and proposes improved candidates through
    LLM-driven reflection while tracking a Pareto frontier of per-example
    performance.

    The algorithm runs GEPA's synchronous ``optimize()`` in a worker thread
    via ``asyncio.to_thread``, while the `LightningGEPAAdapter` calls back
    to the async event loop for store operations.

    Args:
        config: GEPA optimizer and rollout configuration. Uses sensible
            defaults when ``None``.
        resource_name: Explicit resource key in ``initial_resources`` to
            optimize. When ``None``, auto-detects the first `PromptTemplate`.
    """

    def __init__(
        self,
        config: GEPAConfig | None = None,
        *,
        resource_name: str | None = None,
    ) -> None:
        self._config = config or GEPAConfig()
        self._resource_name = resource_name
        self._result: Any = None
        self._best_prompt: Optional[PromptTemplate] = None
        self._codec: Optional[PromptResourceCodec] = None

    @property
    def config(self) -> GEPAConfig:
        """The active configuration."""
        return self._config

    @property
    def result(self) -> Optional[Any]:
        """The full ``GEPAResult`` after ``run()`` completes, or ``None``."""
        return self._result

    def get_best_prompt(self) -> PromptTemplate:
        """Retrieve the best prompt discovered during optimization.

        Returns:
            The `PromptTemplate` corresponding to GEPA's best candidate.

        Raises:
            ValueError: If ``run()`` has not been called yet.
        """
        if self._best_prompt is None:
            raise ValueError("No best prompt available — run() has not been called yet")
        return self._best_prompt

    @with_llm_proxy()
    @with_store
    async def run(
        self,
        store: LightningStore,
        llm_proxy: Optional[LLMProxy],
        train_dataset: Optional[Dataset[Any]] = None,
        val_dataset: Optional[Dataset[Any]] = None,
    ) -> None:
        """Execute GEPA optimization over the configured prompt resource.

        The method:

        1. Discovers the optimizable `PromptTemplate` from ``initial_resources``.
        2. Materializes AGL datasets into plain lists.
        3. Builds the adapter, callback, and reflection LM.
        4. Runs ``gepa.optimize()`` in a worker thread.
        5. Publishes the best candidate back to the store.

        Args:
            store: Injected by ``@with_store`` — callers should not provide this.
            llm_proxy: Injected by ``@with_llm_proxy()`` — callers should not provide this.
            train_dataset: Training examples driving GEPA's reflective
                optimization loop. Required.
            val_dataset: Validation examples for candidate evaluation. Optional
                for GEPA (when ``None``, GEPA skips validation-based selection).

        Raises:
            ValueError: If ``train_dataset`` is ``None`` or ``initial_resources``
                are not set.
        """
        if train_dataset is None:
            raise ValueError("train_dataset is required for GEPA algorithm")

        initial_resources = self.get_initial_resources()
        if initial_resources is None:
            raise ValueError(
                "initial_resources are not set for GEPA algorithm. "
                "Use algorithm.set_initial_resources() or set it in Trainer()"
            )

        # Imported lazily so the algorithm package does not hard-depend on gepa.
        import gepa as gepa_lib

        # Build codec and seed candidate
        codec, seed_candidate = PromptResourceCodec.from_initial_resources(
            initial_resources, resource_name=self._resource_name
        )
        self._codec = codec

        # Materialize datasets into plain lists
        train_list: List[Any] = [train_dataset[i] for i in range(len(train_dataset))]
        val_list: Optional[List[Any]] = (
            [val_dataset[i] for i in range(len(val_dataset))] if val_dataset is not None else None
        )

        # Prepare adapter
        loop = asyncio.get_running_loop()
        # Shared mutable counter: the adapter and this method both mint
        # monotonically-increasing resource version ids from the same cell.
        version_counter: List[int] = [0]
        adapter = LightningGEPAAdapter(
            store=store,
            codec=codec,
            loop=loop,
            version_counter=version_counter,
            rollout_batch_timeout=self._config.rollout_batch_timeout,
            rollout_poll_interval=self._config.rollout_poll_interval,
        )

        # Prepare reflection LM
        reflection_lm = self._build_reflection_lm()

        # Prepare callback
        callback = LightningGEPACallback(use_wandb=self._config.use_wandb)

        # Build kwargs for gepa.optimize
        optimize_kwargs: Dict[str, Any] = {
            "seed_candidate": seed_candidate,
            "trainset": train_list,
            "adapter": adapter,
            "callbacks": [callback],
            "candidate_selection_strategy": self._config.candidate_selection_strategy,
            "frontier_type": self._config.frontier_type,
            "module_selector": self._config.module_selector,
            "seed": self._config.seed,
            "use_merge": self._config.use_merge,
            "max_merge_invocations": self._config.max_merge_invocations,
            "skip_perfect_score": self._config.skip_perfect_score,
            "perfect_score": self._config.perfect_score,
            "display_progress_bar": self._config.display_progress_bar,
            "raise_on_exception": self._config.raise_on_exception,
        }
        if val_list is not None:
            optimize_kwargs["valset"] = val_list
        if self._config.max_metric_calls is not None:
            optimize_kwargs["max_metric_calls"] = self._config.max_metric_calls
        if self._config.reflection_minibatch_size is not None:
            optimize_kwargs["reflection_minibatch_size"] = self._config.reflection_minibatch_size
        if reflection_lm is not None:
            optimize_kwargs["reflection_lm"] = reflection_lm
        if self._config.use_wandb:
            optimize_kwargs["use_wandb"] = True
        if self._config.wandb_api_key is not None:
            optimize_kwargs["wandb_api_key"] = self._config.wandb_api_key
        if self._config.wandb_init_kwargs:
            optimize_kwargs["wandb_init_kwargs"] = self._config.wandb_init_kwargs

        # Merge any extra kwargs
        optimize_kwargs.update(self._config.extra_kwargs)

        logger.info(
            "Starting GEPA optimization: seed_candidate keys=%s, train_size=%d, val_size=%s, max_metric_calls=%s",
            list(seed_candidate.keys()),
            len(train_list),
            len(val_list) if val_list else "N/A",
            self._config.max_metric_calls,
        )

        # Run synchronous GEPA in a worker thread
        gepa_result: Any = await asyncio.to_thread(gepa_lib.optimize, **optimize_kwargs)  # type: ignore[reportUnknownMemberType]

        self._result = gepa_result

        # Extract and publish the best candidate
        best_candidate: Any = gepa_result.best_candidate
        if isinstance(best_candidate, str):
            # Single-component case: GEPA returns a plain string
            best_candidate = {codec.resource_name: best_candidate}

        best_resources: NamedResources = codec.candidate_to_resources(best_candidate)
        self._best_prompt = best_resources[codec.resource_name]  # type: ignore[assignment]

        # Publish final best resources to the store
        best_version = f"gepa-best-v{version_counter[0]}"
        version_counter[0] += 1
        await store.update_resources(best_version, best_resources)

        best_score: float = 0.0
        if gepa_result.val_aggregate_scores:
            best_score = float(gepa_result.val_aggregate_scores[gepa_result.best_idx])
        logger.info(
            "GEPA optimization complete. Best candidate score: %.4f, published as resources_id=%s",
            best_score,
            best_version,
        )

    def _build_reflection_lm(self) -> Optional[Any]:
        """Build a reflection language model callable for GEPA if configured.

        Returns a callable matching GEPA's ``LanguageModel`` protocol::

            def __call__(self, prompt: str | list[dict[str, Any]]) -> str

        Returns ``None`` when no ``reflection_model`` is configured, letting
        GEPA fall back to its own default model.
        """
        if self._config.reflection_model is None:
            return None

        model_name = self._config.reflection_model
        extra_kwargs = dict(self._config.reflection_model_kwargs)

        def _litellm_completion(prompt: Any) -> str:
            # Imported inside the closure: litellm is only needed when a
            # reflection model is actually configured and invoked.
            import litellm  # type: ignore[reportUnknownMemberType]

            if isinstance(prompt, str):
                messages = [{"role": "user", "content": prompt}]
            else:
                messages = prompt
            response: Any = litellm.completion(model=model_name, messages=messages, **extra_kwargs)  # type: ignore[reportUnknownMemberType]
            return str(response.choices[0].message.content)

        return _litellm_completion
class PromptResourceCodec:
    """Translate between AGL `NamedResources` and GEPA candidate dicts.

    GEPA works on plain ``dict[str, str]`` candidates (component name to
    text), while Agent Lightning works on ``NamedResources`` (resource name
    to typed ``Resource``). This codec converts a single `PromptTemplate`
    resource back and forth between the two shapes, keeping the template
    engine intact across round-trips.

    Args:
        resource_name: The key in `NamedResources` that holds the
            optimizable `PromptTemplate`.
        engine: Template engine used when rebuilding `PromptTemplate`
            objects from candidate text (e.g. ``"f-string"``, ``"jinja"``).
    """

    def __init__(self, resource_name: str, engine: str) -> None:
        self.resource_name = resource_name
        self.engine = engine

    def resources_to_candidate(self, resources: NamedResources) -> Dict[str, str]:
        """Extract the optimizable template text as a GEPA candidate dict.

        Args:
            resources: Named resource mapping containing the target `PromptTemplate`.

        Returns:
            A one-entry candidate dict mapping the resource name to the template text.

        Raises:
            KeyError: If the configured resource name is not present.
            TypeError: If the resource is not a `PromptTemplate`.
        """
        res = resources[self.resource_name]
        if not isinstance(res, PromptTemplate):
            raise TypeError(f"Resource '{self.resource_name}' is not a PromptTemplate, got {type(res).__name__}")
        return {self.resource_name: res.template}

    def candidate_to_resources(self, candidate: Dict[str, str]) -> NamedResources:
        """Rebuild `NamedResources` from a GEPA candidate dict.

        Args:
            candidate: GEPA candidate mapping component names to text.

        Returns:
            A `NamedResources` mapping containing the reconstructed `PromptTemplate`.

        Raises:
            KeyError: If the configured resource name is not in the candidate.
        """
        text = candidate[self.resource_name]
        rebuilt = PromptTemplate(template=text, engine=self.engine)  # type: ignore[arg-type]
        return {self.resource_name: rebuilt}

    @classmethod
    def from_initial_resources(
        cls,
        resources: NamedResources,
        resource_name: str | None = None,
    ) -> Tuple[PromptResourceCodec, Dict[str, str]]:
        """Build a codec plus seed candidate from the trainer's initial resources.

        Uses ``resource_name`` when provided; otherwise picks the first
        `PromptTemplate` found in ``resources``.

        Args:
            resources: Initial named resources from the trainer.
            resource_name: Explicit resource key to use. When ``None``,
                auto-detects the first `PromptTemplate`.

        Returns:
            A ``(codec, seed_candidate)`` pair ready for GEPA's ``optimize()``.

        Raises:
            ValueError: If no `PromptTemplate` is found in resources, or the
                specified ``resource_name`` does not exist or is not a
                `PromptTemplate`.
        """
        if resource_name is not None:
            if resource_name not in resources:
                raise ValueError(f"Resource '{resource_name}' not found in initial_resources")
            chosen = resources[resource_name]
            if not isinstance(chosen, PromptTemplate):
                raise ValueError(f"Resource '{resource_name}' is not a PromptTemplate, got {type(chosen).__name__}")
            detected_name, detected_resource = resource_name, chosen
        else:
            match = next(
                ((name, res) for name, res in resources.items() if isinstance(res, PromptTemplate)),
                None,
            )
            if match is None:
                raise ValueError("No PromptTemplate found in initial_resources")
            detected_name, detected_resource = match

        logger.info(
            "Using resource '%s' (engine=%s) for GEPA optimization",
            detected_name,
            detected_resource.engine,
        )
        codec = cls(resource_name=detected_name, engine=detected_resource.engine)
        return codec, {detected_name: detected_resource.template}
class LightningGEPAAdapter:
    """GEPA adapter that evaluates candidates by running AGL store-backed rollouts.

    This class structurally satisfies the ``GEPAAdapter[Any, RolloutTrajectory,
    RolloutOutput]`` protocol without importing ``gepa`` at module level. It
    bridges GEPA's synchronous call convention with the asynchronous
    `LightningStore` API using `asyncio.run_coroutine_threadsafe`.

    Args:
        store: The `LightningStore` instance for enqueuing rollouts and querying spans.
        codec: Codec for converting between GEPA candidates and AGL resources.
        loop: The event loop on which async store methods should execute.
        version_counter: Shared mutable ``list[int]`` for generating monotonic
            resource version identifiers across calls.
        rollout_batch_timeout: Maximum seconds to wait for rollout completion.
        rollout_poll_interval: Seconds between completion polls.
    """

    propose_new_texts = None
    """Let GEPA use its default LLM-based proposer."""

    def __init__(
        self,
        store: LightningStore,
        codec: PromptResourceCodec,
        loop: asyncio.AbstractEventLoop,
        version_counter: List[int],
        rollout_batch_timeout: float = 3600.0,
        rollout_poll_interval: float = 2.0,
    ) -> None:
        self._store = store
        self._codec = codec
        self._loop = loop
        self._version_counter = version_counter
        self._rollout_batch_timeout = rollout_batch_timeout
        self._rollout_poll_interval = rollout_poll_interval

    # ------------------------------------------------------------------
    # Sync → async bridge
    # ------------------------------------------------------------------

    def _run_async(self, coro: Any) -> Any:
        """Execute an async coroutine on the algorithm's event loop from a worker thread."""
        future = asyncio.run_coroutine_threadsafe(coro, self._loop)
        return future.result()

    # ------------------------------------------------------------------
    # Version management
    # ------------------------------------------------------------------

    def _next_version(self) -> str:
        """Mint the next monotonic resource version id from the shared counter."""
        version = self._version_counter[0]
        self._version_counter[0] = version + 1
        return f"gepa-v{version}"

    # ------------------------------------------------------------------
    # GEPAAdapter.evaluate
    # ------------------------------------------------------------------

    def evaluate(
        self,
        batch: List[Any],
        candidate: Dict[str, str],
        capture_traces: bool = False,
    ) -> Any:
        """Evaluate a candidate prompt on a batch of inputs via AGL rollouts.

        Publishes the candidate as AGL resources, enqueues one rollout per
        batch item, polls for completion, and returns an ``EvaluationBatch``
        with per-example scores, outputs, and optional trajectories.

        Per-example failures are scored as 0.0 — the run is never aborted.

        Args:
            batch: List of task inputs.
            candidate: GEPA candidate mapping component names to text.
            capture_traces: Whether to populate trajectory data for reflection.

        Returns:
            A ``gepa.EvaluationBatch`` with outputs, scores, and trajectories.
        """
        # Imported lazily so this module never hard-depends on gepa.
        from gepa import EvaluationBatch  # type: ignore[reportAttributeAccessIssue]

        resources: NamedResources = self._codec.candidate_to_resources(candidate)
        version_id = self._next_version()
        resource_update = self._run_async(self._store.update_resources(version_id, resources))

        # Enqueue rollouts
        rollout_ids: List[str] = []
        for task_input in batch:
            rollout = self._run_async(
                self._store.enqueue_rollout(
                    input=task_input,
                    mode="train",
                    resources_id=resource_update.resources_id,
                )
            )
            rollout_ids.append(rollout.rollout_id)

        # Poll for completion
        finished = self._wait_for_rollouts(rollout_ids)

        # Build per-example results
        outputs: List[RolloutOutput] = []
        scores: List[float] = []
        trajectories: Optional[List[RolloutTrajectory]] = [] if capture_traces else None

        finished_by_id: Dict[str, Rollout] = {r.rollout_id: r for r in finished}

        for idx, rollout_id in enumerate(rollout_ids):
            rollout = finished_by_id.get(rollout_id)
            if rollout is None:
                # Timed out — score as 0.0
                logger.warning("Rollout %s did not complete within timeout, scoring 0.0", rollout_id)
                outputs.append(RolloutOutput(rollout_id=rollout_id, status="cancelled", final_reward=None))
                scores.append(0.0)
                if trajectories is not None:
                    trajectories.append(
                        RolloutTrajectory(
                            rollout_id=rollout_id,
                            status="cancelled",
                            spans=[],
                            final_reward=None,
                            input=batch[idx],
                        )
                    )
                continue

            try:
                spans: Sequence[Span] = self._run_async(
                    self._store.query_spans(rollout.rollout_id, attempt_id="latest")
                )
            except Exception:
                # Best-effort: a span-query failure degrades to an empty trace
                # rather than crashing the whole evaluation batch.
                logger.exception("Failed to query spans for rollout %s", rollout.rollout_id)
                spans = []

            reward = find_final_reward(list(spans))
            score = reward if reward is not None else 0.0

            outputs.append(
                RolloutOutput(
                    rollout_id=rollout.rollout_id,
                    status=rollout.status,
                    final_reward=reward,
                )
            )
            scores.append(score)

            if trajectories is not None:
                messages = self._try_trace_to_messages(list(spans))
                trajectories.append(
                    RolloutTrajectory(
                        rollout_id=rollout.rollout_id,
                        status=rollout.status,
                        spans=list(spans),
                        final_reward=reward,
                        input=batch[idx],
                        messages=messages,
                    )
                )

        return EvaluationBatch(  # type: ignore[reportUnknownVariableType]
            outputs=outputs,
            scores=scores,
            trajectories=trajectories,
        )

    # ------------------------------------------------------------------
    # GEPAAdapter.make_reflective_dataset
    # ------------------------------------------------------------------

    def make_reflective_dataset(
        self,
        candidate: Dict[str, str],
        eval_batch: Any,
        components_to_update: List[str],
    ) -> Mapping[str, Sequence[Mapping[str, Any]]]:
        """Build per-component reflective records from evaluation trajectories.

        Each record includes the task input, candidate text, reward, status,
        a span summary, and optional reconstructed messages for the component.

        Args:
            candidate: The evaluated GEPA candidate.
            eval_batch: The ``EvaluationBatch`` returned by ``evaluate()``.
            components_to_update: Component names that GEPA wants to refine.

        Returns:
            Mapping from component name to a list of reflective dataset records.
        """
        result: Dict[str, List[Dict[str, Any]]] = {comp: [] for comp in components_to_update}

        trajectories: Optional[List[RolloutTrajectory]] = eval_batch.trajectories
        if trajectories is None:
            logger.warning("No trajectories available for reflective dataset; returning empty records")
            return result

        for traj in trajectories:
            span_summary = self._summarize_spans(traj.spans)
            record: Dict[str, Any] = {
                "Inputs": repr(traj.input),
                "Generated Outputs": span_summary,
                "Feedback": f"Reward: {traj.final_reward}, Status: {traj.status}",
            }
            if traj.messages:
                record["Messages"] = traj.messages

            for comp in components_to_update:
                comp_record = {**record, "Component": comp, "Component Text": candidate.get(comp, "")}
                result[comp].append(comp_record)

        return result

    # ------------------------------------------------------------------
    # Helpers
    # ------------------------------------------------------------------

    def _wait_for_rollouts(self, rollout_ids: List[str]) -> List[Rollout]:
        """Poll the store for rollout completion with timeout."""
        # Use a monotonic clock for the deadline: wall-clock time.time() can
        # jump (NTP adjustments, DST) and mis-fire or stretch the timeout.
        deadline = time.monotonic() + self._rollout_batch_timeout
        while time.monotonic() < deadline:
            finished: List[Rollout] = self._run_async(
                self._store.wait_for_rollouts(rollout_ids=rollout_ids, timeout=0.0)
            )
            if len(finished) >= len(rollout_ids):
                logger.info("All %d rollouts finished within timeout", len(rollout_ids))
                return finished
            logger.debug(
                "%d / %d rollouts finished, polling again in %.1fs",
                len(finished),
                len(rollout_ids),
                self._rollout_poll_interval,
            )
            time.sleep(self._rollout_poll_interval)

        # Deadline passed — return whatever finished
        finished = self._run_async(self._store.wait_for_rollouts(rollout_ids=rollout_ids, timeout=0.0))
        logger.warning(
            "Rollout batch timed out after %.0fs: %d / %d finished",
            self._rollout_batch_timeout,
            len(finished),
            len(rollout_ids),
        )
        return finished

    @staticmethod
    def _try_trace_to_messages(spans: List[Span]) -> Optional[List[Any]]:
        """Attempt to reconstruct OpenAI messages from spans, returning None on failure."""
        try:
            adapter = TraceToMessages()
            return adapter.adapt(spans)
        except Exception:
            logger.debug("TraceToMessages failed, falling back to None", exc_info=True)
            return None

    @staticmethod
    def _summarize_spans(spans: List[Span]) -> str:
        """Build a concise textual summary of span names and rewards."""
        if not spans:
            return "(no spans)"
        parts: List[str] = []
        for span in spans:
            reward = find_final_reward([span])
            if reward is not None:
                parts.append(f"{span.name} [reward={reward}]")
            else:
                parts.append(span.name)
        return " -> ".join(parts)
+    GEPA forwards these through ``EvaluationBatch.outputs`` but does not
+    interpret them directly.
+    """
+
+    rollout_id: str
+    status: RolloutStatus
+    final_reward: Optional[float]
diff --git a/examples/README.md b/examples/README.md
index 30f16969e..35dffe11d 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -11,6 +11,7 @@ Community-contributed examples and recipes are available in the [contrib](../con
 | [calc_x](./calc_x) | VERL-powered math reasoning agent training that uses AutoGen with an MCP calculator tool. | [![calc_x workflow status](https://github.com/microsoft/agent-lightning/actions/workflows/badge-calc-x.yml/badge.svg)](https://github.com/microsoft/agent-lightning/actions/workflows/examples-calc-x.yml) |
 | [chartqa](./chartqa) | Vision-language ChartQA agent that reasons over charts with LangGraph and VERL plus multi-step self-refinement. | [![chartqa workflow status](https://github.com/microsoft/agent-lightning/actions/workflows/badge-chartqa.yml/badge.svg)](https://github.com/microsoft/agent-lightning/actions/workflows/examples-chartqa.yml) |
 | [claude_code](./claude_code) | Claude Code SWE-bench harness that records Agent-lightning traces across Anthropic, vLLM, and OpenAI-compatible backends. | [![claude_code workflow status](https://github.com/microsoft/agent-lightning/actions/workflows/badge-claude-code.yml/badge.svg)](https://github.com/microsoft/agent-lightning/actions/workflows/examples-claude-code.yml) |
+| [gepa](./gepa) | GEPA evolutionary prompt optimization on a HotPotQA question-answering agent with Azure OpenAI Entra ID authentication. | — |
 | [minimal](./minimal) | Bite-sized programs that demonstrate how individual Agent-lightning building blocks behave in isolation.
 | [![minimal workflow status](https://github.com/microsoft/agent-lightning/actions/workflows/badge-unit.yml/badge.svg)](https://github.com/microsoft/agent-lightning/actions/workflows/badge-unit.yml) |
 | [rag](./rag) | Retrieval-Augmented Generation pipeline targeting the MuSiQue dataset with Wikipedia retrieval. | [![rag workflow status](https://github.com/microsoft/agent-lightning/actions/workflows/badge-rag.yml/badge.svg)](https://github.com/microsoft/agent-lightning/actions/workflows/examples-rag.yml) |
 | [spider](./spider) | Text-to-SQL reinforcement learning training on the Spider dataset using LangGraph. | [![spider workflow status](https://github.com/microsoft/agent-lightning/actions/workflows/badge-spider.yml/badge.svg)](https://github.com/microsoft/agent-lightning/actions/workflows/examples-spider.yml) |
diff --git a/examples/gepa/.env.example b/examples/gepa/.env.example
new file mode 100644
index 000000000..d3437db31
--- /dev/null
+++ b/examples/gepa/.env.example
@@ -0,0 +1,20 @@
+# LLM provider: azure_entra (default), azure_key, or openai
+LLM_PROVIDER=azure_entra
+
+# --- Azure OpenAI (azure_entra and azure_key) ---
+AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com/
+AZURE_OPENAI_API_VERSION=2025-04-01-preview
+AZURE_OPENAI_DEPLOYMENT=gpt-4.1-nano
+AZURE_OPENAI_GRADER_DEPLOYMENT=gpt-4.1-mini
+
+# Used by litellm for Azure-prefixed model strings
+AZURE_API_BASE=https://your-resource.openai.azure.com/
+AZURE_API_VERSION=2025-04-01-preview
+
+# Only for azure_key provider
+AZURE_OPENAI_API_KEY=your-azure-api-key
+
+# --- OpenAI (openai provider) ---
+OPENAI_API_KEY=sk-your-openai-api-key
+OPENAI_MODEL=gpt-4.1-nano
+OPENAI_GRADER_MODEL=gpt-4.1-mini
diff --git a/examples/gepa/README.md b/examples/gepa/README.md
new file mode 100644
index 000000000..5d49b81ee
--- /dev/null
+++ b/examples/gepa/README.md
@@ -0,0 +1,117 @@
+# GEPA Example
+
+This example demonstrates [GEPA](https://github.com/gepa-ai/gepa) (Genetic-Pareto) prompt optimization on a HotPotQA question-answering agent. It supports Azure OpenAI (Entra ID or API key) and plain OpenAI as backends.
+
+## Overview
+
+The HotPotQA agent answers factoid questions from the [HotPotQA](https://hotpotqa.github.io/) dataset (loaded via DSPy). GEPA optimizes the prompt template through evolutionary search with reflective mutations, tracking a Pareto frontier of per-example performance. An optional autoresearch outer loop (`gepa_autoresearch.py`) searches over GEPA hyperparameters themselves.
+
+## Choosing an LLM Backend
+
+Set the backend via `LLM_PROVIDER` env var or `--provider` CLI arg:
+
+| Provider | Auth method | Extra dependency | Key env vars |
+|----------|-------------|------------------|--------------|
+| `azure_entra` (default) | Entra ID / `az login` | `azure-identity` | `AZURE_OPENAI_ENDPOINT`, `AZURE_OPENAI_DEPLOYMENT` |
+| `azure_key` | API key | — | `AZURE_OPENAI_ENDPOINT`, `AZURE_OPENAI_API_KEY` |
+| `openai` | OpenAI API key | — | `OPENAI_API_KEY` |
+
+## Requirements
+
+1. Install Agent-Lightning with GEPA extras:
+
+   ```bash
+   uv sync --extra gepa
+   ```
+
+2. **(Azure Entra ID only)** Install the Azure Identity library and authenticate:
+
+   ```bash
+   uv pip install azure-identity
+   az login
+   ```
+
+3. Copy `.env.example` to `.env` and fill in the variables for your chosen provider.
Export them before running: + + ```bash + export $(grep -v '^#' .env | xargs) + ``` + +## Included Files + +| File | Description | +|------|-------------| +| `llm_backend.py` | Centralized LLM provider logic (Azure Entra ID, Azure API key, OpenAI) | +| `hotpotqa_agent.py` | HotPotQA question-answering agent with multi-backend LLM support | +| `hotpotqa_gepa.py` | GEPA training script that optimizes the agent's prompt template | +| `gepa_autoresearch.py` | Autoresearch-style outer loop that searches over GEPA hyperparameters | +| `.env.example` | Template for environment variables (all three providers documented) | + +## Smoke Test + +Run a single task to verify authentication and connectivity: + +```bash +# Default (azure_entra): +python hotpotqa_agent.py + +# Or with a specific provider: +LLM_PROVIDER=openai python hotpotqa_agent.py +``` + +## Full Training + +Run GEPA optimization over the training dataset: + +```bash +python hotpotqa_gepa.py + +# Or with a specific provider: +python hotpotqa_gepa.py --provider openai +``` + +GEPA will evaluate prompt candidates, build reflective datasets from execution traces, and propose improved prompts. The best prompt is logged at the end of training. + +## Autoresearch (Hyperparameter Search) + +Run the autoresearch outer loop to search over GEPA configurations: + +```bash +python gepa_autoresearch.py + +# With LLM-guided proposals: +python gepa_autoresearch.py --proposal-policy llm --iterations 8 + +# With random search: +python gepa_autoresearch.py --proposal-policy random --iterations 16 +``` + +Each trial runs a full GEPA experiment and evaluates the learned prompt on a held-out split. The best configuration and prompt are saved to the run directory. 
+ +## W&B Experiment Tracking + +Track GEPA optimization progress (scores, budget, candidate acceptance) in [Weights & Biases](https://wandb.ai/): + +```bash +pip install wandb +python hotpotqa_gepa.py --wandb +``` + +Customize the project and run name: + +```bash +python hotpotqa_gepa.py --wandb --wandb-project my-project --wandb-name run-1 +``` + +Set `WANDB_API_KEY` or run `wandb login` before launching. + +## GEPA vs APO + +| Aspect | APO (`examples/apo/`) | GEPA (`examples/gepa/`) | +|--------|----------------------|------------------------| +| Algorithm | Beam search with gradient-based proposals | Evolutionary search with reflective mutations | +| Selection strategy | Single best | Pareto frontier (per-example tracking) | +| Adapter | Requires `TraceToMessages` adapter | Built-in adapter (no explicit adapter needed) | +| Reflection model | Uses `AsyncOpenAI` client directly | Uses `litellm.completion()` via model string | +| Auth in this example | OpenAI API key | Multi-backend (Azure Entra ID, Azure key, OpenAI) | +| Budget control | Beam rounds / beam width | `max_metric_calls` budget | diff --git a/examples/gepa/gepa_autoresearch.py b/examples/gepa/gepa_autoresearch.py new file mode 100644 index 000000000..60264ab22 --- /dev/null +++ b/examples/gepa/gepa_autoresearch.py @@ -0,0 +1,749 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Autonomous outer-loop search for GEPA hyperparameters. + +This script keeps GEPA as the inner prompt optimizer and adds an autoresearch- +style outer loop that: + +1. proposes a GEPA configuration, +2. runs a bounded GEPA experiment, +3. evaluates the learned prompt on a held-out split, +4. accepts or discards the proposal, +5. logs the full experiment history. + +The default design mirrors the key autoresearch idea of *fixed-budget* +experiments: every trial uses the same GEPA metric-call budget unless you opt in +with ``--tune-max-metric-calls``. 
+""" + +from __future__ import annotations + +import argparse +import json +import logging +import os +import random +from dataclasses import asdict, dataclass +from pathlib import Path +from typing import Any, Literal, Sequence, cast + +from hotpotqa_gepa import GEPAExperimentConfig, GEPAExperimentResult, run_gepa_experiment +from llm_backend import VALID_PROVIDERS, LLMProvider, get_provider, make_client +from openai.types.chat import ChatCompletionMessageParam +from pydantic import BaseModel, Field + +logger = logging.getLogger(__name__) + +CandidateSelectionStrategy = Literal["pareto", "current_best"] +ProposalPolicy = Literal["llm", "mutation", "random"] + +ConfigKey = tuple[int, int, str, int, int] + +DEFAULT_REFLECTION_MINIBATCH_OPTIONS = [2, 4, 8, 12, 16] +DEFAULT_N_RUNNER_OPTIONS = [1, 2, 4, 8] +DEFAULT_SEED_OPTIONS = [0, 1, 2, 7, 42, 123] +DEFAULT_MAX_METRIC_CALL_OPTIONS = [64, 96, 128, 160, 192, 256] +DEFAULT_CANDIDATE_STRATEGIES: list[CandidateSelectionStrategy] = ["pareto", "current_best"] + + +@dataclass(frozen=True) +class SearchSpace: + reflection_minibatch_options: list[int] + candidate_selection_options: list[CandidateSelectionStrategy] + n_runner_options: list[int] + seed_options: list[int] + max_metric_call_options: list[int] + + +@dataclass(frozen=True) +class TrialSummary: + iteration: int + accepted: bool + proposal_source: str + hypothesis: str + config: GEPAExperimentConfig + result: GEPAExperimentResult | None = None + error: str | None = None + + def to_dict(self) -> dict[str, Any]: + payload: dict[str, Any] = { + "iteration": self.iteration, + "accepted": self.accepted, + "proposal_source": self.proposal_source, + "hypothesis": self.hypothesis, + "config": asdict(self.config), + "error": self.error, + } + if self.result is not None: + payload["result"] = self.result.to_dict() + return payload + + +class ProposalConfig(BaseModel): + max_metric_calls: int = Field(description="GEPA metric-call budget for the trial") + 
reflection_minibatch_size: int = Field(description="GEPA reflection minibatch size") + candidate_selection_strategy: CandidateSelectionStrategy = Field(description="GEPA candidate selection strategy") + seed: int = Field(description="Random seed for GEPA") + n_runners: int = Field(description="Number of Agent-Lightning runners to use") + + +class ResearchProposal(BaseModel): + hypothesis: str = Field(description="Why this config might outperform the current best") + config: ProposalConfig + + +def parse_int_list(raw: str) -> list[int]: + values = [part.strip() for part in raw.split(",") if part.strip()] + if not values: + raise ValueError("Expected a comma-separated list of integers") + return [int(value) for value in values] + + +def ensure_log_dir(path: Path) -> None: + path.mkdir(parents=True, exist_ok=True) + + +def history_file_path(run_dir: Path) -> Path: + return run_dir / "history.jsonl" + + +def best_result_path(run_dir: Path) -> Path: + return run_dir / "best_result.json" + + +def load_history(run_dir: Path) -> list[dict[str, Any]]: + path = history_file_path(run_dir) + if not path.exists(): + return [] + entries: list[dict[str, Any]] = [] + with path.open("r", encoding="utf-8") as handle: + for line in handle: + line = line.strip() + if not line: + continue + entries.append(json.loads(line)) + return entries + + +def append_history_entry(run_dir: Path, entry: TrialSummary) -> None: + path = history_file_path(run_dir) + with path.open("a", encoding="utf-8") as handle: + handle.write(json.dumps(entry.to_dict()) + "\n") + + +def write_best_result(run_dir: Path, entry: TrialSummary) -> None: + path = best_result_path(run_dir) + with path.open("w", encoding="utf-8") as handle: + json.dump(entry.to_dict(), handle, indent=2) + + +def trial_dir(run_dir: Path, iteration: int) -> Path: + return run_dir / f"trial_{iteration:03d}" + + +def config_to_key(config: GEPAExperimentConfig) -> ConfigKey: + return ( + config.max_metric_calls, + 
config.reflection_minibatch_size, + config.candidate_selection_strategy, + config.seed, + config.n_runners, + ) + + +def history_best_entry(history: Sequence[dict[str, Any]]) -> dict[str, Any] | None: + scored_entries = [ + entry for entry in history if entry.get("result") and entry["result"].get("holdout_mean_reward") is not None + ] + if not scored_entries: + return None + return max(scored_entries, key=lambda entry: float(entry["result"]["holdout_mean_reward"])) + + +def collect_seen_configs(history: Sequence[dict[str, Any]]) -> set[ConfigKey]: + seen: set[ConfigKey] = set() + for entry in history: + config = entry.get("config") + if not config: + continue + seen.add( + ( + int(config["max_metric_calls"]), + int(config["reflection_minibatch_size"]), + str(config["candidate_selection_strategy"]), + int(config["seed"]), + int(config["n_runners"]), + ) + ) + return seen + + +def get_research_model_name(provider: LLMProvider) -> str: + if provider in ("azure_entra", "azure_key"): + return ( + os.environ.get("AZURE_OPENAI_RESEARCH_DEPLOYMENT") + or os.environ.get("AZURE_OPENAI_GRADER_DEPLOYMENT") + or os.environ.get("AZURE_OPENAI_DEPLOYMENT", "gpt-4.1-mini") + ) + return ( + os.environ.get("OPENAI_RESEARCH_MODEL") + or os.environ.get("OPENAI_GRADER_MODEL") + or os.environ.get("OPENAI_MODEL", "gpt-4.1-mini") + ) + + +def baseline_config(args: argparse.Namespace) -> GEPAExperimentConfig: + return GEPAExperimentConfig( + max_metric_calls=args.max_metric_calls, + reflection_minibatch_size=args.initial_reflection_minibatch_size, + candidate_selection_strategy=cast(CandidateSelectionStrategy, args.initial_candidate_selection_strategy), + seed=args.initial_seed, + n_runners=args.initial_n_runners, + ) + + +def make_search_space(args: argparse.Namespace) -> SearchSpace: + max_metric_call_options = [args.max_metric_calls] + if args.tune_max_metric_calls: + max_metric_call_options = sorted(set(parse_int_list(args.max_metric_call_options))) + + return SearchSpace( + 
reflection_minibatch_options=sorted(set(parse_int_list(args.reflection_minibatch_options))), + candidate_selection_options=[ + cast(CandidateSelectionStrategy, value.strip()) + for value in args.candidate_selection_options.split(",") + if value.strip() + ], + n_runner_options=sorted(set(parse_int_list(args.n_runner_options))), + seed_options=sorted(set(parse_int_list(args.seed_options))), + max_metric_call_options=max_metric_call_options, + ) + + +def clamp_to_allowed(value: int, allowed: Sequence[int]) -> int: + return min(allowed, key=lambda option: (abs(option - value), option)) + + +def canonicalize_config(config: GEPAExperimentConfig, space: SearchSpace, dev_size: int) -> GEPAExperimentConfig: + minibatch = clamp_to_allowed(config.reflection_minibatch_size, space.reflection_minibatch_options) + minibatch = max(1, min(minibatch, max(1, dev_size))) + + strategy: CandidateSelectionStrategy + if config.candidate_selection_strategy in space.candidate_selection_options: + strategy = config.candidate_selection_strategy + else: + strategy = space.candidate_selection_options[0] + + return GEPAExperimentConfig( + max_metric_calls=clamp_to_allowed(config.max_metric_calls, space.max_metric_call_options), + reflection_minibatch_size=minibatch, + candidate_selection_strategy=strategy, + seed=clamp_to_allowed(config.seed, space.seed_options), + n_runners=clamp_to_allowed(config.n_runners, space.n_runner_options), + display_progress_bar=True, + ) + + +def entry_to_short_line(entry: dict[str, Any]) -> str: + result: dict[str, Any] = entry.get("result") or {} + config: dict[str, Any] = entry.get("config") or {} + holdout = result.get("holdout_mean_reward") + inner = result.get("inner_val_mean_reward") + return ( + f"iter={entry.get('iteration')} accepted={entry.get('accepted')} " + f"holdout={holdout} inner={inner} " + f"cfg={{budget={config.get('max_metric_calls')}, minibatch={config.get('reflection_minibatch_size')}, " + 
f"strategy={config.get('candidate_selection_strategy')}, seed={config.get('seed')}, runners={config.get('n_runners')}}} " + f"hypothesis={entry.get('hypothesis')}" + ) + + +def summarize_history(history: Sequence[dict[str, Any]], limit: int = 8) -> str: + if not history: + return "No prior trials yet." + + scored = [entry for entry in history if entry.get("result")] + if not scored: + recent = history[-limit:] + return "\n".join(entry_to_short_line(entry) for entry in recent) + + best = sorted(scored, key=lambda entry: float(entry["result"]["holdout_mean_reward"]), reverse=True)[ + : max(1, limit // 2) + ] + recent = history[-max(1, limit - len(best)) :] + + lines = ["Top trials:"] + lines.extend(f"- {entry_to_short_line(entry)}" for entry in best) + lines.append("Recent trials:") + lines.extend(f"- {entry_to_short_line(entry)}" for entry in recent) + return "\n".join(lines) + + +def mutate_config( + base: GEPAExperimentConfig, rng: random.Random, space: SearchSpace, dev_size: int +) -> GEPAExperimentConfig: + proposal = { + "max_metric_calls": base.max_metric_calls, + "reflection_minibatch_size": base.reflection_minibatch_size, + "candidate_selection_strategy": base.candidate_selection_strategy, + "seed": base.seed, + "n_runners": base.n_runners, + } + knobs = ["reflection_minibatch_size", "candidate_selection_strategy", "seed", "n_runners"] + if len(space.max_metric_call_options) > 1: + knobs.append("max_metric_calls") + + num_mutations = rng.randint(1, min(3, len(knobs))) + for knob in rng.sample(knobs, k=num_mutations): + if knob == "reflection_minibatch_size": + proposal[knob] = rng.choice(space.reflection_minibatch_options) + elif knob == "candidate_selection_strategy": + proposal[knob] = rng.choice(space.candidate_selection_options) + elif knob == "seed": + proposal[knob] = rng.choice(space.seed_options) + elif knob == "n_runners": + proposal[knob] = rng.choice(space.n_runner_options) + elif knob == "max_metric_calls": + proposal[knob] = 
rng.choice(space.max_metric_call_options) + + return canonicalize_config( + GEPAExperimentConfig( + max_metric_calls=int(proposal["max_metric_calls"]), + reflection_minibatch_size=int(proposal["reflection_minibatch_size"]), + candidate_selection_strategy=cast(CandidateSelectionStrategy, proposal["candidate_selection_strategy"]), + seed=int(proposal["seed"]), + n_runners=int(proposal["n_runners"]), + ), + space, + dev_size, + ) + + +def random_config(rng: random.Random, space: SearchSpace, dev_size: int) -> GEPAExperimentConfig: + return canonicalize_config( + GEPAExperimentConfig( + max_metric_calls=rng.choice(space.max_metric_call_options), + reflection_minibatch_size=rng.choice(space.reflection_minibatch_options), + candidate_selection_strategy=rng.choice(space.candidate_selection_options), + seed=rng.choice(space.seed_options), + n_runners=rng.choice(space.n_runner_options), + ), + space, + dev_size, + ) + + +def propose_with_llm( + *, + provider: LLMProvider, + research_model: str, + current_best: GEPAExperimentConfig, + history: Sequence[dict[str, Any]], + space: SearchSpace, + dev_size: int, +) -> ResearchProposal: + client = make_client(provider) + + search_space_description = { + "max_metric_calls": space.max_metric_call_options, + "reflection_minibatch_size": [ + value for value in space.reflection_minibatch_options if value <= max(1, dev_size) + ], + "candidate_selection_strategy": list(space.candidate_selection_options), + "seed": space.seed_options, + "n_runners": space.n_runner_options, + } + + best_score = None + best_entry = history_best_entry(history) + if best_entry and best_entry.get("result"): + best_score = best_entry["result"].get("holdout_mean_reward") + + messages: list[ChatCompletionMessageParam] = [ + { + "role": "system", + "content": ( + "You are an autonomous research engineer optimizing GEPA hyperparameters. " + "Your job is to suggest a single next experiment that is plausible, non-duplicate, and " + "cost-aware. 
Prefer small, targeted changes from the current best unless history suggests a broader pivot. " + "Always stay inside the allowed search space." + ), + }, + { + "role": "user", + "content": ( + "Current best GEPA configuration:\n" + f"{json.dumps(asdict(current_best), indent=2)}\n\n" + f"Current best holdout score: {best_score}\n\n" + "Allowed search space:\n" + f"{json.dumps(search_space_description, indent=2)}\n\n" + "Experiment history:\n" + f"{summarize_history(history)}\n\n" + "Return exactly one promising next configuration and a short hypothesis." + ), + }, + ] + + response = client.chat.completions.parse( + model=research_model, + messages=messages, + response_format=ResearchProposal, + temperature=1.0, + ) + parsed = response.choices[0].message.parsed + if parsed is None: + raise ValueError("Research proposal parsing returned None") + + config = canonicalize_config( + GEPAExperimentConfig( + max_metric_calls=parsed.config.max_metric_calls, + reflection_minibatch_size=parsed.config.reflection_minibatch_size, + candidate_selection_strategy=parsed.config.candidate_selection_strategy, + seed=parsed.config.seed, + n_runners=parsed.config.n_runners, + ), + space, + dev_size, + ) + return ResearchProposal( + hypothesis=parsed.hypothesis, + config=ProposalConfig( + max_metric_calls=config.max_metric_calls, + reflection_minibatch_size=config.reflection_minibatch_size, + candidate_selection_strategy=config.candidate_selection_strategy, + seed=config.seed, + n_runners=config.n_runners, + ), + ) + + +def unique_candidate( + candidate: GEPAExperimentConfig, + *, + current_best: GEPAExperimentConfig, + seen: set[ConfigKey], + rng: random.Random, + space: SearchSpace, + dev_size: int, +) -> GEPAExperimentConfig: + if config_to_key(candidate) not in seen: + return candidate + + for _ in range(20): + mutated = mutate_config(current_best, rng, space, dev_size) + if config_to_key(mutated) not in seen: + return mutated + + for _ in range(20): + random_candidate = 
random_config(rng, space, dev_size) + if config_to_key(random_candidate) not in seen: + return random_candidate + + return candidate + + +def build_trial_summary( + *, + iteration: int, + accepted: bool, + proposal_source: str, + hypothesis: str, + config: GEPAExperimentConfig, + result: GEPAExperimentResult | None = None, + error: str | None = None, +) -> TrialSummary: + return TrialSummary( + iteration=iteration, + accepted=accepted, + proposal_source=proposal_source, + hypothesis=hypothesis, + config=config, + result=result, + error=error, + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Autoresearch-like outer loop for GEPA hyperparameter tuning") + parser.add_argument( + "--provider", + type=str, + choices=VALID_PROVIDERS, + default=None, + help="LLM backend (default: LLM_PROVIDER env var or azure_entra)", + ) + parser.add_argument("--iterations", type=int, default=8, help="Number of outer-loop trials to run") + parser.add_argument( + "--proposal-policy", + type=str, + choices=("llm", "mutation", "random"), + default="llm", + help="How to propose the next GEPA configuration", + ) + parser.add_argument( + "--run-dir", + type=str, + default="gepa_autoresearch_runs/default", + help="Directory for experiment history and per-trial artifacts", + ) + parser.add_argument("--resume", action="store_true", help="Resume from an existing history file if present") + parser.add_argument("--min-improvement", type=float, default=1e-6, help="Minimum holdout improvement to accept") + parser.add_argument("--train-size", type=int, default=32, help="Inner-loop HotPotQA train size") + parser.add_argument("--dev-size", type=int, default=32, help="Inner-loop HotPotQA dev size") + parser.add_argument( + "--holdout-size", + type=int, + default=32, + help="Held-out evaluation size for the outer-loop objective", + ) + parser.add_argument( + "--holdout-split", + type=str, + choices=("dev", "test"), + default="test", + help="Held-out split 
used by the outer loop. 'test' uses the official HotPotQA validation split.", + ) + parser.add_argument("--train-seed", type=int, default=1, help="HotPotQA train seed") + parser.add_argument("--eval-seed", type=int, default=2023, help="HotPotQA eval seed for inner dev") + parser.add_argument( + "--holdout-eval-seed", + type=int, + default=2024, + help="HotPotQA eval seed for held-out scoring", + ) + parser.add_argument( + "--max-metric-calls", + type=int, + default=128, + help="Default fixed GEPA metric-call budget per trial", + ) + parser.add_argument( + "--tune-max-metric-calls", + action="store_true", + help="Allow the outer loop to search over GEPA metric-call budgets as well", + ) + parser.add_argument( + "--max-metric-call-options", + type=str, + default=",".join(str(value) for value in DEFAULT_MAX_METRIC_CALL_OPTIONS), + help="Comma-separated GEPA metric-call budgets to consider when --tune-max-metric-calls is enabled", + ) + parser.add_argument( + "--reflection-minibatch-options", + type=str, + default=",".join(str(value) for value in DEFAULT_REFLECTION_MINIBATCH_OPTIONS), + help="Comma-separated reflection minibatch sizes to search", + ) + parser.add_argument( + "--candidate-selection-options", + type=str, + default=",".join(DEFAULT_CANDIDATE_STRATEGIES), + help="Comma-separated candidate selection strategies to search", + ) + parser.add_argument( + "--n-runner-options", + type=str, + default=",".join(str(value) for value in DEFAULT_N_RUNNER_OPTIONS), + help="Comma-separated runner counts to search", + ) + parser.add_argument( + "--seed-options", + type=str, + default=",".join(str(value) for value in DEFAULT_SEED_OPTIONS), + help="Comma-separated GEPA seeds to search", + ) + parser.add_argument( + "--initial-reflection-minibatch-size", + type=int, + default=8, + help="Baseline reflection minibatch size before the search starts", + ) + parser.add_argument( + "--initial-candidate-selection-strategy", + type=str, + choices=("pareto", "current_best"), + 
default="pareto", + help="Baseline candidate selection strategy before the search starts", + ) + parser.add_argument("--initial-seed", type=int, default=42, help="Baseline GEPA seed") + parser.add_argument("--initial-n-runners", type=int, default=8, help="Baseline runner count") + parser.add_argument("--search-seed", type=int, default=0, help="Random seed for outer-loop mutation/random search") + parser.add_argument( + "--research-model", + type=str, + default=None, + help="Optional override for the LLM used to propose the next GEPA configuration", + ) + parser.add_argument("--wandb", action="store_true", help="Enable W&B for the inner GEPA experiments") + parser.add_argument("--wandb-project", type=str, default="gepa-hotpotqa-autoresearch", help="W&B project name") + parser.add_argument("--wandb-name-prefix", type=str, default="autoresearch", help="W&B run name prefix") + return parser.parse_args() + + +def main() -> None: + args = parse_args() + provider = get_provider(args.provider) + os.environ["LLM_PROVIDER"] = provider + + run_dir = Path(args.run_dir) + ensure_log_dir(run_dir) + + history: list[dict[str, Any]] + if args.resume: + history = load_history(run_dir) + else: + history = [] + history_path = history_file_path(run_dir) + if history_path.exists(): + history_path.unlink() + best_path = best_result_path(run_dir) + if best_path.exists(): + best_path.unlink() + + rng = random.Random(args.search_seed) + space = make_search_space(args) + research_model = args.research_model or get_research_model_name(provider) + + best_history_entry = history_best_entry(history) + current_best_config = baseline_config(args) + current_best_score = float("-inf") + if best_history_entry is not None: + config = best_history_entry["config"] + current_best_config = GEPAExperimentConfig( + max_metric_calls=int(config["max_metric_calls"]), + reflection_minibatch_size=int(config["reflection_minibatch_size"]), + candidate_selection_strategy=cast(CandidateSelectionStrategy, 
config["candidate_selection_strategy"]), + seed=int(config["seed"]), + n_runners=int(config["n_runners"]), + ) + current_best_score = float(best_history_entry["result"]["holdout_mean_reward"]) + + seen = collect_seen_configs(history) + start_iteration = len(history) + + logger.info("Using provider=%s research_model=%s", provider, research_model) + logger.info("Resuming with %d existing trials", start_iteration) + + for iteration in range(start_iteration, start_iteration + args.iterations): + if iteration == 0 and not history: + candidate = baseline_config(args) + hypothesis = "Baseline GEPA configuration." + proposal_source = "baseline" + else: + candidate = None + hypothesis = "" + proposal_source = cast(ProposalPolicy, args.proposal_policy) + + if args.proposal_policy == "llm": + try: + proposal = propose_with_llm( + provider=provider, + research_model=research_model, + current_best=current_best_config, + history=history, + space=space, + dev_size=args.dev_size, + ) + candidate = canonicalize_config( + GEPAExperimentConfig( + max_metric_calls=proposal.config.max_metric_calls, + reflection_minibatch_size=proposal.config.reflection_minibatch_size, + candidate_selection_strategy=proposal.config.candidate_selection_strategy, + seed=proposal.config.seed, + n_runners=proposal.config.n_runners, + ), + space, + args.dev_size, + ) + hypothesis = proposal.hypothesis + except Exception as exc: # pragma: no cover - defensive fallback for runtime environments + logger.warning("LLM proposal failed, falling back to mutation: %s", exc) + candidate = mutate_config(current_best_config, rng, space, args.dev_size) + hypothesis = f"Fallback mutation after LLM proposal failure: {exc}" + proposal_source = "mutation_fallback" + elif args.proposal_policy == "mutation": + candidate = mutate_config(current_best_config, rng, space, args.dev_size) + hypothesis = "Mutate a few GEPA knobs around the current best configuration." 
+ else: + candidate = random_config(rng, space, args.dev_size) + hypothesis = "Randomly sample a fresh GEPA configuration from the search space." + + assert candidate is not None + candidate = unique_candidate( + candidate, + current_best=current_best_config, + seen=seen, + rng=rng, + space=space, + dev_size=args.dev_size, + ) + + candidate = canonicalize_config(candidate, space, args.dev_size) + candidate_key = config_to_key(candidate) + seen.add(candidate_key) + + this_trial_dir = trial_dir(run_dir, iteration) + ensure_log_dir(this_trial_dir) + + try: + result = run_gepa_experiment( + provider=provider, + experiment_config=candidate, + train_size=args.train_size, + dev_size=args.dev_size, + train_seed=args.train_seed, + eval_seed=args.eval_seed, + holdout_size=args.holdout_size, + holdout_eval_seed=args.holdout_eval_seed, + holdout_split=cast(Literal["dev", "test"], args.holdout_split), + artifact_dir=this_trial_dir, + use_wandb=args.wandb, + wandb_project=args.wandb_project, + wandb_name=f"{args.wandb_name_prefix}-{iteration:03d}", + ) + accepted = result.holdout_mean_reward > current_best_score + args.min_improvement + trial = build_trial_summary( + iteration=iteration, + accepted=accepted, + proposal_source=proposal_source, + hypothesis=hypothesis, + config=candidate, + result=result, + ) + append_history_entry(run_dir, trial) + history.append(trial.to_dict()) + + print( + f"[trial {iteration:03d}] holdout={result.holdout_mean_reward:.4f} " + f"inner={result.inner_val_mean_reward:.4f} accepted={accepted} " + f"config={asdict(candidate)}" + ) + + if accepted: + current_best_config = candidate + current_best_score = result.holdout_mean_reward + write_best_result(run_dir, trial) + except Exception as exc: # pragma: no cover - defensive runtime logging + trial = build_trial_summary( + iteration=iteration, + accepted=False, + proposal_source=proposal_source, + hypothesis=hypothesis, + config=candidate, + error=str(exc), + ) + append_history_entry(run_dir, trial) + 
history.append(trial.to_dict()) + print(f"[trial {iteration:03d}] failed error={exc} config={asdict(candidate)}") + + final_best = history_best_entry(history) + print("\nBest result:") + if final_best is None: + print("No successful trials were recorded.") + return + + print(json.dumps(final_best, indent=2)) + + +if __name__ == "__main__": + main() diff --git a/examples/gepa/hotpotqa_agent.py b/examples/gepa/hotpotqa_agent.py new file mode 100644 index 000000000..0321c950c --- /dev/null +++ b/examples/gepa/hotpotqa_agent.py @@ -0,0 +1,268 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""HotPotQA agent with multi-backend LLM support. + +Supports Azure OpenAI (Entra ID or API key) and plain OpenAI. The backend is +selected via the ``LLM_PROVIDER`` env var — see ``llm_backend.py`` for details. + +Usage:: + + # Azure Entra ID (default): + az login + python hotpotqa_agent.py + + # Azure API key: + LLM_PROVIDER=azure_key python hotpotqa_agent.py + + # OpenAI: + LLM_PROVIDER=openai python hotpotqa_agent.py +""" + +from __future__ import annotations + +import asyncio +import re +import string +from collections import Counter +from typing import Any, List, Optional, Tuple, TypedDict, cast + +from dspy.datasets import HotPotQA # type: ignore[import-untyped] +from llm_backend import LLMProvider, get_model_names, get_provider, make_client +from openai.types.chat import ChatCompletionMessageParam +from rich.console import Console + +from agentlightning.adapter import TraceToMessages +from agentlightning.litagent import rollout +from agentlightning.reward import find_final_reward +from agentlightning.runner import LitAgentRunner +from agentlightning.store import InMemoryLightningStore +from agentlightning.tracer.agentops import AgentOpsTracer +from agentlightning.types import Dataset, PromptTemplate + +console = Console() + + +class HotPotQATask(TypedDict): + id: str + question: str + answer: str + + +ZERO_METRIC_ANSWERS = {"yes", "no", "noanswer"} + + +def 
prompt_template_baseline() -> PromptTemplate: + return PromptTemplate( + template=( + "Answer the following question with a short factoid answer. " + "Return only the answer, with no explanation.\n\n" + "Question: {question}\n" + "Answer:" + ), + engine="f-string", + ) + + +def _normalize_answer(text: str) -> str: + def remove_articles(value: str) -> str: + return re.sub(r"\b(a|an|the)\b", " ", value) + + def white_space_fix(value: str) -> str: + return " ".join(value.split()) + + def remove_punc(value: str) -> str: + return "".join(ch for ch in value if ch not in string.punctuation) + + return white_space_fix(remove_articles(remove_punc(text.lower()))) + + +def hotpot_exact_match(prediction: Optional[str], gold: str) -> float: + if not prediction: + return 0.0 + return float(_normalize_answer(prediction) == _normalize_answer(gold)) + + +def hotpot_f1(prediction: Optional[str], gold: str) -> float: + if not prediction: + return 0.0 + + normalized_prediction = _normalize_answer(prediction) + normalized_gold = _normalize_answer(gold) + + if normalized_prediction in ZERO_METRIC_ANSWERS or normalized_gold in ZERO_METRIC_ANSWERS: + return float(normalized_prediction == normalized_gold) + + pred_tokens = normalized_prediction.split() + gold_tokens = normalized_gold.split() + common = Counter(pred_tokens) & Counter(gold_tokens) + num_same = sum(common.values()) + if num_same == 0: + return 0.0 + + precision = num_same / len(pred_tokens) + recall = num_same / len(gold_tokens) + return 2 * precision * recall / (precision + recall) + + +def hotpotqa_grader(final_message: Optional[str], expected_answer: str) -> float: + em = hotpot_exact_match(final_message, expected_answer) + f1 = hotpot_f1(final_message, expected_answer) + + console.print("[bold yellow]=== Gold Answer ===[/bold yellow]") + console.print(expected_answer) + console.print("[bold yellow]=== Prediction ===[/bold yellow]") + console.print(final_message) + console.print("[bold yellow]=== HotPotQA EM / F1 ===[/bold 
yellow]") + console.print({"exact_match": em, "f1": f1}) + return f1 + + +def _resolve_runtime_backend() -> Tuple[LLMProvider, str]: + """Resolve the provider and model at call time. + + This allows scripts such as ``hotpotqa_gepa.py`` and + ``gepa_autoresearch.py`` to set ``LLM_PROVIDER`` after import time and still + have the rollout use the intended backend. + """ + + provider = get_provider() + deployment_name, _ = get_model_names(provider) + return provider, deployment_name + + +@rollout +def hotpotqa_agent(task: HotPotQATask, prompt_template: PromptTemplate) -> float: + """Answer a HotPotQA question. + + The prompt template is optimized by Agent-lightning's GEPA algorithm. + """ + + provider, model = _resolve_runtime_backend() + client = make_client(provider) + + user_message = prompt_template.format(question=task["question"]) + + messages: List[ChatCompletionMessageParam] = [ + { + "role": "system", + "content": ( + "You answer factoid questions. Give the shortest correct answer you can. " + "Do not add explanations unless the prompt explicitly asks for them." 
+ ), + }, + {"role": "user", "content": user_message}, + ] + + console.print("[bold yellow]=== Question ===[/bold yellow]") + console.print(task["question"]) + console.print("[bold yellow]=== User Message ===[/bold yellow]") + console.print(user_message) + + resp = client.chat.completions.create( + model=model, + messages=messages, + temperature=0.0, + ) + + final_message = resp.choices[0].message.content + + console.print("[bold yellow]=== Final Assistant Message ===[/bold yellow]") + console.print(final_message) + + return hotpotqa_grader(final_message, task["answer"]) + + +def _example_to_task(split_name: str, index: int, example: Any) -> HotPotQATask: + return { + "id": str(example.get("id", f"{split_name}-{index}")), + "question": str(example["question"]), + "answer": str(example["answer"]), + } + + +def load_hotpotqa_splits( + train_size: int = 32, + dev_size: int = 32, + test_size: int = 0, + train_seed: int = 1, + eval_seed: int = 2023, +) -> Tuple[Dataset[HotPotQATask], Dataset[HotPotQATask], Dataset[HotPotQATask]]: + dataset: Any = HotPotQA( # type: ignore[reportUnknownVariableType] + train_seed=train_seed, + train_size=train_size, + eval_seed=eval_seed, + dev_size=dev_size, + test_size=test_size, + ) + + train_tasks: List[HotPotQATask] = [ + _example_to_task("train", idx, example) for idx, example in enumerate(dataset.train) # type: ignore[reportUnknownArgumentType, reportUnknownMemberType, reportUnknownVariableType] + ] + dev_tasks: List[HotPotQATask] = [ + _example_to_task("dev", idx, example) for idx, example in enumerate(dataset.dev) # type: ignore[reportUnknownArgumentType, reportUnknownMemberType, reportUnknownVariableType] + ] + test_tasks: List[HotPotQATask] = [ + _example_to_task("test", idx, example) for idx, example in enumerate(dataset.test) # type: ignore[reportUnknownArgumentType, reportUnknownMemberType, reportUnknownVariableType] + ] + return ( + cast(Dataset[HotPotQATask], train_tasks), + cast(Dataset[HotPotQATask], dev_tasks), + 
cast(Dataset[HotPotQATask], test_tasks), + ) + + +def load_hotpotqa_tasks( + train_size: int = 32, + dev_size: int = 32, + train_seed: int = 1, + eval_seed: int = 2023, +) -> Tuple[Dataset[HotPotQATask], Dataset[HotPotQATask]]: + train_tasks, dev_tasks, _ = load_hotpotqa_splits( + train_size=train_size, + dev_size=dev_size, + test_size=0, + train_seed=train_seed, + eval_seed=eval_seed, + ) + return train_tasks, dev_tasks + + +def load_hotpotqa_holdout_tasks( + test_size: int = 32, + eval_seed: int = 2023, + train_seed: int = 1, +) -> Dataset[HotPotQATask]: + """Load a held-out slice from the official HotPotQA validation split.""" + + _, _, test_tasks = load_hotpotqa_splits( + train_size=1, + dev_size=1, + test_size=test_size, + train_seed=train_seed, + eval_seed=eval_seed, + ) + return test_tasks + + +async def debug_hotpotqa_agent(limit: int = 1) -> None: + runner = LitAgentRunner[HotPotQATask](AgentOpsTracer()) + store = InMemoryLightningStore() + prompt_template = prompt_template_baseline() + dataset, _ = load_hotpotqa_tasks(train_size=limit, dev_size=1) + tasks = cast(List[HotPotQATask], dataset) + with runner.run_context(agent=hotpotqa_agent, store=store): + for task in tasks[:limit]: + console.print("[bold green]=== Task ===[/bold green]", task, sep="\n") + rollout = await runner.step(task, resources={"prompt_template": prompt_template}) + spans = await store.query_spans(rollout.rollout_id) + adapter = TraceToMessages() + messages = adapter.adapt(spans) + for message_idx, message in enumerate(messages): + console.print(f"[bold purple]=== Postmortem Message #{message_idx} ===[/bold purple]") + console.print(message) + reward = find_final_reward(spans) + console.print("[bold purple]=== Postmortem Reward ===[/bold purple]", reward, sep="\n") + + +if __name__ == "__main__": + asyncio.run(debug_hotpotqa_agent()) diff --git a/examples/gepa/hotpotqa_gepa.py b/examples/gepa/hotpotqa_gepa.py new file mode 100644 index 000000000..c44645a4a --- /dev/null +++ 
b/examples/gepa/hotpotqa_gepa.py @@ -0,0 +1,393 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""GEPA prompt optimization for the HotPotQA agent. + +Supports Azure OpenAI (Entra ID or API key) and plain OpenAI. The backend is +selected via ``--provider`` or the ``LLM_PROVIDER`` env var — see +``llm_backend.py`` for details. + +Usage:: + + # Azure Entra ID (default): + az login + python hotpotqa_gepa.py + + # Azure API key: + python hotpotqa_gepa.py --provider azure_key + + # OpenAI: + python hotpotqa_gepa.py --provider openai + + # With W&B experiment tracking: + python hotpotqa_gepa.py --wandb --wandb-project gepa-hotpotqa +""" + +from __future__ import annotations + +import argparse +import asyncio +import json +import logging +import os +import time +from dataclasses import asdict, dataclass +from pathlib import Path +from typing import Any, Literal, Sequence, Tuple, cast + +from hotpotqa_agent import ( + HotPotQATask, + hotpotqa_agent, + load_hotpotqa_holdout_tasks, + load_hotpotqa_tasks, + prompt_template_baseline, +) +from llm_backend import VALID_PROVIDERS, LLMProvider, build_reflection_config, get_provider + +from agentlightning import Trainer, setup_logging +from agentlightning.algorithm.gepa import GEPA, GEPAConfig +from agentlightning.reward import find_final_reward +from agentlightning.runner import LitAgentRunner +from agentlightning.store import InMemoryLightningStore +from agentlightning.tracer.agentops import AgentOpsTracer +from agentlightning.types import Dataset, PromptTemplate + +logger = logging.getLogger(__name__) + +CandidateSelectionStrategy = Literal["pareto", "current_best"] + + +@dataclass(frozen=True) +class GEPAExperimentConfig: + max_metric_calls: int = 250 + reflection_minibatch_size: int = 8 + candidate_selection_strategy: CandidateSelectionStrategy = "pareto" + seed: int = 42 + n_runners: int = 8 + display_progress_bar: bool = True + + +@dataclass(frozen=True) +class PromptEvaluationResult: + mean_reward: float + rewards: 
list[float] + + @property + def num_examples(self) -> int: + return len(self.rewards) + + +@dataclass(frozen=True) +class GEPAExperimentResult: + config: GEPAExperimentConfig + best_prompt_template: str + train_size: int + dev_size: int + holdout_size: int + train_seed: int + eval_seed: int + holdout_eval_seed: int + holdout_split: str + inner_val_mean_reward: float + holdout_mean_reward: float + runtime_seconds: float + artifact_dir: str | None = None + + def to_dict(self) -> dict[str, Any]: + return asdict(self) + + +def ensure_setup_logging() -> None: + if getattr(ensure_setup_logging, "_configured", False): + return + setup_logging() + setattr(ensure_setup_logging, "_configured", True) + + +def load_train_val_dataset( + train_size: int, + dev_size: int, + train_seed: int, + eval_seed: int, +) -> Tuple[Dataset[HotPotQATask], Dataset[HotPotQATask]]: + dataset_train, dataset_val = load_hotpotqa_tasks( + train_size=train_size, + dev_size=dev_size, + train_seed=train_seed, + eval_seed=eval_seed, + ) + return dataset_train, dataset_val + + +def setup_gepa_logger(file_path: str = "gepa.log") -> None: + """Dump a copy of all the logs produced by the GEPA algorithm to a file.""" + + target = Path(file_path).resolve() + gepa_logger = logging.getLogger("agentlightning.algorithm.gepa") + for handler in gepa_logger.handlers: + if isinstance(handler, logging.FileHandler): + try: + if Path(handler.baseFilename).resolve() == target: + return + except Exception: + continue + + target.parent.mkdir(parents=True, exist_ok=True) + file_handler = logging.FileHandler(target) + file_handler.setLevel(logging.INFO) + formatter = logging.Formatter("%(asctime)s [%(levelname)s] (Process-%(process)d %(name)s) %(message)s") + file_handler.setFormatter(formatter) + gepa_logger.addHandler(file_handler) + + +async def _evaluate_prompt_template_async( + prompt_template: PromptTemplate, + dataset: Sequence[HotPotQATask], +) -> PromptEvaluationResult: + runner = 
LitAgentRunner[HotPotQATask](AgentOpsTracer()) + store = InMemoryLightningStore() + rewards: list[float] = [] + + with runner.run_context(agent=hotpotqa_agent, store=store): + for task in dataset: + rollout = await runner.step(task, resources={"prompt_template": prompt_template}) + spans = await store.query_spans(rollout.rollout_id) + reward = find_final_reward(spans) + rewards.append(float(reward or 0.0)) + + mean_reward = sum(rewards) / len(rewards) if rewards else 0.0 + return PromptEvaluationResult(mean_reward=mean_reward, rewards=rewards) + + +def evaluate_prompt_template( + prompt_template: PromptTemplate, + dataset: Sequence[HotPotQATask], +) -> PromptEvaluationResult: + return asyncio.run(_evaluate_prompt_template_async(prompt_template, dataset)) + + +def build_gepa( + provider: LLMProvider, + experiment_config: GEPAExperimentConfig, + *, + use_wandb: bool = False, + wandb_project: str = "gepa-hotpotqa", + wandb_name: str | None = None, +) -> GEPA: + reflection_model, reflection_model_kwargs = build_reflection_config(provider) + wandb_init_kwargs: dict[str, str] = {"project": wandb_project} + if wandb_name: + wandb_init_kwargs["name"] = wandb_name + + logger.info("Using LLM provider: %s (reflection model: %s)", provider, reflection_model) + + return GEPA( + config=GEPAConfig( + max_metric_calls=experiment_config.max_metric_calls, + reflection_minibatch_size=experiment_config.reflection_minibatch_size, + candidate_selection_strategy=experiment_config.candidate_selection_strategy, + reflection_model=reflection_model, + reflection_model_kwargs=reflection_model_kwargs, + seed=experiment_config.seed, + display_progress_bar=experiment_config.display_progress_bar, + use_wandb=use_wandb, + wandb_init_kwargs=wandb_init_kwargs, + ), + ) + + +def run_gepa_experiment( + *, + provider: LLMProvider, + experiment_config: GEPAExperimentConfig, + train_size: int = 32, + dev_size: int = 32, + train_seed: int = 1, + eval_seed: int = 2023, + holdout_size: int = 32, + 
holdout_eval_seed: int | None = None, + holdout_split: Literal["dev", "test"] = "test", + artifact_dir: str | Path | None = None, + use_wandb: bool = False, + wandb_project: str = "gepa-hotpotqa", + wandb_name: str | None = None, +) -> GEPAExperimentResult: + """Run one GEPA configuration and evaluate the resulting prompt. + + This treats GEPA as the inner optimizer. The returned holdout score is the + outer-loop objective used by ``gepa_autoresearch.py``. + """ + + ensure_setup_logging() + os.environ["LLM_PROVIDER"] = provider + + artifact_path: Path | None = None + if artifact_dir is not None: + artifact_path = Path(artifact_dir) + artifact_path.mkdir(parents=True, exist_ok=True) + setup_gepa_logger(str(artifact_path / "gepa.log")) + + start_time = time.perf_counter() + dataset_train, dataset_val = load_train_val_dataset( + train_size=train_size, + dev_size=dev_size, + train_seed=train_seed, + eval_seed=eval_seed, + ) + + if holdout_eval_seed is None: + holdout_eval_seed = eval_seed + + if holdout_size <= 0: + holdout_dataset = dataset_val + elif holdout_split == "test": + holdout_dataset = load_hotpotqa_holdout_tasks( + test_size=holdout_size, + eval_seed=holdout_eval_seed, + train_seed=train_seed, + ) + else: + _, holdout_dataset = load_hotpotqa_tasks( + train_size=1, + dev_size=holdout_size, + train_seed=train_seed, + eval_seed=holdout_eval_seed, + ) + + algo = build_gepa( + provider=provider, + experiment_config=experiment_config, + use_wandb=use_wandb, + wandb_project=wandb_project, + wandb_name=wandb_name, + ) + trainer = Trainer( + algorithm=algo, + n_runners=experiment_config.n_runners, + initial_resources={ + "prompt_template": prompt_template_baseline(), + }, + ) + trainer.fit(agent=hotpotqa_agent, train_dataset=dataset_train, val_dataset=dataset_val) + + best_prompt = algo.get_best_prompt() + + if artifact_path is not None: + (artifact_path / "best_prompt.txt").write_text(best_prompt.template, encoding="utf-8") + + inner_eval = 
evaluate_prompt_template(best_prompt, [dataset_val[i] for i in range(len(dataset_val))]) + holdout_eval = evaluate_prompt_template(best_prompt, [holdout_dataset[i] for i in range(len(holdout_dataset))]) + runtime_seconds = time.perf_counter() - start_time + + result = GEPAExperimentResult( + config=experiment_config, + best_prompt_template=best_prompt.template, + train_size=train_size, + dev_size=dev_size, + holdout_size=holdout_size, + train_seed=train_seed, + eval_seed=eval_seed, + holdout_eval_seed=holdout_eval_seed, + holdout_split=holdout_split, + inner_val_mean_reward=inner_eval.mean_reward, + holdout_mean_reward=holdout_eval.mean_reward, + runtime_seconds=runtime_seconds, + artifact_dir=str(artifact_path) if artifact_path is not None else None, + ) + + if artifact_path is not None: + (artifact_path / "result.json").write_text(json.dumps(result.to_dict(), indent=2), encoding="utf-8") + + return result + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="GEPA prompt optimization for the HotPotQA agent") + parser.add_argument( + "--provider", + type=str, + choices=VALID_PROVIDERS, + default=None, + help="LLM backend (default: LLM_PROVIDER env var or azure_entra)", + ) + parser.add_argument("--train-size", type=int, default=32, help="Number of HotPotQA training examples") + parser.add_argument("--dev-size", type=int, default=32, help="Number of HotPotQA validation examples") + parser.add_argument( + "--holdout-size", + type=int, + default=32, + help="Number of held-out examples for final evaluation of the learned prompt", + ) + parser.add_argument( + "--holdout-split", + type=str, + choices=("dev", "test"), + default="test", + help="Held-out split used for the final score. 
'test' uses the official HotPotQA validation split.", + ) + parser.add_argument("--train-seed", type=int, default=1, help="DSPy HotPotQA training seed") + parser.add_argument("--eval-seed", type=int, default=2023, help="DSPy HotPotQA eval seed") + parser.add_argument( + "--holdout-eval-seed", + type=int, + default=2024, + help="Seed for the held-out evaluation split sampling", + ) + parser.add_argument("--max-metric-calls", type=int, default=250, help="GEPA metric-call budget") + parser.add_argument( + "--reflection-minibatch-size", + type=int, + default=8, + help="GEPA reflection minibatch size", + ) + parser.add_argument( + "--candidate-selection-strategy", + type=str, + choices=("pareto", "current_best"), + default="pareto", + help="GEPA candidate selection strategy", + ) + parser.add_argument("--seed", type=int, default=42, help="GEPA random seed") + parser.add_argument("--n-runners", type=int, default=8, help="Number of Agent-Lightning runners") + parser.add_argument("--artifact-dir", type=str, default=None, help="Directory to write logs and prompt artifacts") + parser.add_argument("--wandb", action="store_true", help="Enable W&B experiment tracking") + parser.add_argument("--wandb-project", type=str, default="gepa-hotpotqa", help="W&B project name") + parser.add_argument("--wandb-name", type=str, default=None, help="W&B run name") + return parser.parse_args() + + +def main() -> None: + args = parse_args() + provider = get_provider(args.provider) + os.environ["LLM_PROVIDER"] = provider + + experiment_config = GEPAExperimentConfig( + max_metric_calls=args.max_metric_calls, + reflection_minibatch_size=args.reflection_minibatch_size, + candidate_selection_strategy=cast(CandidateSelectionStrategy, args.candidate_selection_strategy), + seed=args.seed, + n_runners=args.n_runners, + ) + + result = run_gepa_experiment( + provider=provider, + experiment_config=experiment_config, + train_size=args.train_size, + dev_size=args.dev_size, + train_seed=args.train_seed, + 
eval_seed=args.eval_seed, + holdout_size=args.holdout_size, + holdout_eval_seed=args.holdout_eval_seed, + holdout_split=cast(Literal["dev", "test"], args.holdout_split), + artifact_dir=args.artifact_dir, + use_wandb=args.wandb, + wandb_project=args.wandb_project, + wandb_name=args.wandb_name, + ) + + print("\nGEPA experiment completed.") + print(json.dumps(result.to_dict(), indent=2)) + print(f"\nBest prompt found:\n{result.best_prompt_template}") + + +if __name__ == "__main__": + main() diff --git a/examples/gepa/llm_backend.py b/examples/gepa/llm_backend.py new file mode 100644 index 000000000..55c3ad109 --- /dev/null +++ b/examples/gepa/llm_backend.py @@ -0,0 +1,108 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Centralized LLM provider logic for the room-booking example. + +Supports three backends selectable via the ``LLM_PROVIDER`` env var or CLI +``--provider`` flag: + +- ``azure_entra`` (default) — Azure OpenAI with Entra ID / ``DefaultAzureCredential`` +- ``azure_key`` — Azure OpenAI with a plain API key +- ``openai`` — OpenAI (or any OpenAI-compatible endpoint) + +Usage:: + + from llm_backend import get_provider, make_client, build_reflection_config, get_model_names + + provider = get_provider() # reads LLM_PROVIDER env var + client = make_client(provider) # returns OpenAI or AzureOpenAI + model, grader = get_model_names(provider) + reflection_model, reflection_kwargs = build_reflection_config(provider) +""" + +import logging +import os +from typing import Any, Dict, Literal, Tuple + +from openai import OpenAI + +logger = logging.getLogger(__name__) + +LLMProvider = Literal["azure_entra", "azure_key", "openai"] +VALID_PROVIDERS: Tuple[str, ...] = ("azure_entra", "azure_key", "openai") + + +def get_provider(override: str | None = None) -> LLMProvider: + """Resolve the LLM provider from an explicit override or ``LLM_PROVIDER`` env var. + + Defaults to ``azure_entra`` when neither is set. 
+ """ + raw = override or os.environ.get("LLM_PROVIDER", "azure_entra") + if raw not in VALID_PROVIDERS: + raise ValueError(f"Unknown LLM_PROVIDER '{raw}'. Choose from: {', '.join(VALID_PROVIDERS)}") + return raw # type: ignore[return-value] + + +def get_model_names(provider: LLMProvider) -> Tuple[str, str]: + """Return ``(model, grader_model)`` deployment/model names for the given provider.""" + if provider in ("azure_entra", "azure_key"): + model = os.environ.get("AZURE_OPENAI_DEPLOYMENT", "gpt-4.1-nano") + grader = os.environ.get("AZURE_OPENAI_GRADER_DEPLOYMENT", "gpt-4.1-mini") + else: + model = os.environ.get("OPENAI_MODEL", "gpt-4.1-nano") + grader = os.environ.get("OPENAI_GRADER_MODEL", "gpt-4.1-mini") + return model, grader + + +def make_client(provider: LLMProvider) -> OpenAI: + """Create an OpenAI-compatible client for the given provider. + + Returns an `OpenAI` instance (the base class of `AzureOpenAI`) so callers + don't need to branch on the provider. + """ + if provider == "azure_entra": + from azure.identity import DefaultAzureCredential, get_bearer_token_provider + from openai import AzureOpenAI + + token_provider = get_bearer_token_provider( + DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default" + ) + return AzureOpenAI( + azure_ad_token_provider=token_provider, + azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], + api_version=os.environ.get("AZURE_OPENAI_API_VERSION", "2025-04-01-preview"), + ) + + if provider == "azure_key": + from openai import AzureOpenAI + + return AzureOpenAI( + api_key=os.environ["AZURE_OPENAI_API_KEY"], + azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], + api_version=os.environ.get("AZURE_OPENAI_API_VERSION", "2025-04-01-preview"), + ) + + # provider == "openai" + return OpenAI() + + +def build_reflection_config(provider: LLMProvider) -> Tuple[str, Dict[str, Any]]: + """Return ``(litellm_model_string, extra_kwargs)`` for GEPA reflection calls. 
+ + The returned values are meant to be passed as ``reflection_model`` and + ``reflection_model_kwargs`` to [`GEPAConfig`][agentlightning.algorithm.gepa.GEPAConfig]. + """ + _, grader = get_model_names(provider) + + if provider == "azure_entra": + from azure.identity import DefaultAzureCredential, get_bearer_token_provider + + token_provider = get_bearer_token_provider( + DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default" + ) + return f"azure/{grader}", {"azure_ad_token_provider": token_provider} + + if provider == "azure_key": + return f"azure/{grader}", {"api_key": os.environ["AZURE_OPENAI_API_KEY"]} + + # provider == "openai" + return grader, {} diff --git a/pyproject.toml b/pyproject.toml index 8ac322264..b0ed2b4ec 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -38,6 +38,10 @@ verl = [ "vllm>=0.8.4", # Due to interface change of ExternalZeroMQDistributedExecutor ] +gepa = [ + "gepa", + "dspy", +] weave = [ "weave>=0.52.22", ] @@ -361,6 +365,7 @@ markers = [ "prometheus: tests that require Prometheus", "utils: tests for utility functions", "langchain: tests that require LangChain", + "gepa: tests that require GEPA", ] [tool.black] diff --git a/tests/algorithm/gepa/__init__.py b/tests/algorithm/gepa/__init__.py new file mode 100644 index 000000000..2a50eae89 --- /dev/null +++ b/tests/algorithm/gepa/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Microsoft. All rights reserved. diff --git a/tests/algorithm/gepa/test_callbacks.py b/tests/algorithm/gepa/test_callbacks.py new file mode 100644 index 000000000..0bd18465f --- /dev/null +++ b/tests/algorithm/gepa/test_callbacks.py @@ -0,0 +1,41 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +"""Tests for LightningGEPACallback — all methods log without raising.""" + +from __future__ import annotations + +from agentlightning.algorithm.gepa.callbacks import LightningGEPACallback + + +def test_all_callbacks_log_without_raising(): + cb = LightningGEPACallback() + + # Each callback should be callable with an event dict and not raise + cb.on_optimization_start({}) + cb.on_optimization_end({}) + cb.on_iteration_start({"iteration": 1}) + cb.on_iteration_end({"iteration": 1}) + cb.on_candidate_selected({"candidate": {"prompt": "test"}}) + cb.on_candidate_accepted({"candidate": {"prompt": "test"}, "score": 0.8}) + cb.on_candidate_rejected({"candidate": {"prompt": "test"}, "score": 0.1}) + cb.on_evaluation_start({}) + cb.on_evaluation_end({}) + cb.on_evaluation_skipped({"reason": "cached"}) + cb.on_valset_evaluated({"score": 0.9}) + cb.on_reflective_dataset_built({}) + cb.on_proposal_start({}) + cb.on_proposal_end({}) + cb.on_merge_attempted({}) + cb.on_merge_accepted({}) + cb.on_merge_rejected({}) + cb.on_pareto_front_updated({"frontier_size": 3}) + cb.on_state_saved({}) + cb.on_budget_updated({"metric_calls_remaining": 10}) + cb.on_error({"exception": "test error"}) + + +def test_callbacks_accept_arbitrary_event_keys(): + cb = LightningGEPACallback() + # All methods accept a dict, so any keys should be accepted + cb.on_optimization_start({"extra": "value", "count": 42}) + cb.on_error({"exception": "message", "traceback": "stack", "severity": "high"}) diff --git a/tests/algorithm/gepa/test_config.py b/tests/algorithm/gepa/test_config.py new file mode 100644 index 000000000..b726dbf0a --- /dev/null +++ b/tests/algorithm/gepa/test_config.py @@ -0,0 +1,58 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +"""Tests for GEPAConfig defaults and overrides.""" + +from __future__ import annotations + +from agentlightning.algorithm.gepa.config import GEPAConfig + + +def test_defaults(): + config = GEPAConfig() + assert config.max_metric_calls is None + assert config.candidate_selection_strategy == "pareto" + assert config.frontier_type == "instance" + assert config.reflection_minibatch_size is None + assert config.module_selector == "round_robin" + assert config.seed == 0 + assert config.use_merge is False + assert config.max_merge_invocations == 5 + assert config.skip_perfect_score is True + assert config.perfect_score == 1.0 + assert config.display_progress_bar is False + assert config.raise_on_exception is True + assert config.rollout_batch_timeout == 3600.0 + assert config.rollout_poll_interval == 2.0 + assert config.reflection_model is None + assert config.extra_kwargs == {} + + +def test_overrides(): + config = GEPAConfig( + max_metric_calls=50, + candidate_selection_strategy="current_best", + frontier_type="aggregate", + reflection_minibatch_size=8, + module_selector="all", + seed=42, + use_merge=True, + rollout_batch_timeout=120.0, + rollout_poll_interval=0.5, + reflection_model="gpt-4.1-mini", + ) + assert config.max_metric_calls == 50 + assert config.candidate_selection_strategy == "current_best" + assert config.frontier_type == "aggregate" + assert config.reflection_minibatch_size == 8 + assert config.module_selector == "all" + assert config.seed == 42 + assert config.use_merge is True + assert config.rollout_batch_timeout == 120.0 + assert config.rollout_poll_interval == 0.5 + assert config.reflection_model == "gpt-4.1-mini" + + +def test_extra_kwargs(): + config = GEPAConfig(extra_kwargs={"run_dir": "/tmp/gepa_run", "cache_evaluation": True}) + assert config.extra_kwargs["run_dir"] == "/tmp/gepa_run" + assert config.extra_kwargs["cache_evaluation"] is True diff --git a/tests/algorithm/gepa/test_interface.py b/tests/algorithm/gepa/test_interface.py new file 
# Copyright (c) Microsoft. All rights reserved.

"""Tests for the GEPA Algorithm subclass."""

from __future__ import annotations

import pytest

from agentlightning.algorithm.gepa.config import GEPAConfig
from agentlightning.algorithm.gepa.interface import GEPA
from agentlightning.types import LLM, NamedResources, PromptTemplate


# ---------- Helpers ----------


def _make_mock_store():
    from unittest.mock import MagicMock

    from agentlightning.store.base import LightningStore

    return MagicMock(spec=LightningStore)


def _make_initial_resources() -> NamedResources:
    return {
        "system_prompt": PromptTemplate(template="You are helpful.", engine="f-string"),  # type: ignore[dict-item]
    }


class TestInit:
    """Construction-time behavior of the GEPA algorithm object."""

    def test_default_config(self):
        instance = GEPA()
        assert instance.config.max_metric_calls is None
        assert instance.config.candidate_selection_strategy == "pareto"

    def test_custom_config(self):
        custom = GEPAConfig(max_metric_calls=20, seed=42)
        instance = GEPA(config=custom)
        assert instance.config.max_metric_calls == 20
        assert instance.config.seed == 42

    def test_resource_name_stored(self):
        instance = GEPA(resource_name="my_prompt")
        assert instance._resource_name == "my_prompt"

    def test_result_is_none_before_run(self):
        assert GEPA().result is None


class TestGetBestPrompt:
    """Access to the optimized prompt before and after a run."""

    def test_raises_before_run(self):
        instance = GEPA()
        with pytest.raises(ValueError, match="run\\(\\) has not been called"):
            instance.get_best_prompt()

    def test_returns_prompt_after_setting(self):
        instance = GEPA()
        best = PromptTemplate(template="Best prompt", engine="f-string")
        instance._best_prompt = best
        assert instance.get_best_prompt() is best


class TestRunValidation:
    """Input validation performed by ``run()`` plus codec wiring checks."""

    @pytest.mark.asyncio
    async def test_requires_train_dataset(self):
        instance = GEPA()
        instance.set_store(_make_mock_store())
        instance.set_initial_resources(_make_initial_resources())
        with pytest.raises(ValueError, match="train_dataset is required"):
            await instance.run(train_dataset=None)

    @pytest.mark.asyncio
    async def test_requires_initial_resources(self):
        instance = GEPA()
        instance.set_store(_make_mock_store())
        with pytest.raises(ValueError, match="initial_resources are not set"):
            await instance.run(train_dataset=["task1", "task2"])

    def test_codec_auto_detect(self):
        """The codec is built straight from the initial resources."""
        from agentlightning.algorithm.gepa.resources import PromptResourceCodec

        codec, seed = PromptResourceCodec.from_initial_resources(_make_initial_resources())
        assert codec.resource_name == "system_prompt"
        assert seed == {"system_prompt": "You are helpful."}

    def test_codec_explicit_name(self):
        """An explicitly named resource wins over auto-detection."""
        from agentlightning.algorithm.gepa.resources import PromptResourceCodec

        pool: NamedResources = {
            "llm": LLM(endpoint="http://localhost", model="test"),
            "prompt": PromptTemplate(template="Greet {user}", engine="f-string"),  # type: ignore[dict-item]
        }
        codec, seed = PromptResourceCodec.from_initial_resources(pool, resource_name="prompt")
        assert codec.resource_name == "prompt"
        assert seed == {"prompt": "Greet {user}"}


class TestIsAsync:
    def test_gepa_run_is_async(self):
        assert GEPA().is_async() is True
+ +"""Tests for PromptResourceCodec round-trip, engine preservation, and error cases.""" + +from __future__ import annotations + +import pytest + +from agentlightning.algorithm.gepa.resources import PromptResourceCodec +from agentlightning.types import LLM, NamedResources, PromptTemplate + + +def _make_resources(template: str = "Hello {name}", engine: str = "f-string") -> NamedResources: + return {"system_prompt": PromptTemplate(template=template, engine=engine)} # type: ignore[dict-item] + + +class TestPromptResourceCodec: + def test_round_trip(self): + resources = _make_resources() + codec = PromptResourceCodec(resource_name="system_prompt", engine="f-string") + + candidate = codec.resources_to_candidate(resources) + assert candidate == {"system_prompt": "Hello {name}"} + + rebuilt = codec.candidate_to_resources(candidate) + assert isinstance(rebuilt["system_prompt"], PromptTemplate) + assert rebuilt["system_prompt"].template == "Hello {name}" # type: ignore[union-attr] + assert rebuilt["system_prompt"].engine == "f-string" # type: ignore[union-attr] + + def test_engine_preservation(self): + resources: NamedResources = { + "jinja_prompt": PromptTemplate(template="Hello {{ name }}", engine="jinja"), # type: ignore[dict-item] + } + codec = PromptResourceCodec(resource_name="jinja_prompt", engine="jinja") + candidate = codec.resources_to_candidate(resources) + rebuilt = codec.candidate_to_resources(candidate) + assert rebuilt["jinja_prompt"].engine == "jinja" # type: ignore[union-attr] + + def test_resources_to_candidate_missing_key(self): + codec = PromptResourceCodec(resource_name="missing", engine="f-string") + with pytest.raises(KeyError): + codec.resources_to_candidate({}) + + def test_resources_to_candidate_wrong_type(self): + resources: NamedResources = { + "llm_resource": LLM(endpoint="http://localhost", model="test"), + } + codec = PromptResourceCodec(resource_name="llm_resource", engine="f-string") + with pytest.raises(TypeError, match="not a 
PromptTemplate"): + codec.resources_to_candidate(resources) + + def test_candidate_to_resources_missing_key(self): + codec = PromptResourceCodec(resource_name="prompt", engine="f-string") + with pytest.raises(KeyError): + codec.candidate_to_resources({"wrong_key": "text"}) + + +class TestFromInitialResources: + def test_auto_detect(self): + resources = _make_resources() + codec, seed = PromptResourceCodec.from_initial_resources(resources) + assert codec.resource_name == "system_prompt" + assert codec.engine == "f-string" + assert seed == {"system_prompt": "Hello {name}"} + + def test_explicit_resource_name(self): + resources: NamedResources = { + "first": LLM(endpoint="http://localhost", model="test"), + "second": PromptTemplate(template="Greet {user}", engine="f-string"), # type: ignore[dict-item] + } + codec, seed = PromptResourceCodec.from_initial_resources(resources, resource_name="second") + assert codec.resource_name == "second" + assert seed == {"second": "Greet {user}"} + + def test_no_prompt_template_raises(self): + resources: NamedResources = { + "llm": LLM(endpoint="http://localhost", model="test"), + } + with pytest.raises(ValueError, match="No PromptTemplate found"): + PromptResourceCodec.from_initial_resources(resources) + + def test_explicit_name_not_found_raises(self): + resources = _make_resources() + with pytest.raises(ValueError, match="not found"): + PromptResourceCodec.from_initial_resources(resources, resource_name="nonexistent") + + def test_explicit_name_wrong_type_raises(self): + resources: NamedResources = { + "llm": LLM(endpoint="http://localhost", model="test"), + } + with pytest.raises(ValueError, match="not a PromptTemplate"): + PromptResourceCodec.from_initial_resources(resources, resource_name="llm") diff --git a/tests/algorithm/gepa/test_rollout_adapter.py b/tests/algorithm/gepa/test_rollout_adapter.py new file mode 100644 index 000000000..dad1b33ca --- /dev/null +++ b/tests/algorithm/gepa/test_rollout_adapter.py @@ -0,0 +1,267 @@ 
+# Copyright (c) Microsoft. All rights reserved. + +"""Tests for LightningGEPAAdapter — evaluate and make_reflective_dataset.""" + +from __future__ import annotations + +import asyncio +from typing import Any, Dict, List, Optional, Sequence +from unittest.mock import MagicMock + +import pytest + +from agentlightning.algorithm.gepa.resources import PromptResourceCodec +from agentlightning.algorithm.gepa.rollout_adapter import LightningGEPAAdapter +from agentlightning.algorithm.gepa.trajectories import RolloutOutput, RolloutTrajectory +from agentlightning.types import PromptTemplate, ResourcesUpdate, Rollout, RolloutConfig, Span +from agentlightning.types.tracer import OtelResource, TraceStatus + + +def _make_span( + rollout_id: str = "r1", + attempt_id: str = "a1", + name: str = "test_span", + attributes: Optional[Dict[str, Any]] = None, +) -> Span: + return Span( + rollout_id=rollout_id, + attempt_id=attempt_id, + sequence_id=1, + span_id="span-1", + parent_id=None, + trace_id="trace-1", + name=name, + start_time=0.0, + end_time=1.0, + attributes=attributes or {}, + status=TraceStatus(status_code="UNSET"), + events=[], + links=[], + context=None, + parent=None, + resource=OtelResource(attributes={}, schema_url=""), + ) + + +def _make_rollout(rollout_id: str = "r1", status: str = "succeeded") -> Rollout: + return Rollout( + rollout_id=rollout_id, + input={"task": "test"}, + start_time=0.0, + status=status, # type: ignore[arg-type] + config=RolloutConfig(), + ) + + +def _make_resources_update(resources_id: str = "v0") -> ResourcesUpdate: + return ResourcesUpdate( + resources_id=resources_id, + create_time=0.0, + update_time=0.0, + version=1, + resources={"prompt": PromptTemplate(template="test", engine="f-string")}, + ) + + +class _MockStore: + """Minimal mock store for adapter tests.""" + + def __init__( + self, + rollouts: Optional[List[Rollout]] = None, + spans: Optional[Sequence[Span]] = None, + ) -> None: + self._rollouts = rollouts or [] + self._spans: 
Sequence[Span] = spans or [] + self._enqueued: List[Dict[str, Any]] = [] + self._update_resources_calls: List[Any] = [] + + async def update_resources(self, resources_id: str, resources: Any) -> ResourcesUpdate: + self._update_resources_calls.append((resources_id, resources)) + return _make_resources_update(resources_id) + + async def enqueue_rollout(self, **kwargs: Any) -> Rollout: + rollout_id = f"r{len(self._enqueued)}" + self._enqueued.append(kwargs) + return _make_rollout(rollout_id) + + async def wait_for_rollouts(self, *, rollout_ids: List[str], timeout: Optional[float] = None) -> List[Rollout]: + # Return matching rollouts from our pre-built list + by_id = {r.rollout_id: r for r in self._rollouts} + return [by_id[rid] for rid in rollout_ids if rid in by_id] + + async def query_spans(self, rollout_id: str, attempt_id: Any = None) -> Sequence[Span]: + return [s for s in self._spans if s.rollout_id == rollout_id] + + +@pytest.mark.gepa +class TestEvaluate: + def test_evaluate_returns_evaluation_batch(self): + """evaluate() should return an EvaluationBatch with correct structure.""" + from gepa import EvaluationBatch + + rollouts = [_make_rollout("r0", "succeeded")] + spans = [_make_span("r0")] + store = _MockStore(rollouts=rollouts, spans=spans) + codec = PromptResourceCodec("prompt", "f-string") + + loop = asyncio.new_event_loop() + try: + adapter = LightningGEPAAdapter( + store=store, # type: ignore[arg-type] + codec=codec, + loop=loop, + version_counter=[0], + rollout_batch_timeout=5.0, + rollout_poll_interval=0.1, + ) + + # Run evaluate in a thread that uses our loop + import threading + + result_holder: List[Any] = [] + + def run_in_thread(): + result_holder.append( + adapter.evaluate( + batch=[{"task": "test"}], + candidate={"prompt": "Hello"}, + capture_traces=False, + ) + ) + + thread = threading.Thread(target=run_in_thread) + + async def pump(): + thread.start() + # Let the loop process coroutines + while thread.is_alive(): + await asyncio.sleep(0.01) 
+ + loop.run_until_complete(pump()) + thread.join() + + batch = result_holder[0] + assert isinstance(batch, EvaluationBatch) + assert len(batch.outputs) == 1 + assert len(batch.scores) == 1 + assert batch.trajectories is None # capture_traces=False + finally: + loop.close() + + def test_evaluate_timeout_scores_zero(self): + """Timed-out rollouts should score 0.0.""" + from gepa import EvaluationBatch + + # Store returns no finished rollouts + store = _MockStore(rollouts=[], spans=[]) + codec = PromptResourceCodec("prompt", "f-string") + + loop = asyncio.new_event_loop() + try: + adapter = LightningGEPAAdapter( + store=store, # type: ignore[arg-type] + codec=codec, + loop=loop, + version_counter=[0], + rollout_batch_timeout=0.1, + rollout_poll_interval=0.05, + ) + + import threading + + result_holder: List[Any] = [] + + def run_in_thread(): + result_holder.append( + adapter.evaluate( + batch=[{"task": "test"}], + candidate={"prompt": "Hello"}, + capture_traces=True, + ) + ) + + thread = threading.Thread(target=run_in_thread) + + async def pump(): + thread.start() + while thread.is_alive(): + await asyncio.sleep(0.01) + + loop.run_until_complete(pump()) + thread.join() + + batch = result_holder[0] + assert batch.scores == [0.0] + assert batch.trajectories is not None + assert len(batch.trajectories) == 1 + assert batch.trajectories[0].status == "cancelled" + finally: + loop.close() + + +class TestMakeReflectiveDataset: + def test_returns_per_component_records(self): + """make_reflective_dataset should produce records for each component.""" + codec = PromptResourceCodec("prompt", "f-string") + loop = asyncio.new_event_loop() + try: + adapter = LightningGEPAAdapter( + store=MagicMock(), + codec=codec, + loop=loop, + version_counter=[0], + ) + + traj = RolloutTrajectory( + rollout_id="r0", + status="succeeded", + spans=[_make_span("r0")], + final_reward=0.8, + input={"task": "test"}, + ) + + mock_eval_batch = MagicMock() + mock_eval_batch.trajectories = [traj] + + 
result = adapter.make_reflective_dataset( + candidate={"prompt": "Hello world"}, + eval_batch=mock_eval_batch, + components_to_update=["prompt"], + ) + + assert "prompt" in result + assert len(result["prompt"]) == 1 + record = result["prompt"][0] + assert "Inputs" in record + assert "Generated Outputs" in record + assert "Feedback" in record + assert "Component Text" in record + assert record["Component Text"] == "Hello world" + finally: + loop.close() + + def test_empty_when_no_trajectories(self): + """Returns empty records when trajectories are None.""" + codec = PromptResourceCodec("prompt", "f-string") + loop = asyncio.new_event_loop() + try: + adapter = LightningGEPAAdapter( + store=MagicMock(), + codec=codec, + loop=loop, + version_counter=[0], + ) + + mock_eval_batch = MagicMock() + mock_eval_batch.trajectories = None + + result = adapter.make_reflective_dataset( + candidate={"prompt": "Hello"}, + eval_batch=mock_eval_batch, + components_to_update=["prompt"], + ) + + assert result == {"prompt": []} + finally: + loop.close() diff --git a/uv.lock b/uv.lock index c020c9c79..075d309d8 100644 --- a/uv.lock +++ b/uv.lock @@ -189,6 +189,10 @@ dependencies = [ apo = [ { name = "poml", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 
'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 
'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, ] +gepa = [ + { name = "dspy", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 
'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "gepa", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 
'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, +] mongo = [ { name = "pymongo", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and 
extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 
'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, ] @@ -427,8 +431,10 @@ requires-dist = [ { name = "agentops", specifier = ">=0.4.13" }, { name = "aiohttp" }, { name = "aiologic" }, + { name = "dspy", marker = "extra == 'gepa'" }, { name = "fastapi" }, { name = "flask" }, + { name = "gepa", marker = "extra == 'gepa'" }, { name = "gpustat" }, { name = "graphviz" }, { name = "gunicorn" }, @@ -450,7 +456,7 @@ requires-dist = [ { name = "vllm", marker = "extra == 'verl'", specifier = ">=0.8.4" }, { name = "weave", marker = "extra == 'weave'", specifier = ">=0.52.22" }, ] -provides-extras = ["apo", "verl", "weave", "mongo"] +provides-extras = ["apo", "verl", "gepa", "weave", "mongo"] [package.metadata.requires-dev] agents = [ @@ -935,6 +941,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/16/87/59b570b9c4b014532777dc3baffc9bea10cf0cc8b232cf3c17e4bd0754a6/airportsdata-20250909-py3-none-any.whl", hash = "sha256:ce7dc6e1485afe3915e708212c7024ad158470c1c934e6a6cb217cf28b798ac7", size = 914391, upload-time = "2025-09-09T01:07:29.364Z" }, ] +[[package]] +name = "alembic" +version = "1.18.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mako", marker = "sys_platform == 'linux' or (extra == 
'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 
'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "sqlalchemy", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 
'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "tomli", marker = "(python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (python_full_version >= '3.11' and extra == 
'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-torch-cpu' and extra == 
'group-14-agentlightning-torch-gpu-stable') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (python_full_version >= '3.11' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (sys_platform != 'linux' 
and extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and 
extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (sys_platform != 
'linux' and extra != 'group-14-agentlightning-core-stable' and extra != 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-stable' and extra != 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-stable' and extra != 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra 
== 'group-14-agentlightning-torch-cu128') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-legacy') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-tinker' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 
'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra == 
'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra != 
'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "typing-extensions", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra 
== 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/94/13/8b084e0f2efb0275a1d534838844926f798bd766566b1375174e2448cd31/alembic-1.18.4.tar.gz", hash = "sha256:cb6e1fd84b6174ab8dbb2329f86d631ba9559dd78df550b57804d607672cedbc", size = 2056725, upload-time = "2026-02-10T16:00:47.195Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/29/6533c317b74f707ea28f8d633734dbda2119bbadfc61b2f3640ba835d0f7/alembic-1.18.4-py3-none-any.whl", hash = "sha256:a5ed4adcf6d8a4cb575f3d759f071b03cd6e5c7618eb796cb52497be25bfe19a", size = 263893, upload-time = 
"2026-02-10T16:00:49.997Z" }, +] + [[package]] name = "annotated-doc" version = "0.0.4" @@ -1057,6 +1078,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fe/ba/e2081de779ca30d473f21f5b30e0e737c438205440784c7dfc81efc2b029/async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c", size = 6233, upload-time = "2024-11-06T16:41:37.9Z" }, ] +[[package]] +name = "asyncer" +version = "0.0.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 
'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ff/67/7ea59c3e69eaeee42e7fc91a5be67ca5849c8979acac2b920249760c6af2/asyncer-0.0.8.tar.gz", hash = "sha256:a589d980f57e20efb07ed91d0dbe67f1d2fd343e7142c66d3a099f05c620739c", size = 18217, upload-time = "2024-08-24T23:15:36.449Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/04/15b6ca6b7842eda2748bda0a0af73f2d054e9344320f8bba01f994294bcb/asyncer-0.0.8-py3-none-any.whl", hash = "sha256:5920d48fc99c8f8f0f1576e1882f5022885589c5fcbc46ce4224ec3e53776eeb", size = 9209, upload-time = 
"2024-08-24T23:15:35.317Z" }, +] + [[package]] name = "attrs" version = "25.4.0" @@ -2122,6 +2155,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c3/11/25cdf9d5fc21efd30134fc74c43702c6f7ef09ebae8ed927f1283403ad8d/colorful-0.5.8-py2.py3-none-any.whl", hash = "sha256:a9381fdda3337fbaba5771991020abc69676afa102646650b759927892875992", size = 201334, upload-time = "2025-10-29T11:53:20.251Z" }, ] +[[package]] +name = "colorlog" +version = "6.10.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/61/f083b5ac52e505dfc1c624eafbf8c7589a0d7f32daa398d2e7590efa5fda/colorlog-6.10.1.tar.gz", hash = "sha256:eb4ae5cb65fe7fec7773c2306061a8e63e02efc2c72eba9d27b0fa23c94f1321", size = 17162, upload-time = "2025-10-16T16:14:11.978Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/c1/e419ef3723a074172b68aaa89c9f3de486ed4c2399e2dbd8113a4fdcaf9e/colorlog-6.10.1-py3-none-any.whl", hash = "sha256:2d7e8348291948af66122cff006c9f8da6255d224e7cf8e37d8de2df3bad8c9c", size = 11743, upload-time = "2025-10-16T16:14:10.512Z" }, +] + [[package]] name = "compressed-tensors" version = "0.10.2" @@ -2640,6 +2682,40 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/11/a8/c6a4b901d17399c77cd81fb001ce8961e9f5e04d3daf27e8925cb012e163/docutils-0.22.3-py3-none-any.whl", hash = "sha256:bd772e4aca73aff037958d44f2be5229ded4c09927fcf8690c577b66234d6ceb", size = 633032, upload-time = "2025-11-06T02:35:52.391Z" }, ] +[[package]] +name = "dspy" +version = "2.6.13" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 
'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 
'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "asyncer", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or 
(extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "backoff", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 
'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 
'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "cachetools", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 
'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "cloudpickle", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 
'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 
'group-14-agentlightning-vllm-0-11-0')" }, + { name = "datasets", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 
'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "diskcache", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 
'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "joblib", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 
'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 
'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "json-repair", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 
'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "litellm", version = "1.74.15", source = { registry = "https://pypi.org/simple" }, marker = "(sys_platform == 'linux' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra == 
'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-legacy') or (sys_platform == 'linux' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (sys_platform == 'linux' and extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 
'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-legacy' 
and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 
'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "litellm", version = "1.80.0", source = { registry = "https://pypi.org/simple" }, marker = "(sys_platform == 'linux' and extra == 'group-14-agentlightning-core-stable') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-langchain') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-tinker') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra == 
'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-legacy') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform == 'linux' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 
'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 
'group-14-agentlightning-vllm-0-11-0') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "magicattr", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 
'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 
'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "openai", version = "1.90.0", source = { registry = "https://pypi.org/simple" }, marker = "(sys_platform == 'linux' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-legacy') or (sys_platform == 'linux' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (sys_platform == 'linux' and extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 
'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 
'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 
'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-legacy' and extra == 
'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "openai", version = "1.109.1", source = { registry = "https://pypi.org/simple" }, marker = "(sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-legacy') or (sys_platform == 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra 
== 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 
'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "openai", version = "2.8.0", source = { registry = "https://pypi.org/simple" }, marker = "(sys_platform == 'linux' and extra == 'group-14-agentlightning-core-stable') or (sys_platform == 'linux' and extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain') or (sys_platform == 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-tinker') or (sys_platform == 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform == 'linux' and extra != 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-legacy') or (sys_platform == 'linux' and extra != 'group-14-agentlightning-core-legacy' and extra != 
'group-14-agentlightning-tinker' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra != 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 
'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "optuna", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 
'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "pandas", marker = "sys_platform == 'linux' or (extra == 
'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 
'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "pydantic", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 
'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "regex", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 
'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 
'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "requests", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 
'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "tenacity", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 
'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 
'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "tqdm", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or 
(extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "ujson", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 
'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 
'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2c/37/3e6c6929a39ef96ce20f2eb3773f2743267c52f02e55059b1f11bef40ebf/dspy-2.6.13.tar.gz", hash = "sha256:66aa1919364f631ee48abad8e9c387d83e0884805106b79ec5b22cb023bbaf04", size = 194148, upload-time = "2025-03-19T05:17:58.444Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/27/2a2832ca6111e219c7bacd15d82fc32b6e77ecabf31a4234e54f131a8e28/dspy-2.6.13-py3-none-any.whl", hash = "sha256:3151dd5125f2db7ab453fa27349262ad3f9602e95d8bdd298b53ec622d4352a3", size = 247632, upload-time = "2025-03-19T05:17:56.665Z" }, +] + [[package]] name = "durationpy" version = "0.10" @@ -3259,6 +3335,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/54/70/e07c381e6488a77094f04c85c9caf1c8008cdc30778f7019bc52e5285ef0/gdown-5.2.0-py3-none-any.whl", hash = "sha256:33083832d82b1101bdd0e9df3edd0fbc0e1c5f14c9d8c38d2a35bf1683b526d6", size = 18235, upload-time = "2024-05-12T06:45:10.017Z" }, ] +[[package]] +name = "gepa" +version = "0.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f5/30/511e52916956508f56eca721260fcd524cfffd580e57782dd471be925f7e/gepa-0.1.0.tar.gz", hash = "sha256:f8b3d7918d4cdcf8593f39ef1cc757c4ba1a4e6793e3ffb622e6c0bc60a1efd9", size = 226064, upload-time = "2026-02-19T19:43:08.272Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/32/fe8afb3d2a6605a6bcbc8f119f0a2adae96e9e5d57ebed055490219956a8/gepa-0.1.0-py3-none-any.whl", hash = "sha256:4e3f8fe8ca20169e60518b2e9d416e8c4a579459848adffdcad12223fbf9643e", size = 191392, upload-time = "2026-02-19T19:43:07.065Z" }, +] + [[package]] name = "gguf" version = "0.17.1" @@ -4931,6 +5016,9 @@ resolution-markers = [ "python_full_version < '3.11' and sys_platform == 'linux'", ] sdist = { url = 
"https://files.pythonhosted.org/packages/f7/6e/6e46bf6abaddc73973933334ec6761da556617c26e224fe06a1628f69f4a/litellm_proxy_extras-0.2.14.tar.gz", hash = "sha256:c05bacba2048130648e41287856c3ca5cdcf744708e19970679333b2fed96dfb", size = 15083, upload-time = "2025-07-30T23:05:00.051Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/d8/2b5b554e84291cd79074f81b27e92a12814b7b98c0a65df5b789dd8121ba/litellm_proxy_extras-0.2.14-py3-none-any.whl", hash = "sha256:f1b3286fbe6ac75a176b391e53a37f6f11b3edabab57bec2ea07a636cdc69c5d", size = 28844, upload-time = "2026-02-21T20:03:01.987Z" }, +] [[package]] name = "litellm-proxy-extras" @@ -5156,6 +5244,26 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6c/77/d7f491cbc05303ac6801651aabeb262d43f319288c1ea96c66b1d2692ff3/lxml-6.0.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:27220da5be049e936c3aca06f174e8827ca6445a4353a1995584311487fc4e3e", size = 3518768, upload-time = "2025-09-22T04:04:57.097Z" }, ] +[[package]] +name = "magicattr" +version = "0.1.6" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/7e/76b7e0c391bee7e9273725c29c8fe41c4df62a215ce58aa8e3518baee0bb/magicattr-0.1.6-py2.py3-none-any.whl", hash = "sha256:d96b18ee45b5ee83b09c17e15d3459a64de62d538808c2f71182777dd9dbbbdf", size = 4664, upload-time = "2022-01-25T16:56:47.074Z" }, +] + +[[package]] +name = "mako" +version = "1.3.10" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or 
(extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 
'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/38/bd5b78a920a64d708fe6bc8e0a2c075e1389d53bef8413725c63ba041535/mako-1.3.10.tar.gz", hash = "sha256:99579a6f39583fa7e5630a28c3c1f440e4e97a414b80372649c0ce338da2ea28", size = 392474, upload-time = "2025-04-10T12:44:31.16Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/fb/99f81ac72ae23375f22b7afdb7642aba97c00a713c217124420147681a2f/mako-1.3.10-py3-none-any.whl", hash = "sha256:baef24a52fc4fc514a0887ac600f9f1cff3d82c61d4d700a1fa84d597b88db59", size = 78509, upload-time = "2025-04-10T12:50:53.297Z" }, +] + [[package]] name = "markdown" version = "3.10" @@ -7349,6 +7457,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/24/7d/c88d7b15ba8fe5c6b8f93be50fc11795e9fc05386c44afaf6b76fe191f9b/opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed", size = 207954, upload-time = "2025-10-16T08:35:48.054Z" }, ] +[[package]] +name = "optuna" +version = "4.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "alembic", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 
'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 
'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "colorlog", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') 
or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "numpy", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 
'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 
'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "packaging", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 
'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "pyyaml", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 
'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "sqlalchemy", marker = "sys_platform == 'linux' or (extra == 
'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 
'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "tqdm", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 
'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bf/9b/62f120fb2ecbc4338bee70c5a3671c8e561714f3aa1a046b897ff142050e/optuna-4.8.0.tar.gz", hash = "sha256:6f7043e9f8ecb5e607af86a7eb00fb5ec2be26c3b08c201209a73d36aff37a38", size = 482603, upload-time = 
"2026-03-16T04:59:58.659Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ac/24/7c731839566d30dc70556d9824ef17692d896c15e3df627bce8c16f753e1/optuna-4.8.0-py3-none-any.whl", hash = "sha256:c57a7682679c36bfc9bca0da430698179e513874074b71bebedb0334964ab930", size = 419456, upload-time = "2026-03-16T04:59:56.977Z" }, +] + [[package]] name = "ordered-set" version = "4.1.0" @@ -10558,8 +10684,8 @@ name = "sqlalchemy" version = "2.0.44" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "greenlet", marker = "(platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 
'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (platform_machine == 'AMD64' and sys_platform == 'linux') or (platform_machine == 'WIN32' and sys_platform == 'linux') or 
(platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'amd64' and sys_platform == 'linux') or (platform_machine == 'ppc64le' and sys_platform == 'linux') or (platform_machine == 'win32' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 
'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-stable' and extra != 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-stable' and extra != 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, - { name = "typing-extensions", marker = "sys_platform == 'linux' or 
(extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "greenlet", marker = "(platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-core-legacy' and 
extra == 'group-14-agentlightning-torch-gpu-stable') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-core-stable' and 
extra == 'group-14-agentlightning-torch-legacy') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-torch-cpu' and extra == 
'group-14-agentlightning-torch-gpu-legacy') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 
'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (platform_machine != 'AMD64' and platform_machine != 'WIN32' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'ppc64le' and platform_machine != 'win32' and platform_machine != 'x86_64' and extra == 
'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (platform_machine == 'AMD64' and sys_platform == 'linux') or (platform_machine == 'WIN32' and sys_platform == 'linux') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'amd64' and sys_platform == 'linux') or (platform_machine == 'ppc64le' and sys_platform == 'linux') or (platform_machine == 'win32' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (sys_platform != 'linux' and extra 
!= 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (sys_platform != 'linux' 
and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-stable' and extra != 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-stable' and extra != 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-stable' and extra != 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra != 
'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-legacy') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-tinker' and extra != 
'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (sys_platform != 'linux' and extra != 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra != 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 
'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra != 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' 
and extra != 'group-14-agentlightning-torch-gpu-stable' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (sys_platform != 'linux' and extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-cpu' and extra != 'group-14-agentlightning-torch-cu128' and extra != 'group-14-agentlightning-torch-gpu-legacy' and extra != 'group-14-agentlightning-torch-gpu-stable' and extra != 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, + { name = "typing-extensions", marker = "sys_platform == 'linux' or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-core-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-tinker') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 
'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-core-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-core-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-langchain' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-tinker' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-cu128') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-legacy') or (extra == 'group-14-agentlightning-torch-cpu' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-gpu-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-gpu-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-torch-gpu-stable' and extra == 'group-14-agentlightning-torch-legacy') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-torch-stable') or (extra == 
'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-trl') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-10-2') or (extra == 'group-14-agentlightning-torch-legacy' and extra == 'group-14-agentlightning-vllm-0-11-0') or (extra == 'group-14-agentlightning-vllm-0-10-2' and extra == 'group-14-agentlightning-vllm-0-11-0')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/f0/f2/840d7b9496825333f532d2e3976b8eadbf52034178aac53630d09fe6e1ef/sqlalchemy-2.0.44.tar.gz", hash = "sha256:0ae7454e1ab1d780aee69fd2aae7d6b8670a581d8847f2d1e0f7ddfbf47e5a22", size = 9819830, upload-time = "2025-10-10T14:39:12.935Z" } wheels = [ @@ -11929,6 +12055,91 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/37/87/1f677586e8ac487e29672e4b17455758fce261de06a0d086167bb760361a/uc_micro_py-1.0.3-py3-none-any.whl", hash = "sha256:db1dffff340817673d7b466ec86114a9dc0e9d4d9b5ba229d9d60e5c12600cd5", size = 6229, upload-time = "2024-02-09T16:52:00.371Z" }, ] +[[package]] +name = "ujson" +version = "5.12.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cb/3e/c35530c5ffc25b71c59ae0cd7b8f99df37313daa162ce1e2f7925f7c2877/ujson-5.12.0.tar.gz", hash = "sha256:14b2e1eb528d77bc0f4c5bd1a7ebc05e02b5b41beefb7e8567c9675b8b13bcf4", size = 7158451, upload-time = "2026-03-11T22:19:30.397Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/ee/45c7c1f9268b0fecdd68f9ada490bc09632b74f5f90a9be759e51a746ddc/ujson-5.12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:38051f36423f084b909aaadb3b41c9c6a2958e86956ba21a8489636911e87504", size = 56145, upload-time = "2026-03-11T22:17:49.409Z" }, + { url = "https://files.pythonhosted.org/packages/6d/dc/ed181dbfb2beee598e91280c6903ba71e10362b051716317e2d3664614bb/ujson-5.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:457fabc2700a8e6ddb85bc5a1d30d3345fe0d3ec3ee8161a4e032ec585801dfa", size = 
53839, upload-time = "2026-03-11T22:17:50.973Z" }, + { url = "https://files.pythonhosted.org/packages/e4/d8/eb9ef42c660f431deeedc2e1b09c4ba29aa22818a439ddda7da6ae23ddfa/ujson-5.12.0-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:57930ac9519099b852e190d2c04b1fb5d97ea128db33bce77ed874eccb4c7f09", size = 57844, upload-time = "2026-03-11T22:17:53.029Z" }, + { url = "https://files.pythonhosted.org/packages/68/37/0b586d079d3f2a5be5aa58ab5c423cbb4fae2ee4e65369c87aa74ac7e113/ujson-5.12.0-cp310-cp310-manylinux_2_24_i686.manylinux_2_28_i686.whl", hash = "sha256:9b3b86ec3e818f3dd3e13a9de628e88a9990f4af68ecb0b12dd3de81227f0a26", size = 59923, upload-time = "2026-03-11T22:17:54.332Z" }, + { url = "https://files.pythonhosted.org/packages/28/ed/6a4b69eb397502767f438b5a2b4c066dccc9e3b263115f5ee07510250fc7/ujson-5.12.0-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:460e76a4daff214ae33ab959494962c93918cb44714ea3e3f748b14aa37f8a87", size = 57427, upload-time = "2026-03-11T22:17:55.317Z" }, + { url = "https://files.pythonhosted.org/packages/bb/4b/ae118440a72e85e68ee8dd26cfc47ea7857954a3341833cde9da7dc40ca3/ujson-5.12.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e584d0cdd37cac355aca52ed788d1a2d939d6837e2870d3b70e585db24025a50", size = 1037301, upload-time = "2026-03-11T22:17:56.427Z" }, + { url = "https://files.pythonhosted.org/packages/c2/76/834caa7905f65d3a695e4f5ff8d5d4a98508e396a9e8ab0739ab4fe2d422/ujson-5.12.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0fe9128e75c6aa6e9ae06c1408d6edd9179a2fef0fe6d9cda3166b887eba521d", size = 1196664, upload-time = "2026-03-11T22:17:58.061Z" }, + { url = "https://files.pythonhosted.org/packages/f2/33/1f3c1543c1d3f18c54bb3f8c1e74314fd6ad3c1aa375f01433e89a86bfa6/ujson-5.12.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3ed5cb149892141b1e77ef312924a327f2cc718b34247dae346ed66329e1b8be", size = 1089668, upload-time = "2026-03-11T22:17:59.617Z" }, + { url = 
"https://files.pythonhosted.org/packages/db/52/07d9da456a78296f61893b9d2bbfb2512f4233394748aae80b8d08c7d96e/ujson-5.12.0-cp310-cp310-win32.whl", hash = "sha256:973b7d7145b1ac553a7466a64afa8b31ec2693d7c7fff6a755059e0a2885dfd2", size = 39644, upload-time = "2026-03-11T22:18:01.212Z" }, + { url = "https://files.pythonhosted.org/packages/ec/e5/c1de3041672fa1ab97aae0f0b9f4e30a9b15d4104c734d5627779206c878/ujson-5.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:1d072a403d82aef8090c6d4f728e3a727dfdba1ad3b7fa3a052c3ecbd37e73cb", size = 43875, upload-time = "2026-03-11T22:18:02.268Z" }, + { url = "https://files.pythonhosted.org/packages/8b/49/714a9240d9e6bd86c9684a72f100a0005459165fb2b0f6bf1a1156be0b9f/ujson-5.12.0-cp310-cp310-win_arm64.whl", hash = "sha256:55ede2a7a051b3b7e71a394978a098d71b3783e6b904702ff45483fad434ae2d", size = 38563, upload-time = "2026-03-11T22:18:03.546Z" }, + { url = "https://files.pythonhosted.org/packages/10/22/fd22e2f6766bae934d3050517ca47d463016bd8688508d1ecc1baa18a7ad/ujson-5.12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:58a11cb49482f1a095a2bd9a1d81dd7c8fb5d2357f959ece85db4e46a825fd00", size = 56139, upload-time = "2026-03-11T22:18:04.591Z" }, + { url = "https://files.pythonhosted.org/packages/c6/fd/6839adff4fc0164cbcecafa2857ba08a6eaeedd7e098d6713cb899a91383/ujson-5.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9b3cf13facf6f77c283af0e1713e5e8c47a0fe295af81326cb3cb4380212e797", size = 53836, upload-time = "2026-03-11T22:18:05.662Z" }, + { url = "https://files.pythonhosted.org/packages/f9/b0/0c19faac62d68ceeffa83a08dc3d71b8462cf5064d0e7e0b15ba19898dad/ujson-5.12.0-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fb94245a715b4d6e24689de12772b85329a1f9946cbf6187923a64ecdea39e65", size = 57851, upload-time = "2026-03-11T22:18:06.744Z" }, + { url = 
"https://files.pythonhosted.org/packages/04/f6/e7fd283788de73b86e99e08256726bb385923249c21dcd306e59d532a1a1/ujson-5.12.0-cp311-cp311-manylinux_2_24_i686.manylinux_2_28_i686.whl", hash = "sha256:0fe6b8b8968e11dd9b2348bd508f0f57cf49ab3512064b36bc4117328218718e", size = 59906, upload-time = "2026-03-11T22:18:07.791Z" }, + { url = "https://files.pythonhosted.org/packages/d7/3a/b100735a2b43ee6e8fe4c883768e362f53576f964d4ea841991060aeaf35/ujson-5.12.0-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:89e302abd3749f6d6699691747969a5d85f7c73081d5ed7e2624c7bd9721a2ab", size = 57409, upload-time = "2026-03-11T22:18:08.79Z" }, + { url = "https://files.pythonhosted.org/packages/5c/fa/f97cc20c99ca304662191b883ae13ae02912ca7244710016ba0cb8a5be34/ujson-5.12.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0727363b05ab05ee737a28f6200dc4078bce6b0508e10bd8aab507995a15df61", size = 1037339, upload-time = "2026-03-11T22:18:10.424Z" }, + { url = "https://files.pythonhosted.org/packages/10/7a/53ddeda0ffe1420db2f9999897b3cbb920fbcff1849d1f22b196d0f34785/ujson-5.12.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b62cb9a7501e1f5c9ffe190485501349c33e8862dde4377df774e40b8166871f", size = 1196625, upload-time = "2026-03-11T22:18:11.82Z" }, + { url = "https://files.pythonhosted.org/packages/0d/1a/4c64a6bef522e9baf195dd5be151bc815cd4896c50c6e2489599edcda85f/ujson-5.12.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a6ec5bf6bc361f2f0f9644907a36ce527715b488988a8df534120e5c34eeda94", size = 1089669, upload-time = "2026-03-11T22:18:13.343Z" }, + { url = "https://files.pythonhosted.org/packages/18/11/8ccb109f5777ec0d9fb826695a9e2ac36ae94c1949fc8b1e4d23a5bd067a/ujson-5.12.0-cp311-cp311-win32.whl", hash = "sha256:006428d3813b87477d72d306c40c09f898a41b968e57b15a7d88454ecc42a3fb", size = 39648, upload-time = "2026-03-11T22:18:14.785Z" }, + { url = 
"https://files.pythonhosted.org/packages/6f/e3/87fc4c27b20d5125cff7ce52d17ea7698b22b74426da0df238e3efcb0cf2/ujson-5.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:40aa43a7a3a8d2f05e79900858053d697a88a605e3887be178b43acbcd781161", size = 43876, upload-time = "2026-03-11T22:18:15.768Z" }, + { url = "https://files.pythonhosted.org/packages/9e/21/324f0548a8c8c48e3e222eaed15fb6d48c796593002b206b4a28a89e445f/ujson-5.12.0-cp311-cp311-win_arm64.whl", hash = "sha256:561f89cc82deeae82e37d4a4764184926fb432f740a9691563a391b13f7339a4", size = 38553, upload-time = "2026-03-11T22:18:17.251Z" }, + { url = "https://files.pythonhosted.org/packages/84/f6/ac763d2108d28f3a40bb3ae7d2fafab52ca31b36c2908a4ad02cd3ceba2a/ujson-5.12.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:09b4beff9cc91d445d5818632907b85fb06943b61cb346919ce202668bf6794a", size = 56326, upload-time = "2026-03-11T22:18:18.467Z" }, + { url = "https://files.pythonhosted.org/packages/25/46/d0b3af64dcdc549f9996521c8be6d860ac843a18a190ffc8affeb7259687/ujson-5.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ca0c7ce828bb76ab78b3991904b477c2fd0f711d7815c252d1ef28ff9450b052", size = 53910, upload-time = "2026-03-11T22:18:19.502Z" }, + { url = "https://files.pythonhosted.org/packages/9a/10/853c723bcabc3e9825a079019055fc99e71b85c6bae600607a2b9d31d18d/ujson-5.12.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a2d79c6635ccffcbfc1d5c045874ba36b594589be81d50d43472570bb8de9c57", size = 57754, upload-time = "2026-03-11T22:18:20.874Z" }, + { url = "https://files.pythonhosted.org/packages/f9/c6/6e024830d988f521f144ead641981c1f7a82c17ad1927c22de3242565f5c/ujson-5.12.0-cp312-cp312-manylinux_2_24_i686.manylinux_2_28_i686.whl", hash = "sha256:7e07f6f644d2c44d53b7a320a084eef98063651912c1b9449b5f45fcbdc6ccd2", size = 59936, upload-time = "2026-03-11T22:18:21.924Z" }, + { url = 
"https://files.pythonhosted.org/packages/34/c9/c5f236af5abe06b720b40b88819d00d10182d2247b1664e487b3ed9229cf/ujson-5.12.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:085b6ce182cdd6657481c7c4003a417e0655c4f6e58b76f26ee18f0ae21db827", size = 57463, upload-time = "2026-03-11T22:18:22.924Z" }, + { url = "https://files.pythonhosted.org/packages/ae/04/41342d9ef68e793a87d84e4531a150c2b682f3bcedfe59a7a5e3f73e9213/ujson-5.12.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:16b4fe9c97dc605f5e1887a9e1224287291e35c56cbc379f8aa44b6b7bcfe2bb", size = 1037239, upload-time = "2026-03-11T22:18:24.04Z" }, + { url = "https://files.pythonhosted.org/packages/d4/81/dc2b7617d5812670d4ff4a42f6dd77926430ee52df0dedb2aec7990b2034/ujson-5.12.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0d2e8db5ade3736a163906154ca686203acc7d1d30736cbf577c730d13653d84", size = 1196713, upload-time = "2026-03-11T22:18:25.391Z" }, + { url = "https://files.pythonhosted.org/packages/b6/9c/80acff0504f92459ed69e80a176286e32ca0147ac6a8252cd0659aad3227/ujson-5.12.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:93bc91fdadcf046da37a214eaa714574e7e9b1913568e93bb09527b2ceb7f759", size = 1089742, upload-time = "2026-03-11T22:18:26.738Z" }, + { url = "https://files.pythonhosted.org/packages/e3/f0/123ffaac17e45ef2b915e3e3303f8f4ea78bb8d42afad828844e08622b1e/ujson-5.12.0-cp312-cp312-win32.whl", hash = "sha256:2a248750abce1c76fbd11b2e1d88b95401e72819295c3b851ec73399d6849b3d", size = 39773, upload-time = "2026-03-11T22:18:28.244Z" }, + { url = "https://files.pythonhosted.org/packages/b5/20/f3bd2b069c242c2b22a69e033bfe224d1d15d3649e6cd7cc7085bb1412ff/ujson-5.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:1b5c6ceb65fecd28a1d20d1eba9dbfa992612b86594e4b6d47bb580d2dd6bcb3", size = 44040, upload-time = "2026-03-11T22:18:29.236Z" }, + { url = 
"https://files.pythonhosted.org/packages/f0/a7/01b5a0bcded14cd2522b218f2edc3533b0fcbccdea01f3e14a2b699071aa/ujson-5.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:9a5fcbe7b949f2e95c47ea8a80b410fcdf2da61c98553b45a4ee875580418b68", size = 38526, upload-time = "2026-03-11T22:18:30.551Z" }, + { url = "https://files.pythonhosted.org/packages/3f/f1/0ef0eeab1db8493e1833c8b440fe32cf7538f7afa6e7f7c7e9f62cef464d/ujson-5.12.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:15d416440148f3e56b9b244fdaf8a09fcf5a72e4944b8e119f5bf60417a2bfc8", size = 56331, upload-time = "2026-03-11T22:18:31.539Z" }, + { url = "https://files.pythonhosted.org/packages/b0/2f/9159f6f399b3f572d20847a2b80d133e3a03c14712b0da4971a36879fb64/ujson-5.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e0dd3676ea0837cd70ea1879765e9e9f6be063be0436de9b3ea4b775caf83654", size = 53910, upload-time = "2026-03-11T22:18:32.829Z" }, + { url = "https://files.pythonhosted.org/packages/e5/a9/f96376818d71495d1a4be19a0ab6acf0cc01dd8826553734c3d4dac685b2/ujson-5.12.0-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7bbf05c38debc90d1a195b11340cc85cb43ab3e753dc47558a3a84a38cbc72da", size = 57757, upload-time = "2026-03-11T22:18:33.866Z" }, + { url = "https://files.pythonhosted.org/packages/98/8d/dd4a151caac6fdcb77f024fbe7f09d465ebf347a628ed6dd581a0a7f6364/ujson-5.12.0-cp313-cp313-manylinux_2_24_i686.manylinux_2_28_i686.whl", hash = "sha256:3c2f947e55d3c7cfe124dd4521ee481516f3007d13c6ad4bf6aeb722e190eb1b", size = 59940, upload-time = "2026-03-11T22:18:35.276Z" }, + { url = "https://files.pythonhosted.org/packages/c7/17/0d36c2fee0a8d8dc37b011ccd5bbdcfaff8b8ec2bcfc5be998661cdc935b/ujson-5.12.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ea6206043385343aff0b7da65cf73677f6f5e50de8f1c879e557f4298cac36a", size = 57465, upload-time = "2026-03-11T22:18:36.644Z" }, + { url = 
"https://files.pythonhosted.org/packages/8c/04/b0ee4a4b643a01ba398441da1e357480595edb37c6c94c508dbe0eb9eb60/ujson-5.12.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bb349dbba57c76eec25e5917e07f35aabaf0a33b9e67fc13d188002500106487", size = 1037236, upload-time = "2026-03-11T22:18:37.743Z" }, + { url = "https://files.pythonhosted.org/packages/2d/08/0e7780d0bbb48fe57ded91f550144bcc99c03b5360bf2886dd0dae0ea8f5/ujson-5.12.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:937794042342006f707837f38d721426b11b0774d327a2a45c0bd389eb750a87", size = 1196717, upload-time = "2026-03-11T22:18:39.101Z" }, + { url = "https://files.pythonhosted.org/packages/ba/4c/e0e34107715bb4dd2d4dcc1ce244d2f074638837adf38aff85a37506efe4/ujson-5.12.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6ad57654570464eb1b040b5c353dee442608e06cff9102b8fcb105565a44c9ed", size = 1089748, upload-time = "2026-03-11T22:18:40.473Z" }, + { url = "https://files.pythonhosted.org/packages/72/43/814f4e2b5374d0d505c254ba4bed43eb25d2d046f19f5fd88555f81a7bd0/ujson-5.12.0-cp313-cp313-win32.whl", hash = "sha256:76bf3e7406cf23a3e1ca6a23fb1fb9ea82f4f6bd226fe226e09146b0194f85dc", size = 39778, upload-time = "2026-03-11T22:18:41.791Z" }, + { url = "https://files.pythonhosted.org/packages/0f/fe/19310d848ebe93315b6cb171277e4ce29f47ef9d46caabd63ff05d5be548/ujson-5.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:15e555c4caca42411270b2ed2b2ebc7b3a42bb04138cef6c956e1f1d49709fe2", size = 44038, upload-time = "2026-03-11T22:18:43.094Z" }, + { url = "https://files.pythonhosted.org/packages/3f/e4/7a39103d7634691601a02bd1ca7268fba4da47ed586365e6ee68168f575a/ujson-5.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:bd03472c36fa3a386a6deb887113b9e3fa40efba8203eb4fe786d3c0ccc724f6", size = 38529, upload-time = "2026-03-11T22:18:44.167Z" }, + { url = "https://files.pythonhosted.org/packages/10/bd/9a8d693254bada62bfea75a507e014afcfdb6b9d047b6f8dd134bfefaf67/ujson-5.12.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = 
"sha256:85833bca01aa5cae326ac759276dc175c5fa3f7b3733b7d543cf27f2df12d1ef", size = 56499, upload-time = "2026-03-11T22:18:45.431Z" }, + { url = "https://files.pythonhosted.org/packages/bd/2d/285a83df8176e18dcd675d1a4cff8f7620f003f30903ea43929406e98986/ujson-5.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:d22cad98c2a10bbf6aa083a8980db6ed90d4285a841c4de892890c2b28286ef9", size = 53998, upload-time = "2026-03-11T22:18:47.184Z" }, + { url = "https://files.pythonhosted.org/packages/bf/8b/e2f09e16dabfa91f6a84555df34a4329fa7621e92ed054d170b9054b9bb2/ujson-5.12.0-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99cc80facad240b0c2fb5a633044420878aac87a8e7c348b9486450cba93f27c", size = 57783, upload-time = "2026-03-11T22:18:48.271Z" }, + { url = "https://files.pythonhosted.org/packages/68/fb/ba1d06f3658a0c36d0ab3869ec3914f202bad0a9bde92654e41516c7bb13/ujson-5.12.0-cp314-cp314-manylinux_2_24_i686.manylinux_2_28_i686.whl", hash = "sha256:d1831c07bd4dce53c4b666fa846c7eba4b7c414f2e641a4585b7f50b72f502dc", size = 60011, upload-time = "2026-03-11T22:18:49.284Z" }, + { url = "https://files.pythonhosted.org/packages/64/2b/3e322bf82d926d9857206cd5820438d78392d1f523dacecb8bd899952f73/ujson-5.12.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e00cec383eab2406c9e006bd4edb55d284e94bb943fda558326048178d26961", size = 57465, upload-time = "2026-03-11T22:18:50.584Z" }, + { url = "https://files.pythonhosted.org/packages/e9/fd/af72d69603f9885e5136509a529a4f6d88bf652b457263ff96aefcd3ab7d/ujson-5.12.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f19b3af31d02a2e79c5f9a6deaab0fb3c116456aeb9277d11720ad433de6dfc6", size = 1037275, upload-time = "2026-03-11T22:18:51.998Z" }, + { url = "https://files.pythonhosted.org/packages/9c/a7/a2411ec81aef7872578e56304c3e41b3a544a9809e95c8e1df46923fc40b/ujson-5.12.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:bacbd3c69862478cbe1c7ed4325caedec580d8acf31b8ee1b9a1e02a56295cad", 
size = 1196758, upload-time = "2026-03-11T22:18:53.548Z" }, + { url = "https://files.pythonhosted.org/packages/ed/85/aa18ae175dd03a118555aa14304d4f466f9db61b924c97c6f84388ecacb1/ujson-5.12.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:94c5f1621cbcab83c03be46441f090b68b9f307b6c7ec44d4e3f6d5997383df4", size = 1089760, upload-time = "2026-03-11T22:18:55.336Z" }, + { url = "https://files.pythonhosted.org/packages/d3/d4/4b40b67ac7e916ebffc3041ae2320c5c0b8a045300d4c542b6e50930cca5/ujson-5.12.0-cp314-cp314-win32.whl", hash = "sha256:e6369ac293d2cc40d52577e4fa3d75a70c1aae2d01fa3580a34a4e6eff9286b9", size = 41043, upload-time = "2026-03-11T22:18:56.505Z" }, + { url = "https://files.pythonhosted.org/packages/24/38/a1496d2a3428981f2b3a2ffbb4656c2b05be6cc406301d6b10a6445f6481/ujson-5.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:31348a0ffbfc815ce78daac569d893349d85a0b57e1cd2cdbba50b7f333784da", size = 45303, upload-time = "2026-03-11T22:18:57.454Z" }, + { url = "https://files.pythonhosted.org/packages/85/d3/39dbd3159543d9c57ec3a82d36226152cf0d710784894ce5aa24b8220ac1/ujson-5.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:6879aed770557f0961b252648d36f6fdaab41079d37a2296b5649fd1b35608e0", size = 39860, upload-time = "2026-03-11T22:18:58.578Z" }, + { url = "https://files.pythonhosted.org/packages/c3/71/9b4dacb177d3509077e50497222d39eec04c8b41edb1471efc764d645237/ujson-5.12.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:7ddb08b3c2f9213df1f2e3eb2fbea4963d80ec0f8de21f0b59898e34f3b3d96d", size = 56845, upload-time = "2026-03-11T22:18:59.629Z" }, + { url = "https://files.pythonhosted.org/packages/24/c2/8abffa3be1f3d605c4a62445fab232b3e7681512ce941c6b23014f404d36/ujson-5.12.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0a3ae28f0b209be5af50b54ca3e2123a3de3a57d87b75f1e5aa3d7961e041983", size = 54463, upload-time = "2026-03-11T22:19:00.697Z" }, + { url = 
"https://files.pythonhosted.org/packages/db/2e/60114a35d1d6796eb428f7affcba00a921831ff604a37d9142c3d8bbe5c5/ujson-5.12.0-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d30ad4359413c8821cc7b3707f7ca38aa8bc852ba3b9c5a759ee2d7740157315", size = 58689, upload-time = "2026-03-11T22:19:01.739Z" }, + { url = "https://files.pythonhosted.org/packages/c8/ad/010925c2116c21ce119f9c2ff18d01f48a19ade3ff4c5795da03ce5829fc/ujson-5.12.0-cp314-cp314t-manylinux_2_24_i686.manylinux_2_28_i686.whl", hash = "sha256:02f93da7a4115e24f886b04fd56df1ee8741c2ce4ea491b7ab3152f744ad8f8e", size = 60618, upload-time = "2026-03-11T22:19:03.101Z" }, + { url = "https://files.pythonhosted.org/packages/9b/74/db7f638bf20282b1dccf454386cbd483faaaed3cdbb9cb27e06f74bb109e/ujson-5.12.0-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3ff4ede90ed771140caa7e1890de17431763a483c54b3c1f88bd30f0cc1affc0", size = 58151, upload-time = "2026-03-11T22:19:04.175Z" }, + { url = "https://files.pythonhosted.org/packages/9c/7e/3ebaecfa70a2e8ce623db8e21bd5cb05d42a5ef943bcbb3309d71b5de68d/ujson-5.12.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bf9cc97f05048ac8f3e02cd58f0fe62b901453c24345bfde287f4305dcc31c", size = 1038117, upload-time = "2026-03-11T22:19:05.558Z" }, + { url = "https://files.pythonhosted.org/packages/2e/aa/e073eda7f0036c2973b28db7bb99faba17a932e7b52d801f9bb3e726271f/ujson-5.12.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:2324d9a0502317ffc35d38e153c1b2fa9610ae03775c9d0f8d0cca7b8572b04e", size = 1197434, upload-time = "2026-03-11T22:19:06.92Z" }, + { url = "https://files.pythonhosted.org/packages/1c/01/b9a13f058fdd50c746b192c4447ca8d6352e696dcda912ccee10f032ff85/ujson-5.12.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:50524f4f6a1c839714dbaff5386a1afb245d2d5ec8213a01fbc99cea7307811e", size = 1090401, upload-time = "2026-03-11T22:19:08.383Z" }, + { url = 
"https://files.pythonhosted.org/packages/c4/37/3d1b4e0076b6e43379600b5229a5993db8a759ff2e1830ea635d876f6644/ujson-5.12.0-cp314-cp314t-win32.whl", hash = "sha256:f7a0430d765f9bda043e6aefaba5944d5f21ec43ff4774417d7e296f61917382", size = 41880, upload-time = "2026-03-11T22:19:09.671Z" }, + { url = "https://files.pythonhosted.org/packages/b1/c5/3c2a262a138b9f0014fe1134a6b5fdc2c54245030affbaac2fcbc0632138/ujson-5.12.0-cp314-cp314t-win_amd64.whl", hash = "sha256:ccbfd94e59aad4a2566c71912b55f0547ac1680bfac25eb138e6703eb3dd434e", size = 46365, upload-time = "2026-03-11T22:19:10.662Z" }, + { url = "https://files.pythonhosted.org/packages/83/40/956dc20b7e00dc0ff3259871864f18dab211837fce3478778bedb3132ac1/ujson-5.12.0-cp314-cp314t-win_arm64.whl", hash = "sha256:42d875388fbd091c7ea01edfff260f839ba303038ffb23475ef392012e4d63dd", size = 40398, upload-time = "2026-03-11T22:19:11.666Z" }, + { url = "https://files.pythonhosted.org/packages/95/3c/5ee154d505d1aad2debc4ba38b1a60ae1949b26cdb5fa070e85e320d6b64/ujson-5.12.0-graalpy312-graalpy250_312_native-macosx_10_13_x86_64.whl", hash = "sha256:bf85a00ac3b56a1e7a19c5be7b02b5180a0895ac4d3c234d717a55e86960691c", size = 54494, upload-time = "2026-03-11T22:19:13.035Z" }, + { url = "https://files.pythonhosted.org/packages/ce/b3/9496ec399ec921e434a93b340bd5052999030b7ac364be4cbe5365ac6b20/ujson-5.12.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:64df53eef4ac857eb5816a56e2885ccf0d7dff6333c94065c93b39c51063e01d", size = 57999, upload-time = "2026-03-11T22:19:14.385Z" }, + { url = "https://files.pythonhosted.org/packages/0e/da/e9ae98133336e7c0d50b43626c3f2327937cecfa354d844e02ac17379ed1/ujson-5.12.0-graalpy312-graalpy250_312_native-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6c0aed6a4439994c9666fb8a5b6c4eac94d4ef6ddc95f9b806a599ef83547e3b", size = 54518, upload-time = "2026-03-11T22:19:15.4Z" }, + { url = 
"https://files.pythonhosted.org/packages/58/10/978d89dded6bb1558cd46ba78f4351198bd2346db8a8ee1a94119022ce40/ujson-5.12.0-graalpy312-graalpy250_312_native-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:efae5df7a8cc8bdb1037b0f786b044ce281081441df5418c3a0f0e1f86fe7bb3", size = 55736, upload-time = "2026-03-11T22:19:16.496Z" }, + { url = "https://files.pythonhosted.org/packages/80/25/1df8e6217c92e57a1266bf5be750b1dddc126ee96e53fe959d5693503bc6/ujson-5.12.0-graalpy312-graalpy250_312_native-win_amd64.whl", hash = "sha256:8712b61eb1b74a4478cfd1c54f576056199e9f093659334aeb5c4a6b385338e5", size = 44615, upload-time = "2026-03-11T22:19:17.53Z" }, + { url = "https://files.pythonhosted.org/packages/19/fa/f4a957dddb99bd68c8be91928c0b6fefa7aa8aafc92c93f5d1e8b32f6702/ujson-5.12.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:871c0e5102e47995b0e37e8df7819a894a6c3da0d097545cd1f9f1f7d7079927", size = 52145, upload-time = "2026-03-11T22:19:18.566Z" }, + { url = "https://files.pythonhosted.org/packages/55/6e/50b5cf612de1ca06c7effdc5a5d7e815774dee85a5858f1882c425553b82/ujson-5.12.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:56ba3f7abbd6b0bb282a544dc38406d1a188d8bb9164f49fdb9c2fee62cb29da", size = 49577, upload-time = "2026-03-11T22:19:19.627Z" }, + { url = "https://files.pythonhosted.org/packages/6e/24/b6713fa9897774502cd4c2d6955bb4933349f7d84c3aa805531c382a4209/ujson-5.12.0-pp311-pypy311_pp73-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c5a52987a990eb1bae55f9000994f1afdb0326c154fb089992f839ab3c30688", size = 50807, upload-time = "2026-03-11T22:19:20.778Z" }, + { url = "https://files.pythonhosted.org/packages/1f/b6/c0e0f7901180ef80d16f3a4bccb5dc8b01515a717336a62928963a07b80b/ujson-5.12.0-pp311-pypy311_pp73-manylinux_2_24_i686.manylinux_2_28_i686.whl", hash = "sha256:adf28d13a33f9d750fe7a78fb481cac298fa257d8863d8727b2ea4455ea41235", size = 56972, upload-time = "2026-03-11T22:19:21.84Z" }, + { url = 
"https://files.pythonhosted.org/packages/02/a9/05d91b4295ea7239151eb08cf240e5a2ba969012fda50bc27bcb1ea9cd71/ujson-5.12.0-pp311-pypy311_pp73-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:51acc750ec7a2df786cdc868fb16fa04abd6269a01d58cf59bafc57978773d8e", size = 52045, upload-time = "2026-03-11T22:19:22.879Z" }, + { url = "https://files.pythonhosted.org/packages/e3/7a/92047d32bf6f2d9db64605fc32e8eb0e0dd68b671eaafc12a464f69c4af4/ujson-5.12.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:ab9056d94e5db513d9313b34394f3a3b83e6301a581c28ad67773434f3faccab", size = 44053, upload-time = "2026-03-11T22:19:23.918Z" }, +] + [[package]] name = "unidiff" version = "0.7.5"