Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions providers.d/inline/eval/trustyai_lmeval.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
module: llama_stack_provider_lmeval.inline
config_class: llama_stack_provider_lmeval.config.LMEvalEvalProviderConfig
pip_packages: ["lm-eval"]
api_dependencies: ["inference", "files"]
optional_api_dependencies: []
2 changes: 1 addition & 1 deletion providers.d/remote/eval/trustyai_lmeval.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,6 @@ adapter:
adapter_type: lmeval
pip_packages: ["kubernetes"]
config_class: llama_stack_provider_lmeval.config.LMEvalEvalProviderConfig
module: llama_stack_provider_lmeval
module: llama_stack_provider_lmeval.remote
api_dependencies: ["inference"]
optional_api_dependencies: []
30 changes: 30 additions & 0 deletions run-inline.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
version: "2"
image_name: trustyai-lmeval
apis:
- inference
- eval
- files
providers:
inference:
- provider_id: vllm
provider_type: remote::vllm
config:
url: ${env.VLLM_URL:=http://localhost:8080/v1}
max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
api_token: ${env.VLLM_API_TOKEN:=fake}
tls_verify: ${env.VLLM_TLS_VERIFY:=false}
eval:
Copy link

@saichandrapandraju saichandrapandraju Sep 29, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: shall we start using the `module:` way of specifying the provider, since lls-core is moving away from `external_providers_dir`?

- provider_id: trustyai_lmeval
provider_type: inline::trustyai_lmeval
config:
base_url: ${env.BASE_URL:=http://localhost:8321/v1}
use_k8s: ${env.USE_K8S:=false}
files:
- provider_id: meta-reference-files
provider_type: inline::localfs
config:
storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/trustyai-lmeval/files}
metadata_store:
type: sqlite
db_path: ${env.METADATA_STORE_DB_PATH:=~/.llama/distributions/trustyai-lmeval}/registry.db
external_providers_dir: ./providers.d
54 changes: 0 additions & 54 deletions src/llama_stack_provider_lmeval/__init__.py
Original file line number Diff line number Diff line change
@@ -1,54 +0,0 @@
import logging

from llama_stack.apis.datatypes import Api
from llama_stack.providers.datatypes import ProviderSpec

from .config import LMEvalEvalProviderConfig
from .lmeval import LMEval
from .provider import get_provider_spec

# Set up logging
logger = logging.getLogger(__name__)


async def get_adapter_impl(
    config: LMEvalEvalProviderConfig,
    deps: dict[Api, ProviderSpec] | None = None,
) -> LMEval:
    """Get an LMEval implementation from the configuration.

    Args:
        config: LMEval provider configuration.
        deps: Optional API dependencies for testing/injection.

    Returns:
        Configured LMEval implementation.

    Raises:
        RuntimeError: If the LMEval implementation cannot be constructed.
    """
    if deps is None:
        deps = {}

    # Surface the configured base_url in the debug log to aid troubleshooting.
    # NOTE(review): the value is only logged, never passed to LMEval —
    # presumably LMEval reads it from `config` itself; confirm.
    model_args = getattr(config, "model_args", None) or []
    for arg in model_args:
        if arg.get("name") == "base_url":
            # Lazy %-args: the message is only formatted when DEBUG is enabled.
            logger.debug("Using base_url from config: %s", arg.get("value"))
            break

    # Keep the try body minimal — only the construction can meaningfully fail.
    try:
        return LMEval(config=config)
    except Exception as e:
        # RuntimeError is a subclass of Exception, so existing callers that
        # caught the old bare Exception keep working.
        raise RuntimeError(f"Failed to create LMEval implementation: {str(e)}") from e


# Public API of this package.
__all__ = [
    # Factory methods
    "get_adapter_impl",
    # Public classes and helpers (LMEval is the provider implementation,
    # get_provider_spec builds its ProviderSpec)
    "LMEval",
    "get_provider_spec",
]
3 changes: 3 additions & 0 deletions src/llama_stack_provider_lmeval/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from __future__ import annotations

from dataclasses import dataclass, field
from pathlib import Path
from typing import Any

from llama_stack.apis.eval import BenchmarkConfig, EvalCandidate
Expand Down Expand Up @@ -125,6 +126,8 @@ class LMEvalEvalProviderConfig:
metadata: dict[str, Any] | None = None
# TLS configuration - structured approach
tls: TLSConfig | None = None
base_dir: Path = Path(__file__).parent
results_dir: Path = base_dir / "results"

def __post_init__(self):
"""Validate the configuration"""
Expand Down
52 changes: 52 additions & 0 deletions src/llama_stack_provider_lmeval/inline/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
"""LMEval Inline Eval Llama Stack provider."""

import logging

from llama_stack.apis.datatypes import Api
from llama_stack.providers.datatypes import ProviderSpec

from llama_stack_provider_lmeval.config import LMEvalEvalProviderConfig

from .lmeval import LMEvalInline

logger = logging.getLogger(__name__)


async def get_provider_impl(
    config: LMEvalEvalProviderConfig,
    deps: dict[Api, ProviderSpec] | None = None,
) -> LMEvalInline:
    """Get an inline Eval implementation from the configuration.

    Args:
        config: LMEval provider configuration.
        deps: Optional API dependencies — may be ProviderSpec or API
            instances; passed through to the implementation.

    Returns:
        Configured LMEvalInline implementation.

    Raises:
        RuntimeError: If the LMEvalInline implementation cannot be constructed.
    """
    if deps is None:
        deps = {}

    # Surface the configured base_url in the debug log to aid troubleshooting.
    # NOTE(review): the value is only logged, never passed to LMEvalInline —
    # presumably the implementation reads it from `config` itself; confirm.
    model_args = getattr(config, "model_args", None) or []
    for arg in model_args:
        if arg.get("name") == "base_url":
            logger.debug("Using base_url from config: %s", arg.get("value"))
            break

    # Keep the try body minimal — only the construction can meaningfully fail.
    try:
        return LMEvalInline(config=config, deps=deps)
    except Exception as e:
        raise RuntimeError(f"Failed to create LMEval implementation: {str(e)}") from e


# Public API of this package: the provider factory and its implementation class.
__all__ = [
    "get_provider_impl",
    "LMEvalInline",
]
Loading
Loading