Commit 10f57d8

test: add Anthropic LLM plugin integration test (#2424)
* test: add Anthropic LLM integration test
* test: enrich test
* chore: bump version to 0.2.5
1 parent 0bb44df commit 10f57d8

3 files changed: +73 -2 lines changed

models/anthropic/manifest.yaml

Lines changed: 1 addition & 1 deletion
@@ -25,4 +25,4 @@ resource:
     model:
       enabled: false
 type: plugin
-version: 0.2.4
+version: 0.2.5

models/anthropic/pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -14,4 +14,4 @@ dependencies = [
 
 # uv run black . -C -l 100 && uv run ruff check --fix
 [dependency-groups]
-dev = []
+dev = ["pytest>=8.0.0"]

Lines changed: 71 additions & 0 deletions
@@ -0,0 +1,71 @@
import os
from pathlib import Path

import pytest

from dify_plugin.config.integration_config import IntegrationConfig
from dify_plugin.core.entities.plugin.request import (
    ModelActions,
    ModelInvokeLLMRequest,
    PluginInvokeType,
)
from dify_plugin.entities.model import ModelType
from dify_plugin.entities.model.llm import LLMResultChunk
from dify_plugin.integration.run import PluginRunner


def get_all_models() -> list[str]:
    """Discover all model names from models/llm/*.yaml files."""
    models_dir = Path(__file__).parent.parent / "models" / "llm"
    models: list[str] = []
    for yaml_file in models_dir.glob("*.yaml"):
        if yaml_file.name.startswith("_"):
            continue
        model_name = yaml_file.stem
        models.append(model_name)
    return models


@pytest.mark.parametrize("model_name", get_all_models())
def test_llm_invoke(model_name: str) -> None:
    api_key = os.getenv("ANTHROPIC_API_KEY")
    if not api_key:
        raise ValueError("ANTHROPIC_API_KEY environment variable is required")

    plugin_path = os.getenv("PLUGIN_FILE_PATH")
    if not plugin_path:
        raise ValueError("PLUGIN_FILE_PATH environment variable is required")

    payload = ModelInvokeLLMRequest(
        user_id="test_user",
        provider="anthropic",
        model_type=ModelType.LLM,
        model=model_name,
        credentials={"anthropic_api_key": api_key},
        prompt_messages=[{"role": "user", "content": "Say hello in one word."}],
        model_parameters={"max_tokens": 100},
        stop=None,
        tools=None,
        stream=True,
    )

    with PluginRunner(
        config=IntegrationConfig(), plugin_package_path=plugin_path
    ) as runner:
        results: list[LLMResultChunk] = []
        for result in runner.invoke(
            access_type=PluginInvokeType.Model,
            access_action=ModelActions.InvokeLLM,
            payload=payload,
            response_type=LLMResultChunk,
        ):
            results.append(result)

    # Verify we received multiple chunks
    assert len(results) > 0, f"No results received for model {model_name}"

    # Verify concatenated content is non-empty
    full_content = "".join(
        r.delta.message.content for r in results if r.delta.message.content
    )
    assert len(full_content) > 0, f"Empty content for model {model_name}"
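
A design note on the precondition handling above: raising ValueError makes a missing ANTHROPIC_API_KEY or PLUGIN_FILE_PATH surface as a test error. A common alternative, sketched here with plain pytest (hypothetical fixture and test names, not part of this commit), is to skip instead, so unconfigured environments report skips rather than failures:

import os

import pytest


@pytest.fixture
def anthropic_api_key() -> str:
    # Hypothetical fixture: skip, rather than error, when the credential
    # is absent from the environment.
    key = os.getenv("ANTHROPIC_API_KEY")
    if not key:
        pytest.skip("ANTHROPIC_API_KEY not set")
    return key


def test_llm_invoke_skips_without_key(anthropic_api_key: str) -> None:
    # The real test above would accept this fixture as an argument and
    # build its credentials dict from the returned key.
    assert anthropic_api_key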
