|
14 | 14 | "target_url": "目标URL", |
15 | 15 | "target_url_placeholder": "https://example.com", |
16 | 16 | "target_url_info": "要测试的网站URL", |
17 | | - "llm_config": "🤖 LLM配置", |
| 17 | + "llm_config": "🤖 模型配置", |
18 | 18 | "model_name": "模型名称", |
19 | | - "model_name_info": "使用的语言模型 (OPENAI SDK 兼容格式)", |
| 19 | + "model_name_info": "使用的视觉模型 (OPENAI SDK 兼容格式)", |
20 | 20 | "api_key": "API Key", |
21 | | - "api_key_info": "LLM服务的API密钥", |
| 21 | + "api_key_info": "模型 API Key", |
22 | 22 | "base_url": "Base URL", |
23 | | - "base_url_info": "LLM服务的基础URL", |
| 23 | + "base_url_info": "模型 Base URL", |
24 | 24 | "report_language": "🌐 报告语言", |
25 | 25 | "report_language_label": "选择报告语言", |
26 | 26 | "report_language_info": "选择测试报告和日志的显示语言", |
|
30 | 30 | "function_test_type_info": "default: 遍历测试,覆盖可点击元素和所有链接\n ai: 基于视觉模型的智能测试,能够模拟真实用户行为、理解业务上下文,验证网页功能。", |
31 | 31 | "business_objectives": "AI功能测试业务目标", |
32 | 32 | "business_objectives_placeholder": "测试对话功能", |
33 | | - "business_objectives_info": "ai: 定制不同场景,精准发现复杂功能问题。留空将使用默认设置(生成1个测试用例,每个用例包含6个步骤以内)", |
| 33 | + "business_objectives_info": "ai: 定制不同场景,精准发现复杂功能问题。留空将使用默认设置。", |
34 | 34 | "default_business_objectives": "生成1个测试用例,每个用例包含6个步骤以内", |
35 | 35 | "ux_test": "用户体验测试", |
36 | 36 | "performance_test": "性能测试", |
37 | | - "performance_test_info": "目前在 ModelScope 版本不可用;请前往 GitHub 体验", |
| 37 | + "performance_test_info": "Gradio 版本不可用;请前往 GitHub 体验", |
38 | 38 | "security_test": "安全测试", |
39 | | - "security_test_info": "目前在 ModelScope 版本不可用;请前往 GitHub 体验", |
| 39 | + "security_test_info": "Gradio 版本不可用;请前往 GitHub 体验", |
40 | 40 | "submit_btn": "🚀 提交测试", |
41 | 41 | "submit_btn_submitting": "🚀 提交中...", |
42 | 42 | "submit_result": "📄 任务提交结果" |
|
104 | 104 | "target_url": "Target URL", |
105 | 105 | "target_url_placeholder": "https://example.com", |
106 | 106 | "target_url_info": "Website URL to test", |
107 | | - "llm_config": "🤖 LLM Configuration", |
| 107 | + "llm_config": "🤖 Model Configuration", |
108 | 108 | "model_name": "Model Name", |
109 | | - "model_name_info": "Language model to use (OPENAI SDK compatible format)", |
| 109 | + "model_name_info": "Vision model to use (OPENAI SDK compatible format)", |
110 | 110 | "api_key": "API Key", |
111 | | - "api_key_info": "API key for LLM service", |
| 111 | + "api_key_info": "API key for model service", |
112 | 112 | "base_url": "Base URL", |
113 | | - "base_url_info": "Base URL for LLM service", |
| 113 | + "base_url_info": "Base URL for model service", |
114 | 114 | "report_language": "🌐 Report Language", |
115 | 115 | "report_language_label": "Select Report Language", |
116 | 116 | "report_language_info": "Choose display language for test reports and logs", |
|
120 | 120 | "function_test_type_info": "default: Traverse clickable elements & links.\n ai: Vision-model intelligent test simulating users & validating functionality.", |
121 | 121 | "business_objectives": "AI Function Test Business Objectives", |
122 | 122 | "business_objectives_placeholder": "Test chat functionality", |
123 | | - "business_objectives_info": "ai: Customize different scenarios, accurately find complex functional issues. Leave blank to use default settings (generate 1 test case with no more than 6 steps per case)", |
| 123 | + "business_objectives_info": "ai: Customize different scenarios, accurately find complex functional issues. Leave blank to use default settings.", |
124 | 124 | "default_business_objectives": "Generate 1 test case with no more than 6 steps per case", |
125 | 125 | "ux_test": "User Experience Test", |
126 | 126 | "performance_test": "Performance Test", |
127 | | - "performance_test_info": "Currently unavailable in HuggingFace version; please visit GitHub for experience", |
| 127 | + "performance_test_info": "Gradio version is not available; please visit GitHub for experience", |
128 | 128 | "security_test": "Security Test", |
129 | | - "security_test_info": "Currently unavailable in Huggingface version; please visit GitHub for experience", |
| 129 | + "security_test_info": "Gradio version is not available; please visit GitHub for experience", |
130 | 130 | "submit_btn": "🚀 Submit Test", |
131 | 131 | "submit_btn_submitting": "🚀 Submitting...", |
132 | 132 | "submit_result": "📄 Task Submission Result" |
|