41 changes: 37 additions & 4 deletions README.md
@@ -51,7 +51,40 @@ pip install dingo-python

## Example Use Cases

### 1. Evaluate Local Text File (Plaintext)
### 1. Using the Evaluation Core

```python
from dingo.config.config import DynamicLLMConfig
from dingo.io.input.MetaData import MetaData
from dingo.model.llm.detect_text_quality_detail import DetectTextQualityDetail
from dingo.model.prompt.prompt_common import PromptRepeat
from dingo.model.rule.rule_common import RuleEnterAndSpace

def llm():
    # LLM-based check: set the prompt and LLM endpoint, then score one sample via the API.
    DetectTextQualityDetail.prompt = PromptRepeat()
    DetectTextQualityDetail.dynamic_config = DynamicLLMConfig(
        key='',      # your LLM API key
        api_url='',  # URL of the LLM service endpoint
        # model='',  # optionally specify the model name
    )
    res = DetectTextQualityDetail.call_api(MetaData(
        data_id='123',
        prompt="hello, introduce the world",
        content="Hello! The world is a vast and diverse place, full of wonders, cultures, and incredible natural beauty."
    ))
    print(res)


def rule():
    # Rule-based check: apply a single rule to one sample, no LLM call involved.
    data = MetaData(
        data_id='123',
        prompt="hello, introduce the world",
        content="Hello! The world is a vast and diverse place, full of wonders, cultures, and incredible natural beauty."
    )
    res = RuleEnterAndSpace().eval(data)
    print(res)
```
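
To run these checks end to end, call the two functions directly, as in `examples/core/score.py`. A minimal sketch, assuming `key` and `api_url` above have been filled in with valid credentials:

```python
if __name__ == "__main__":
    llm()   # needs a valid key/api_url in DynamicLLMConfig
    rule()  # rule check runs locally
```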

### 2. Evaluate Local Text File (Plaintext)

```python
from dingo.io import InputArgs
@@ -72,7 +105,7 @@ result = executor.execute()
print(result)
```

### 2. Evaluate Hugging Face Dataset
### 3. Evaluate Hugging Face Dataset

```python
from dingo.io import InputArgs
@@ -92,7 +125,7 @@ result = executor.execute()
print(result)
```

### 3. Evaluate JSON/JSONL Format
### 4. Evaluate JSON/JSONL Format

```python
from dingo.io import InputArgs
@@ -114,7 +147,7 @@ result = executor.execute()
print(result)
```

### 4. Using LLM for Evaluation
### 5. Using LLM for Evaluation

```python
from dingo.io import InputArgs
41 changes: 37 additions & 4 deletions README_zh-CN.md
@@ -40,7 +40,40 @@ pip install dingo-python

## 2. Usage Examples

### 2.1 Evaluate Local Text File (Plaintext)
### 2.1 Using the Evaluation Core

```python
from dingo.config.config import DynamicLLMConfig
from dingo.io.input.MetaData import MetaData
from dingo.model.llm.detect_text_quality_detail import DetectTextQualityDetail
from dingo.model.prompt.prompt_common import PromptRepeat
from dingo.model.rule.rule_common import RuleEnterAndSpace

def llm():
    # LLM-based check: set the prompt and LLM endpoint, then score one sample via the API.
    DetectTextQualityDetail.prompt = PromptRepeat()
    DetectTextQualityDetail.dynamic_config = DynamicLLMConfig(
        key='',      # your LLM API key
        api_url='',  # URL of the LLM service endpoint
        # model='',  # optionally specify the model name
    )
    res = DetectTextQualityDetail.call_api(MetaData(
        data_id='123',
        prompt="hello, introduce the world",
        content="Hello! The world is a vast and diverse place, full of wonders, cultures, and incredible natural beauty."
    ))
    print(res)


def rule():
    # Rule-based check: apply a single rule to one sample, no LLM call involved.
    data = MetaData(
        data_id='123',
        prompt="hello, introduce the world",
        content="Hello! The world is a vast and diverse place, full of wonders, cultures, and incredible natural beauty."
    )
    res = RuleEnterAndSpace().eval(data)
    print(res)
```

### 2.2 Evaluate Local Text File (Plaintext)

```python
from dingo.io import InputArgs
@@ -61,7 +94,7 @@ result = executor.execute()
print(result)
```

### 2.2 Evaluate Hugging Face Dataset
### 2.3 Evaluate Hugging Face Dataset

```python
from dingo.io import InputArgs
@@ -81,7 +114,7 @@ result = executor.execute()
print(result)
```

### 2.3 Evaluate JSON/JSONL Format
### 2.4 Evaluate JSON/JSONL Format

```python
from dingo.io import InputArgs
@@ -103,7 +136,7 @@ result = executor.execute()
print(result)
```

### 2.4 Using LLM for Evaluation
### 2.5 Using LLM for Evaluation

```python
from dingo.io import InputArgs
14 changes: 14 additions & 0 deletions dingo/model/llm/detect_text_quality_detail.py
@@ -1,8 +1,12 @@
import json

from dingo.config.config import DynamicLLMConfig
from dingo.io.input.MetaData import MetaData
from dingo.model import Model
from dingo.model.llm.base_openai import BaseOpenAI
from dingo.model.modelres import ModelRes
from dingo.model.prompt.base import BasePrompt
from dingo.model.prompt.prompt_common import PromptRepeat
from dingo.model.response.response_class import ResponseScoreTypeNameReason
from dingo.utils import log
from dingo.utils.exception import ConvertJsonError
@@ -38,3 +42,13 @@ def process_response(cls, response: str) -> ModelRes:
        result.reason = [response_model.reason]

        return result

if __name__ == "__main__":
    # Minimal manual check: configure the prompt and LLM endpoint, then score a single sample.
    DetectTextQualityDetail.prompt = PromptRepeat()
    DetectTextQualityDetail.dynamic_config = DynamicLLMConfig(
        key='',      # your LLM API key
        api_url='',  # URL of the LLM service endpoint
        # model='',  # optionally specify the model name
    )
    res = DetectTextQualityDetail.call_api(MetaData(data_id='123', content="hello, introduce the world"))
    print(res)
33 changes: 33 additions & 0 deletions examples/core/score.py
@@ -0,0 +1,33 @@
from dingo.config.config import DynamicLLMConfig
from dingo.io.input.MetaData import MetaData
from dingo.model.llm.detect_text_quality_detail import DetectTextQualityDetail
from dingo.model.prompt.prompt_common import PromptRepeat
from dingo.model.rule.rule_common import RuleEnterAndSpace


def llm():
    # LLM-based check: set the prompt and LLM endpoint, then score one sample via the API.
    DetectTextQualityDetail.prompt = PromptRepeat()
    DetectTextQualityDetail.dynamic_config = DynamicLLMConfig(
        key='',      # your LLM API key
        api_url='',  # URL of the LLM service endpoint
        # model='',  # optionally specify the model name
    )
    res = DetectTextQualityDetail.call_api(MetaData(
        data_id='123',
        prompt="hello, introduce the world",
        content="Hello! The world is a vast and diverse place, full of wonders, cultures, and incredible natural beauty."
    ))
    print(res)


def rule():
    # Rule-based check: apply a single rule to one sample, no LLM call involved.
    data = MetaData(
        data_id='123',
        prompt="hello, introduce the world",
        content="Hello! The world is a vast and diverse place, full of wonders, cultures, and incredible natural beauty."
    )
    res = RuleEnterAndSpace().eval(data)
    print(res)


if __name__ == "__main__":
    llm()
    rule()