
Commit 4bc91b8

Merge pull request #111 from ks6088ts-labs/feature/issue-109_promptflow-evaluation
add tracing feature to evaluator script
2 parents 43beadb + c2d8f8b

File tree

1 file changed: +14 −0

  • apps/11_promptflow/evaluators

apps/11_promptflow/evaluators/main.py

Lines changed: 14 additions & 0 deletions
@@ -9,6 +9,7 @@
 from promptflow.core import AzureOpenAIModelConfiguration
 from promptflow.evals.evaluate import evaluate
 from promptflow.evals.evaluators import RelevanceEvaluator
+from promptflow.tracing import start_trace, trace

 BASE_DIR = Path(__file__).absolute().parent

@@ -32,6 +33,12 @@ def init_args() -> argparse.Namespace:
         choices=[t.value for t in EvaluatorType],
         help="Evaluator type",
     )
+    parser.add_argument(
+        "-r",
+        "--trace",
+        action="store_true",
+        help="Enable tracing",
+    )
     parser.add_argument(
         "-v",
         "--verbose",
@@ -41,6 +48,7 @@ def init_args() -> argparse.Namespace:
     return parser.parse_args()


+@trace
 def run_relevance_evaluator(model_config):
     relevance_eval = RelevanceEvaluator(model_config)

@@ -63,6 +71,7 @@ def __call__(self, *, answer: str, **kwargs):
         return {"answer_length": len(answer)}


+@trace
 def run_answer_length_evaluator():
     evaluator = AnswerLengthEvaluator()
     answer_length = evaluator(answer="What is the speed of light?")
@@ -76,6 +85,7 @@ def get_apology_evaluator(model_config):
     )


+@trace
 def run_apology_evaluator(model_config):
     apology_eval = get_apology_evaluator(model_config)

@@ -87,6 +97,7 @@ def run_apology_evaluator(model_config):
     print(apology_score)


+@trace
 def run_test_dataset(model_config):
     result = evaluate(
         data=f"{BASE_DIR}/data.jsonl",  # provide your data here
@@ -112,6 +123,9 @@ def run_test_dataset(model_config):
     if args.verbose:
         logging.basicConfig(level=logging.DEBUG)

+    if args.trace:
+        start_trace()
+
     load_dotenv()

     model_config = AzureOpenAIModelConfiguration(
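
The change follows promptflow's standard tracing pattern: decorate the functions to be recorded with @trace, then call start_trace() once before they run so spans are collected — here gated behind the new --trace flag. A minimal sketch of the same pattern (the standalone function below is illustrative, adapted from the diff's AnswerLengthEvaluator, not the repo's exact code):

from promptflow.tracing import start_trace, trace

@trace
def answer_length(answer: str) -> dict:
    # Functions decorated with @trace are recorded as spans
    # once tracing has been started.
    return {"answer_length": len(answer)}

if __name__ == "__main__":
    start_trace()  # opt-in, mirroring the script's `if args.trace:` guard
    print(answer_length(answer="What is the speed of light?"))

Because the flag is wired into argparse with action="store_true", tracing stays opt-in: running the script with --trace (or -r) enables it, and omitting the flag leaves the evaluators' behavior unchanged.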
