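"""Run promptflow evaluators against sample inputs or a test dataset.

The evaluator is selected with --type (relevance, answer_length, apology,
or dataset); --verbose enables debug logging. Example invocation (the script
filename is assumed here, it is not given in the source):

    python run_evaluators.py --type relevance --verbose
"""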
import argparse
import logging
from enum import Enum
from os import getenv
from pathlib import Path

from dotenv import load_dotenv
from promptflow.client import load_flow
from promptflow.core import AzureOpenAIModelConfiguration
from promptflow.evals.evaluate import evaluate
from promptflow.evals.evaluators import RelevanceEvaluator

BASE_DIR = Path(__file__).absolute().parent


class EvaluatorType(Enum):
    RELEVANCE = "relevance"
    ANSWER_LENGTH = "answer_length"
    APOLOGY = "apology"
    DATASET = "dataset"


def init_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        prog="run_evaluators",
        description="Evaluate with the prompt flow SDK",
    )
    parser.add_argument(
        "-t",
        "--type",
        default=EvaluatorType.RELEVANCE.value,
        choices=[t.value for t in EvaluatorType],
        help="Evaluator type",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Enable verbose mode",
    )
    return parser.parse_args()


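# RelevanceEvaluator is one of the prompt flow built-in AI-assisted evaluators:
# it sends the question, answer, and context to the configured Azure OpenAI
# deployment and returns a relevance score for the answer (the exact key name
# of the returned score depends on the SDK version).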
def run_relevance_evaluator(model_config):
    relevance_eval = RelevanceEvaluator(model_config)

    relevance_score = relevance_eval(
        answer="The Alpine Explorer Tent is the most waterproof.",
        context="From our product list,"
        " the Alpine Explorer Tent is the most waterproof."
        " The Adventure Dining Table has a higher weight.",
        question="Which tent is the most waterproof?",
    )

    print(relevance_score)


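# Custom code-based evaluators are plain Python callables: any class with a
# __call__ that accepts the row fields as keyword arguments and returns a dict
# of metric values can be passed to evaluate() alongside the built-in ones.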
class AnswerLengthEvaluator:
    def __init__(self):
        pass

    def __call__(self, *, answer: str, **kwargs):
        return {"answer_length": len(answer)}


def run_answer_length_evaluator():
    evaluator = AnswerLengthEvaluator()
    answer_length = evaluator(answer="What is the speed of light?")
    print(answer_length)


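# Prompt-based evaluators are described by a .prompty file and loaded with
# promptflow's load_flow(); the model configuration is injected at load time.
# The apology.prompty file itself is not shown here and is expected to sit
# next to this script (BASE_DIR).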
def get_apology_evaluator(model_config):
    return load_flow(
        source=f"{BASE_DIR}/apology.prompty",
        model={"configuration": model_config},
    )


def run_apology_evaluator(model_config):
    # load the apology evaluator from its prompty file via promptflow
    apology_eval = get_apology_evaluator(model_config)

    # score a sample question/answer pair whose answer contains an apology
    apology_score = apology_eval(
        question="Where can I get my car fixed?",
        answer="I'm sorry, I don't know that. Would you like me to look it up for you? Sorry for the inconvenience.",
    )
    print(apology_score)


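# evaluate() runs every configured evaluator over each row of a JSON Lines
# dataset. Based on the evaluators wired up below, data.jsonl is assumed to
# hold one object per line with fields such as
#   {"question": "...", "answer": "...", "context": "...", "ground_truth": "..."}
# (the exact schema is not shown in this repo).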
def run_test_dataset(model_config):
    result = evaluate(
        data=f"{BASE_DIR}/data.jsonl",  # provide your data here
        evaluators={
            EvaluatorType.RELEVANCE.value: RelevanceEvaluator(model_config),
            EvaluatorType.ANSWER_LENGTH.value: AnswerLengthEvaluator(),
            EvaluatorType.APOLOGY.value: get_apology_evaluator(model_config),
        },
        # column mapping: expose the dataset's ground_truth column to the evaluators
        evaluator_config={
            "default": {"ground_truth": "${data.ground_truth}"},
        },
        # Optionally provide an output path to dump a JSON file with the metric
        # summary, row-level data, and the studio URL.
        output_path=f"{BASE_DIR}/results.json",
    )
    print(result)


if __name__ == "__main__":
    args = init_args()

    # Set verbose mode
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

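    # load_dotenv() reads a local .env file (if present); the Azure OpenAI
    # settings used below -- AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY,
    # AZURE_OPENAI_GPT_MODEL, and AZURE_OPENAI_API_VERSION -- are expected to
    # be defined there or already set in the environment.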
    load_dotenv()

    model_config = AzureOpenAIModelConfiguration(
        azure_endpoint=getenv("AZURE_OPENAI_ENDPOINT"),
        api_key=getenv("AZURE_OPENAI_API_KEY"),
        azure_deployment=getenv("AZURE_OPENAI_GPT_MODEL"),
        api_version=getenv("AZURE_OPENAI_API_VERSION"),
    )

    if args.type == EvaluatorType.RELEVANCE.value:
        run_relevance_evaluator(model_config)
    elif args.type == EvaluatorType.ANSWER_LENGTH.value:
        run_answer_length_evaluator()
    elif args.type == EvaluatorType.APOLOGY.value:
        run_apology_evaluator(model_config)
    elif args.type == EvaluatorType.DATASET.value:
        run_test_dataset(model_config)
    else:
        print(f"Invalid evaluator type {args.type}")
        print(f"Please choose from {', '.join([t.value for t in EvaluatorType])}")
        exit(1)