|
| 1 | +import logging |
| 2 | +import os |
| 3 | +from logging import basicConfig |
| 4 | + |
| 5 | +import mlflow |
| 6 | +import typer |
| 7 | +from dotenv import load_dotenv |
| 8 | +from langchain_core.messages import HumanMessage, SystemMessage |
| 9 | +from mlflow.genai import scorer |
| 10 | +from mlflow.genai.scorers import Correctness, Guidelines |
| 11 | + |
| 12 | +from template_langgraph.agents.demo_agents.weather_agent import graph |
| 13 | +from template_langgraph.llms.azure_openais import AzureOpenAiWrapper, Settings |
| 14 | +from template_langgraph.loggers import get_logger |
| 15 | + |
# Typer application that exposes the MLflow operator commands defined below.
app = typer.Typer(
    add_completion=False,
    help="MLflow operator CLI",
)
# Module-level logger shared by every command in this file.
logger = get_logger(__name__)
| 21 | + |
| 22 | + |
def set_verbose_logging(verbose: bool):
    """Raise log verbosity to DEBUG when *verbose* is true; otherwise do nothing."""
    if not verbose:
        return
    # Bump both this module's logger and the root logging configuration.
    logger.setLevel(logging.DEBUG)
    basicConfig(level=logging.DEBUG)
| 27 | + |
| 28 | + |
@app.command(
    help="Run the LangGraph agent with MLflow tracing ref. https://mlflow.org/docs/2.21.3/tracing/integrations/langgraph"
)
def tracing(
    query: str = typer.Option(
        "What is the weather like in Japan?",
        "--query",
        "-q",
        help="Query to run with the LangGraph agent",
    ),
    experiment_name: str = typer.Option(
        "LangGraph Experiment",
        "--experiment-name",
        "-e",
        help="MLflow experiment name",
    ),
    tracking_uri: str = typer.Option(
        "http://localhost:5001",
        "--tracking-uri",
        "-t",
        help="MLflow tracking URI",
    ),
    verbose: bool = typer.Option(
        True,
        "--verbose",
        "-v",
        help="Enable verbose output",
    ),
):
    """Run the weather agent once with MLflow LangChain tracing enabled.

    Points MLflow at ``tracking_uri``/``experiment_name``, invokes the
    LangGraph ``graph`` with a single human message, then fetches the trace
    that was just recorded and logs its token usage.
    """
    set_verbose_logging(verbose)
    logger.info("Running...")

    # Enable automatic trace capture for LangChain/LangGraph calls.
    mlflow.langchain.autolog()
    mlflow.set_tracking_uri(tracking_uri)
    mlflow.set_experiment(experiment_name)

    result = graph.invoke(
        {
            "messages": [
                HumanMessage(content=query),
            ]
        },
    )
    logger.info("Result: %s", result)

    # Get the trace object just created. The last active trace id can be
    # None if autologging recorded nothing, so guard before fetching to
    # avoid a confusing error from mlflow.get_trace(trace_id=None).
    trace_id = mlflow.get_last_active_trace_id()
    if trace_id is None:
        logger.warning("No active trace was recorded for this run")
        return
    trace = mlflow.get_trace(trace_id=trace_id)
    logger.info("Trace info: %s", trace.info.token_usage)
| 79 | + |
| 80 | + |
@app.command(
    help="Evaluate the LangGraph agent with MLflow tracing ref. https://mlflow.org/docs/latest/genai/eval-monitor/quickstart/"
)
def evaluate(
    experiment_name: str = typer.Option(
        "LangGraph Experiment",
        "--experiment-name",
        "-e",
        help="MLflow experiment name",
    ),
    tracking_uri: str = typer.Option(
        "http://localhost:5001",
        "--tracking-uri",
        "-t",
        help="MLflow tracking URI",
    ),
    verbose: bool = typer.Option(
        True,
        "--verbose",
        "-v",
        help="Enable verbose output",
    ),
):
    """Run an MLflow GenAI evaluation of a simple Q&A predict function.

    Evaluates three canned question/answer pairs against three scorers:
    LLM-judged correctness, an "answer must be English" guideline, and a
    local conciseness check. Results are logged to ``experiment_name`` on
    the MLflow server at ``tracking_uri``.
    """
    set_verbose_logging(verbose)
    logger.info("Running...")

    mlflow.langchain.autolog()
    mlflow.set_tracking_uri(tracking_uri)
    mlflow.set_experiment(experiment_name)

    llm = AzureOpenAiWrapper().chat_model

    def qa_predict_fn(question: str) -> str:
        """Simple Q&A prediction function using OpenAI"""
        response = llm.invoke(
            [
                SystemMessage(content="You are a helpful assistant. Answer questions concisely."),
                HumanMessage(content=question),
            ]
        )
        # response.content may be a str or a list of content parts;
        # coerce to str for the scorers.
        return str(response.content)

    @scorer
    def is_concise(outputs: str) -> bool:
        """Evaluate if the answer is concise (less than 5 words)"""
        return len(outputs.split()) <= 5

    # To configure LiteLLM for Azure OpenAI ref. https://docs.litellm.ai/docs/providers/azure/
    # LiteLLM (used by the LLM-judge scorers) reads these environment variables.
    settings = Settings()

    os.environ["AZURE_API_KEY"] = settings.azure_openai_api_key
    os.environ["AZURE_API_BASE"] = settings.azure_openai_endpoint
    os.environ["AZURE_API_VERSION"] = settings.azure_openai_api_version
    os.environ["AZURE_API_TYPE"] = "azure"

    # MLflow judge-model URI: "<provider>:/<model>".
    model = f"azure:/{settings.azure_openai_model_chat}"
    results = mlflow.genai.evaluate(
        data=[
            {
                "inputs": {"question": "What is the capital of France?"},
                "expectations": {"expected_response": "Paris"},
            },
            {
                "inputs": {"question": "Who was the first person to build an airplane?"},
                "expectations": {"expected_response": "Wright Brothers"},
            },
            {
                "inputs": {"question": "Who wrote Romeo and Juliet?"},
                "expectations": {"expected_response": "William Shakespeare"},
            },
        ],
        predict_fn=qa_predict_fn,
        scorers=[
            Correctness(model=model),
            Guidelines(
                model=model,
                name="is_english",
                guidelines="The answer must be in English",
            ),
            is_concise,
        ],
    )
    logger.info("Evaluation results: %s", results)
| 164 | + |
| 165 | + |
if __name__ == "__main__":
    # Populate os.environ from a .env file before any command runs,
    # letting .env values win over pre-existing environment variables.
    load_dotenv(override=True, verbose=True)
    app()
0 commit comments