8 changes: 8 additions & 0 deletions .env.template
@@ -31,3 +31,11 @@ AZURE_BLOB_CONTAINER_NAME="audio"
# Azure AI Speech
AZURE_AI_SPEECH_API_ENDPOINT="https://<speech-api-name>.cognitiveservices.azure.com/"
AZURE_AI_SPEECH_API_SUBSCRIPTION_KEY="<speech-api-subscription-key>"

# Bing search resource
BING_SUBSCRIPTION_KEY="<bing-subscription-key>"
BING_SEARCH_URL="https://api.bing.microsoft.com/v7.0/search"

# LangSmith
LANGCHAIN_TRACING_V2="true"
LANGCHAIN_API_KEY="<langchain-api-key>"
1 change: 1 addition & 0 deletions .gitignore
@@ -166,3 +166,4 @@ generated/
*.pt
*.jpg
*.jpeg
.chroma
1 change: 1 addition & 0 deletions README.md
@@ -41,6 +41,7 @@ Here are the preferred tools for development.
| [9_streamlit_azure_document_intelligence](./apps/9_streamlit_azure_document_intelligence/README.md) | Call Azure AI Document Intelligence API with Streamlit | ![9_streamlit_azure_document_intelligence](./docs/images/9_streamlit_azure_document_intelligence.main.png) |
| [10_streamlit_batch_transcription](./apps/10_streamlit_batch_transcription/README.md) | Call Batch Transcription API with Streamlit | ![10_streamlit_batch_transcription](./docs/images/10_streamlit_batch_transcription.main.png) |
| [11_promptflow](./apps/11_promptflow/README.md) | Get started with Prompt flow | No Image |
| [12_langgraph_agent](./apps/12_langgraph_agent/README.md) | Create agents with LangGraph | No Image |
| [99_streamlit_examples](./apps/99_streamlit_examples/README.md) | Code samples for Streamlit | ![99_streamlit_examples](./docs/images/99_streamlit_examples.explaindata.png) |

## How to run
62 changes: 62 additions & 0 deletions apps/12_langgraph_agent/README.md
@@ -0,0 +1,62 @@
# Create agents with LangGraph

This app demonstrates how to implement agents with LangGraph.

## Prerequisites

- Python 3.10 or later
- Azure OpenAI Service
- Bing Search resource (for the examples that use web search; see the new keys in `.env.template`)

## Overview

**What is [LangGraph](https://langchain-ai.github.io/langgraph/)?**

LangGraph is a library for building stateful, multi-actor applications with LLMs, used to create agent and multi-agent workflows.

This directory provides practical examples of how to use LangGraph to create agents that can interact with users and external tools.
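
A minimal sketch of the core LangGraph pattern these examples build on (the `State` type and `chatbot` node are illustrative, not code from this repo):

```python
from typing import Annotated

from typing_extensions import TypedDict

from langgraph.graph import END, START, StateGraph
from langgraph.graph.message import add_messages


class State(TypedDict):
    # Conversation history; the add_messages reducer appends instead of overwriting
    messages: Annotated[list, add_messages]


def chatbot(state: State) -> dict:
    # A node is a function from state to a partial state update.
    # A real node would call an LLM (e.g. AzureChatOpenAI) on state["messages"].
    return {"messages": [("assistant", "hello")]}


builder = StateGraph(State)
builder.add_node("chatbot", chatbot)
builder.add_edge(START, "chatbot")
builder.add_edge("chatbot", END)
graph = builder.compile()

print(graph.invoke({"messages": [("human", "hi")]}))
```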

## Usage

1. Get Azure OpenAI Service API key
1. Copy [.env.template](../../.env.template) to `.env` in the same directory
1. Set credentials in `.env`
1. Run the `main.py` of the example you want to try (see the commands under [Examples](#examples))

```shell
# Create a virtual environment
$ python -m venv .venv

# Activate the virtual environment
$ source .venv/bin/activate

# Install dependencies
$ pip install -r requirements.txt
```

### Examples

#### [reflection_agent](./reflection_agent/main.py)

An agent that improves its own answers by looping between a generation step and a self-critique step, following the reflection pattern (see [Reflection Agents](https://blog.langchain.dev/reflection-agents/) in the references); a sketch follows below.
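
A rough sketch of such a generate/reflect loop, assuming the same Azure OpenAI setup as the chains in this PR (node names, prompts, and the turn budget are illustrative):

```python
from os import getenv

from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import AzureChatOpenAI
from langgraph.graph import END, START, MessagesState, StateGraph

llm = AzureChatOpenAI(
    temperature=0,
    api_key=getenv("AZURE_OPENAI_API_KEY"),
    api_version=getenv("AZURE_OPENAI_API_VERSION"),
    azure_endpoint=getenv("AZURE_OPENAI_ENDPOINT"),
    model=getenv("AZURE_OPENAI_GPT_MODEL"),
)

MAX_MESSAGES = 6  # stop reflecting after a few rounds


def generate(state: MessagesState) -> dict:
    # Draft or revise an answer given the conversation, including past critiques
    return {"messages": [llm.invoke(state["messages"])]}


def reflect(state: MessagesState) -> dict:
    critique = llm.invoke(
        [SystemMessage(content="Critique the last answer and suggest improvements.")] + state["messages"]
    )
    # Feed the critique back to the generator as if it came from the user
    return {"messages": [HumanMessage(content=critique.content)]}


def should_continue(state: MessagesState) -> str:
    return END if len(state["messages"]) > MAX_MESSAGES else "reflect"


builder = StateGraph(MessagesState)
builder.add_node("generate", generate)
builder.add_node("reflect", reflect)
builder.add_edge(START, "generate")
builder.add_conditional_edges("generate", should_continue)
builder.add_edge("reflect", "generate")
graph = builder.compile()

result = graph.invoke({"messages": [("human", "Write a short post about LangGraph")]})
```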

#### [react_agent](./react_agent/main.py)

An agent that follows the ReAct (reason + act) pattern: the LLM decides when to call external tools and loops until it can answer; a sketch follows below.
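
A minimal sketch using LangGraph's prebuilt ReAct agent with the Bing search tool whose credentials this PR adds to `.env.template` (whether `react_agent/main.py` is built exactly this way is an assumption):

```python
from os import getenv

from dotenv import load_dotenv
from langchain_community.tools.bing_search import BingSearchResults
from langchain_community.utilities import BingSearchAPIWrapper
from langchain_openai import AzureChatOpenAI
from langgraph.prebuilt import create_react_agent

load_dotenv()

llm = AzureChatOpenAI(
    temperature=0,
    api_key=getenv("AZURE_OPENAI_API_KEY"),
    api_version=getenv("AZURE_OPENAI_API_VERSION"),
    azure_endpoint=getenv("AZURE_OPENAI_ENDPOINT"),
    model=getenv("AZURE_OPENAI_GPT_MODEL"),
)

# BingSearchAPIWrapper reads BING_SUBSCRIPTION_KEY / BING_SEARCH_URL from the environment
tools = [BingSearchResults(api_wrapper=BingSearchAPIWrapper())]

# The prebuilt ReAct loop: the LLM either calls a tool or answers, repeating until done
agent = create_react_agent(llm, tools)
result = agent.invoke({"messages": [("human", "What is LangGraph?")]})
print(result["messages"][-1].content)
```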

#### [advanced_rag_flows](./advanced_rag_flows/main.py)

An adaptive RAG pipeline: a router sends each question to a Chroma vectorstore or to web search, retrieved documents are graded for relevance, and generations are graded for groundedness and answer quality. Build the vector store first, then run the flow:

```shell
# create vector store
python apps/12_langgraph_agent/advanced_rag_flows/ingestion.py

# run main.py
python apps/12_langgraph_agent/advanced_rag_flows/main.py
```

![Advanced RAG Flows](../../docs/images/12_langgraph_agent_graph.png)
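
The graph above can be wired roughly as follows, using the node names from `graph/consts.py` and the chains shown later in this diff (the `GraphState` fields and the stub node bodies are assumptions; the real nodes call the retriever, the graders, a web-search tool, and `generation_chain`):

```python
from typing import List

from typing_extensions import TypedDict

from langgraph.graph import END, StateGraph

# Node names, mirroring graph/consts.py
RETRIEVE = "retrieve"
GRADE_DOCUMENTS = "grade_documents"
GENERATE = "generate"
WEBSEARCH = "websearch"


class GraphState(TypedDict):
    question: str
    generation: str
    web_search: bool
    documents: List[str]


# Stub nodes: the real ones invoke the retriever, retrieval_grader,
# a web-search tool, and generation_chain respectively
def retrieve(state: GraphState) -> dict:
    return {"documents": []}


def grade_documents(state: GraphState) -> dict:
    return {"documents": state["documents"], "web_search": True}


def web_search(state: GraphState) -> dict:
    return {"documents": state["documents"]}


def generate(state: GraphState) -> dict:
    return {"generation": "..."}


def route_question(state: GraphState) -> str:
    # question_router (graph/chains/router.py) returns "vectorstore" or "websearch"
    return "vectorstore"


def decide_to_generate(state: GraphState) -> str:
    # Fall back to web search when grading filtered out all documents
    return WEBSEARCH if state["web_search"] else GENERATE


workflow = StateGraph(GraphState)
workflow.add_node(RETRIEVE, retrieve)
workflow.add_node(GRADE_DOCUMENTS, grade_documents)
workflow.add_node(WEBSEARCH, web_search)
workflow.add_node(GENERATE, generate)

workflow.set_conditional_entry_point(route_question, {"vectorstore": RETRIEVE, WEBSEARCH: WEBSEARCH})
workflow.add_edge(RETRIEVE, GRADE_DOCUMENTS)
workflow.add_conditional_edges(GRADE_DOCUMENTS, decide_to_generate, {WEBSEARCH: WEBSEARCH, GENERATE: GENERATE})
workflow.add_edge(WEBSEARCH, GENERATE)
# The real flow also runs hallucination_grader and answer_grader after
# GENERATE and may regenerate or re-search; simplified to a direct end here
workflow.add_edge(GENERATE, END)

app = workflow.compile()
```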

## References

- [LangGraph](https://langchain-ai.github.io/langgraph/)
- [Udemy > LangGraph- Develop LLM powered agents with LangGraph](https://www.udemy.com/course/langgraph)
- [emarco177/langgaph-course](https://github.com/emarco177/langgaph-course)
- [Prompt flow > Tracing](https://microsoft.github.io/promptflow/how-to-guides/tracing/index.html)
- [Reflection Agents](https://blog.langchain.dev/reflection-agents/)
- [LangChain > Reflexion](https://langchain-ai.github.io/langgraph/tutorials/reflexion/reflexion/)
- [LangChain > Bing Search](https://python.langchain.com/docs/integrations/tools/bing_search/)
Empty file.
Empty file.
33 changes: 33 additions & 0 deletions apps/12_langgraph_agent/advanced_rag_flows/graph/chains/answer_grader.py
@@ -0,0 +1,33 @@
from os import getenv

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableSequence
from langchain_openai import AzureChatOpenAI
from pydantic import BaseModel, Field


class GradeAnswer(BaseModel):
    """Binary score to assess whether the answer addresses the question."""

    binary_score: bool = Field(description="Answer addresses the question, 'yes' or 'no'")


llm = AzureChatOpenAI(
temperature=0,
api_key=getenv("AZURE_OPENAI_API_KEY"),
api_version=getenv("AZURE_OPENAI_API_VERSION"),
azure_endpoint=getenv("AZURE_OPENAI_ENDPOINT"),
model=getenv("AZURE_OPENAI_GPT_MODEL"),
)

structured_llm_grader = llm.with_structured_output(GradeAnswer)

system = """You are a grader assessing whether an answer addresses / resolves a question \n
Give a binary score 'yes' or 'no'. Yes' means that the answer resolves the question."""
answer_prompt = ChatPromptTemplate.from_messages(
[
("system", system),
("human", "User question: \n\n {question} \n\n LLM generation: {generation}"),
]
)

answer_grader: RunnableSequence = answer_prompt | structured_llm_grader
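
A usage sketch (the strings are illustrative; in the full flow this grader checks the final generation):

```python
res: GradeAnswer = answer_grader.invoke(
    {
        "question": "agent memory",
        "generation": "Agent memory stores past interactions so the agent can recall them later.",
    }
)
print(res.binary_score)  # True when the generation resolves the question
```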
16 changes: 16 additions & 0 deletions apps/12_langgraph_agent/advanced_rag_flows/graph/chains/generation.py
@@ -0,0 +1,16 @@
from os import getenv

from langchain import hub
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import AzureChatOpenAI

llm = AzureChatOpenAI(
temperature=0,
api_key=getenv("AZURE_OPENAI_API_KEY"),
api_version=getenv("AZURE_OPENAI_API_VERSION"),
azure_endpoint=getenv("AZURE_OPENAI_ENDPOINT"),
model=getenv("AZURE_OPENAI_GPT_MODEL"),
)
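# Pull the community RAG prompt (fills {context} and {question}) from the LangChain Hub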
prompt = hub.pull("rlm/rag-prompt")

generation_chain = prompt | llm | StrOutputParser()
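
A usage sketch, mirroring how the tests later in this diff call the chain (`retriever` comes from the app's `ingestion.py`):

```python
from ingestion import retriever

question = "agent memory"
docs = retriever.invoke(question)
answer = generation_chain.invoke({"context": docs, "question": question})
```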
34 changes: 34 additions & 0 deletions apps/12_langgraph_agent/advanced_rag_flows/graph/chains/hallucination_grader.py
@@ -0,0 +1,34 @@
from os import getenv

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableSequence
from langchain_openai import AzureChatOpenAI
from pydantic import BaseModel, Field

llm = AzureChatOpenAI(
temperature=0,
api_key=getenv("AZURE_OPENAI_API_KEY"),
api_version=getenv("AZURE_OPENAI_API_VERSION"),
azure_endpoint=getenv("AZURE_OPENAI_ENDPOINT"),
model=getenv("AZURE_OPENAI_GPT_MODEL"),
)


class GradeHallucinations(BaseModel):
"""Binary score for hallucination present in generation answer."""

binary_score: bool = Field(description="Answer is grounded in the facts, 'yes' or 'no'")


structured_llm_grader = llm.with_structured_output(GradeHallucinations)

system = """You are a grader assessing whether an LLM generation is grounded in / supported by a set of retrieved facts. \n
Give a binary score 'yes' or 'no'. 'Yes' means that the answer is grounded in / supported by the set of facts.""" # noqa
hallucination_prompt = ChatPromptTemplate.from_messages(
[
("system", system),
("human", "Set of facts: \n\n {documents} \n\n LLM generation: {generation}"),
]
)

hallucination_grader: RunnableSequence = hallucination_prompt | structured_llm_grader
34 changes: 34 additions & 0 deletions apps/12_langgraph_agent/advanced_rag_flows/graph/chains/retrieval_grader.py
@@ -0,0 +1,34 @@
from os import getenv

from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import AzureChatOpenAI
from pydantic import BaseModel, Field

llm = AzureChatOpenAI(
temperature=0,
api_key=getenv("AZURE_OPENAI_API_KEY"),
api_version=getenv("AZURE_OPENAI_API_VERSION"),
azure_endpoint=getenv("AZURE_OPENAI_ENDPOINT"),
model=getenv("AZURE_OPENAI_GPT_MODEL"),
)


class GradeDocuments(BaseModel):
"""Binary score for relevance check on retrieved documents."""

binary_score: str = Field(description="Documents are relevant to the question, 'yes' or 'no'")


structured_llm_grader = llm.with_structured_output(GradeDocuments)

system = """You are a grader assessing relevance of a retrieved document to a user question. \n
If the document contains keyword(s) or semantic meaning related to the question, grade it as relevant. \n
Give a binary score 'yes' or 'no' to indicate whether the document is relevant to the question."""
grade_prompt = ChatPromptTemplate.from_messages(
[
("system", system),
("human", "Retrieved document: \n\n {document} \n\n User question: {question}"),
]
)

retrieval_grader = grade_prompt | structured_llm_grader
37 changes: 37 additions & 0 deletions apps/12_langgraph_agent/advanced_rag_flows/graph/chains/router.py
@@ -0,0 +1,37 @@
from os import getenv
from typing import Literal

from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import AzureChatOpenAI
from pydantic import BaseModel, Field


class RouteQuery(BaseModel):
"""Route a user query to the most relevant datasource."""

datasource: Literal["vectorstore", "websearch"] = Field(
...,
description="Given a user question choose to route it to web search or a vectorstore.",
)


llm = AzureChatOpenAI(
temperature=0,
api_key=getenv("AZURE_OPENAI_API_KEY"),
api_version=getenv("AZURE_OPENAI_API_VERSION"),
azure_endpoint=getenv("AZURE_OPENAI_ENDPOINT"),
model=getenv("AZURE_OPENAI_GPT_MODEL"),
)
structured_llm_router = llm.with_structured_output(RouteQuery)

system = """You are an expert at routing a user question to a vectorstore or web search.
The vectorstore contains documents related to agents, prompt engineering, and adversarial attacks.
Use the vectorstore for questions on these topics. For all else, use web search."""
route_prompt = ChatPromptTemplate.from_messages(
[
("system", system),
("human", "{question}"),
]
)

question_router = route_prompt | structured_llm_router
74 changes: 74 additions & 0 deletions apps/12_langgraph_agent/advanced_rag_flows/graph/chains/chains_test.py
@@ -0,0 +1,74 @@
from pprint import pprint

from dotenv import load_dotenv
from ingestion import retriever

from graph.chains.generation import generation_chain
from graph.chains.hallucination_grader import GradeHallucinations, hallucination_grader
from graph.chains.retrieval_grader import GradeDocuments, retrieval_grader
from graph.chains.router import RouteQuery, question_router

load_dotenv()


def test_generation_chain() -> None:
question = "agent memory"
docs = retriever.invoke(question)
generation = generation_chain.invoke({"context": docs, "question": question})
pprint(generation)


def test_retrieval_grader_answer_yes() -> None:
question = "agent memory"
docs = retriever.invoke(question)
doc_txt = docs[1].page_content

res: GradeDocuments = retrieval_grader.invoke({"question": question, "document": doc_txt})

assert res.binary_score == "yes"


def test_retrieval_grader_answer_no() -> None:
question = "agent memory"
docs = retriever.invoke(question)
doc_txt = docs[1].page_content

    res: GradeDocuments = retrieval_grader.invoke({"question": "how to make pizza", "document": doc_txt})

assert res.binary_score == "no"


def test_hallucination_grader_answer_yes() -> None:
question = "agent memory"
docs = retriever.invoke(question)

generation = generation_chain.invoke({"context": docs, "question": question})
res: GradeHallucinations = hallucination_grader.invoke({"documents": docs, "generation": generation})
assert res.binary_score


def test_hallucination_grader_answer_no() -> None:
question = "agent memory"
docs = retriever.invoke(question)

res: GradeHallucinations = hallucination_grader.invoke(
{
"documents": docs,
"generation": "In order to make pizza we need to first start with the dough",
}
)
assert not res.binary_score


def test_router_to_vectorstore() -> None:
question = "agent memory"

res: RouteQuery = question_router.invoke({"question": question})
assert res.datasource == "vectorstore"


def test_router_to_websearch() -> None:
question = "how to make pizza"

res: RouteQuery = question_router.invoke({"question": question})
assert res.datasource == "websearch"
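
These tests call the live Azure OpenAI deployment and read the local Chroma store, so run `ingestion.py` first. A likely invocation (the exact test layout is an assumption):

```shell
cd apps/12_langgraph_agent/advanced_rag_flows
python -m pytest graph/chains/ -v
```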
4 changes: 4 additions & 0 deletions apps/12_langgraph_agent/advanced_rag_flows/graph/consts.py
@@ -0,0 +1,4 @@
RETRIEVE = "retrieve"
GRADE_DOCUMENTS = "grade_documents"
GENERATE = "generate"
WEBSEARCH = "websearch"