# Configuration file for managing API keys as environment variables
from dotenv import load_dotenv

# Load API key information from a local .env file into the environment.
# (The stray bare `True` that followed this call was notebook REPL echo
# of load_dotenv()'s return value and has been removed.)
load_dotenv()

# Set up LangSmith tracking. https://smith.langchain.com
# !pip install -qU langchain-teddynote
from langchain_teddynote import logging

# Enter a project name.
logging.langsmith("CH17-LangGraph-Structures")

from rag.pdf import PDFRetrievalChain

# Load a PDF document and build the retrieval chain around it.
pdf = PDFRetrievalChain(["data/SPRI_AI_Brief_2023년12월호_F.pdf"]).create_chain()

# Expose the retriever and the QA chain built from the PDF.
pdf_retriever = pdf.retriever
pdf_chain = pdf.chain

from langchain_core.tools.retriever import create_retriever_tool
from langchain_core.prompts import PromptTemplate

# Create a search tool based on the PDF document.
retriever_tool = create_retriever_tool(
    pdf_retriever,
    "pdf_retriever",
    "Search and return information about SPRI AI Brief PDF file. It contains useful information on recent AI trends. The document is published on Dec 2023.",
    # Controls how each retrieved document is rendered before being
    # handed back to the agent as tool output.
    document_prompt=PromptTemplate.from_template(
        "<document><context>{page_content}</context><metadata><source>{source}</source><page>{page}</page></metadata></document>"
    ),
)

# Add the generated search tool to the tools list to make it available to the agent.
tools = [retriever_tool]
from typing import Annotated, Sequence, TypedDict
from langchain_core.messages import BaseMessage
from langgraph.graph.message import add_messages
# Typed dictionary defining the agent state shared by every graph node.
class AgentState(TypedDict):
    # The add_messages reducer appends messages returned by nodes to the
    # existing sequence instead of replacing it on each state update.
    messages: Annotated[Sequence[BaseMessage], add_messages]
from typing import Literal
from langchain import hub
from langchain_core.messages import HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import tools_condition
from langchain_teddynote.models import get_model_name, LLMs
# Get the latest model name
MODEL_NAME = get_model_name(LLMs.GPT4)
# Defining a data model
class grade(BaseModel):
    """A binary score for relevance checks.

    Structured-output schema for the grading LLM: the model must fill
    `binary_score` with the literal string 'yes' or 'no'.
    """

    # NOTE(review): lowercase class name breaks PascalCase convention, but
    # it is referenced by grade_documents(), so it is kept unchanged here.
    binary_score: str = Field(
        description="Response 'yes' if the document is relevant to the question or 'no' if it is not."
    )
def grade_documents(state) -> Literal["generate", "rewrite"]:
    """Grade the retrieved documents for relevance to the user question.

    Reads the first message in ``state`` (the original question) and the
    last message (the retriever tool output), asks a structured-output
    LLM for a yes/no relevance verdict, and returns the name of the next
    node: "generate" when relevant, "rewrite" otherwise.
    """
    # Grader LLM constrained to answer through the `grade` schema.
    grader_llm = ChatOpenAI(temperature=0, model=MODEL_NAME, streaming=True)
    structured_grader = grader_llm.with_structured_output(grade)

    grading_prompt = PromptTemplate(
        template="""You are a grader assessing relevance of a retrieved document to a user question. \n
    Here is the retrieved document: \n\n {context} \n\n
    Here is the user question: {question} \n
    If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant. \n
    Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question.""",
        input_variables=["context", "question"],
    )

    # prompt -> structured LLM chain
    relevance_chain = grading_prompt | structured_grader

    # First message: original user question; last message: tool output.
    msgs = state["messages"]
    question = msgs[0].content
    retrieved_docs = msgs[-1].content

    # Run the relevance assessment.
    verdict = relevance_chain.invoke(
        {"question": question, "context": retrieved_docs}
    )

    if verdict.binary_score == "yes":
        print("==== [DECISION: DOCS RELEVANT] ====")
        return "generate"

    print("==== [DECISION: DOCS NOT RELEVANT] ====")
    print(verdict.binary_score)
    return "rewrite"
def agent(state):
    """Run the LLM (with the retriever tool bound) over the conversation.

    The model either answers directly or emits a tool call that the
    graph's conditional edge routes to the "retrieve" node.
    """
    # Tool-aware chat model; binding lets the LLM request pdf_retriever.
    chat = ChatOpenAI(temperature=0, streaming=True, model=MODEL_NAME).bind_tools(tools)
    # A one-element list so the add_messages reducer appends the reply.
    return {"messages": [chat.invoke(state["messages"])]}
def rewrite(state):
    """Reformulate the original user question to sharpen its intent.

    The rewritten question is appended to the message history and the
    graph loops back to the agent node with it.
    """
    print("==== [QUERY REWRITE] ====")

    # The very first message in the state is the original question.
    question = state["messages"][0].content

    # Single-message prompt asking the LLM to improve the question.
    rewrite_request = HumanMessage(
        content=f""" \n
    Look at the input and try to reason about the underlying semantic intent / meaning. \n
    Here is the initial question:
    \n ------- \n
    {question}
    \n ------- \n
    Formulate an improved question: """,
    )

    rewriter = ChatOpenAI(temperature=0, model=MODEL_NAME, streaming=True)
    # Return the rewritten question as the newest message.
    return {"messages": [rewriter.invoke([rewrite_request])]}
def generate(state):
    """Produce the final RAG answer from the question and retrieved docs."""
    msgs = state["messages"]
    question = msgs[0].content  # original user question
    docs = msgs[-1].content     # newest message: the retrieved documents

    # RAG prompt template pulled from the LangChain hub.
    prompt = hub.pull("teddynote/rag-prompt")
    llm = ChatOpenAI(model_name=MODEL_NAME, temperature=0, streaming=True)

    # prompt -> LLM -> plain-string parser, invoked in one expression.
    answer = (prompt | llm | StrOutputParser()).invoke(
        {"context": docs, "question": question}
    )
    return {"messages": [answer]}
# Importing graph and tool node components from the LangGraph library
from langgraph.graph import END, StateGraph, START
from langgraph.prebuilt import ToolNode

# Initializing AgentState-based state graph workflow
workflow = StateGraph(AgentState)

# Define and add the nodes that make up the workflow cycle.
workflow.add_node("agent", agent)  # Agent node: decides whether to call the retriever
retrieve = ToolNode([retriever_tool])
workflow.add_node("retrieve", retrieve)  # Retrieval node: executes the PDF search tool
workflow.add_node("rewrite", rewrite)  # Query-rewrite node: reformulates the question
workflow.add_node("generate", generate)  # Generation node: answers once docs are relevant

# Connect the graph's starting point to the agent node.
workflow.add_edge(START, "agent")

# Add conditional edges to determine whether to search
workflow.add_conditional_edges(
    "agent",
    # Routes on whether the agent's reply contains a tool call
    tools_condition,
    {
        # Map conditional outputs to graph nodes
        "tools": "retrieve",
        END: END,
    },
)

# Edges processed after the retrieval node runs: grade_documents returns
# "generate" or "rewrite", which double as the destination node names.
workflow.add_conditional_edges(
    "retrieve",
    # Document relevance assessment
    grade_documents,
)
workflow.add_edge("generate", END)
workflow.add_edge("rewrite", "agent")

# Compile the workflow graph
graph = workflow.compile()
from langchain_teddynote.graphs import visualize_graph

# Render the compiled graph structure for inspection.
visualize_graph(graph)

from langchain_teddynote.messages import stream_graph
from langchain_core.runnables import RunnableConfig

# config settings (maximum number of recursions, thread_id)
config = RunnableConfig(recursion_limit=10, configurable={"thread_id": "1"})

# Question answerable from the PDF: exercises retrieval + generation.
inputs = {
    "messages": [
        ("user", "The name of the generative AI developed by Samsung Electronics is?"),
    ]
}
# Running the graph
stream_graph(graph, inputs, config, ["agent", "rewrite", "generate"])

# Example question the agent answers directly, without document search.
inputs = {
    "messages": [
        ("user", "The capital of South Korea is?"),
    ]
}
# Running the graph
stream_graph(graph, inputs, config, ["agent", "rewrite", "generate"])
# Captured cell output (commented out so the file stays valid Python):
# ==================================================
# 🔄 Node: agent 🔄
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# The capital of Korea is Seoul.
from langgraph.errors import GraphRecursionError

# Question that can never be answered from the PDF: the graph keeps
# cycling agent -> retrieve -> rewrite until the recursion limit trips.
inputs = {
    "messages": [
        ("user", "The address of TeddyNote's LangChain tutorial is?"),
    ]
}

try:
    # Running the graph
    stream_graph(graph, inputs, config, ["agent", "rewrite", "generate"])
except GraphRecursionError as recursion_error:
    # recursion_limit=10 from `config` is exhausted before a stop condition.
    print(f"GraphRecursionError: {recursion_error}")
# Captured cell output (commented out so the file stays valid Python):
# ==================================================
# 🔄 Node: agent 🔄
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# ==== [DECISION: DOCS NOT RELEVANT] ====
# no
# ==== [QUERY REWRITE] ====
# ==================================================
# 🔄 Node: rewrite 🔄
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Can you tell me the link to the LangChain tutorial provided by Teddy Note?
# ==================================================
# 🔄 Node: agent 🔄
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# ==== [DECISION: DOCS NOT RELEVANT] ====
# no
# ==== [QUERY REWRITE] ====
# ==================================================
# 🔄 Node: rewrite 🔄
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Can you tell me the link to the LangChain tutorial provided by Teddy Note?
# ==================================================
# 🔄 Node: agent 🔄
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# ==== [DECISION: DOCS NOT RELEVANT] ====
# no
# ==== [QUERY REWRITE] ====
# ==================================================
# 🔄 Node: rewrite 🔄
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Can you tell me the link to the LangChain tutorial provided by Teddy Note?
# ==================================================
# 🔄 Node: agent 🔄
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# GraphRecursionError: Recursion limit of 10 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
# For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT