From 97ac768451f6ba40f0134286ccce687386561614 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Wed, 2 Oct 2024 09:28:48 -0700 Subject: [PATCH] feat(docs): Add migrating memory docs (#6873) --- docs/core_docs/.gitignore | 32 +- .../docs/how_to/chatbots_memory.ipynb | 1412 +++++-------- .../docs/how_to/chatbots_retrieval.ipynb | 1 + .../docs/how_to/chatbots_tools.ipynb | 611 +++--- .../docs/how_to/message_history.ipynb | 586 ++++++ .../core_docs/docs/how_to/message_history.mdx | 206 -- .../docs/how_to/qa_chat_history_how_to.ipynb | 1110 ++++++++-- docs/core_docs/docs/tutorials/chatbot.ipynb | 1201 +++++++---- .../docs/tutorials/qa_chat_history.ipynb | 1800 +++++++++++++---- .../migrating_memory/chat_history.ipynb | 268 +++ .../conversation_buffer_window_memory.ipynb | 643 ++++++ .../conversation_summary_memory.ipynb | 45 + .../docs/versions/migrating_memory/index.mdx | 139 ++ .../v0_2/migrating_astream_events.mdx | 2 +- docs/core_docs/sidebars.js | 30 +- .../img/conversational_retrieval_chain.png | Bin 0 -> 92811 bytes 16 files changed, 5695 insertions(+), 2391 deletions(-) create mode 100644 docs/core_docs/docs/how_to/message_history.ipynb delete mode 100644 docs/core_docs/docs/how_to/message_history.mdx create mode 100644 docs/core_docs/docs/versions/migrating_memory/chat_history.ipynb create mode 100644 docs/core_docs/docs/versions/migrating_memory/conversation_buffer_window_memory.ipynb create mode 100644 docs/core_docs/docs/versions/migrating_memory/conversation_summary_memory.ipynb create mode 100644 docs/core_docs/docs/versions/migrating_memory/index.mdx create mode 100644 docs/core_docs/static/img/conversational_retrieval_chain.png diff --git a/docs/core_docs/.gitignore b/docs/core_docs/.gitignore index 5b3735122408..4ddd437b5101 100644 --- a/docs/core_docs/.gitignore +++ b/docs/core_docs/.gitignore @@ -138,6 +138,8 @@ docs/how_to/multimodal_inputs.md docs/how_to/multimodal_inputs.mdx docs/how_to/migrate_agent.md docs/how_to/migrate_agent.mdx +docs/how_to/message_history.md +docs/how_to/message_history.mdx docs/how_to/merge_message_runs.md docs/how_to/merge_message_runs.mdx docs/how_to/logprobs.md @@ -198,14 +200,14 @@ docs/how_to/character_text_splitter.md docs/how_to/character_text_splitter.mdx docs/how_to/cancel_execution.md docs/how_to/cancel_execution.mdx +docs/how_to/callbacks_serverless.md +docs/how_to/callbacks_serverless.mdx docs/how_to/callbacks_runtime.md docs/how_to/callbacks_runtime.mdx docs/how_to/callbacks_custom_events.md docs/how_to/callbacks_custom_events.mdx docs/how_to/callbacks_constructor.md docs/how_to/callbacks_constructor.mdx -docs/how_to/callbacks_backgrounding.md -docs/how_to/callbacks_backgrounding.mdx docs/how_to/callbacks_attach.md docs/how_to/callbacks_attach.mdx docs/how_to/binding.md @@ -214,6 +216,12 @@ docs/how_to/assign.md docs/how_to/assign.mdx docs/how_to/agent_executor.md docs/how_to/agent_executor.mdx +docs/versions/migrating_memory/conversation_summary_memory.md +docs/versions/migrating_memory/conversation_summary_memory.mdx +docs/versions/migrating_memory/conversation_buffer_window_memory.md +docs/versions/migrating_memory/conversation_buffer_window_memory.mdx +docs/versions/migrating_memory/chat_history.md +docs/versions/migrating_memory/chat_history.mdx docs/integrations/vectorstores/weaviate.md docs/integrations/vectorstores/weaviate.mdx docs/integrations/vectorstores/upstash.md @@ -252,10 +260,6 @@ docs/integrations/toolkits/sql.md docs/integrations/toolkits/sql.mdx docs/integrations/toolkits/openapi.md 
docs/integrations/toolkits/openapi.mdx -docs/integrations/stores/in_memory.md -docs/integrations/stores/in_memory.mdx -docs/integrations/stores/file_system.md -docs/integrations/stores/file_system.mdx docs/integrations/text_embedding/togetherai.md docs/integrations/text_embedding/togetherai.mdx docs/integrations/text_embedding/openai.md @@ -278,12 +282,18 @@ docs/integrations/text_embedding/bedrock.md docs/integrations/text_embedding/bedrock.mdx docs/integrations/text_embedding/azure_openai.md docs/integrations/text_embedding/azure_openai.mdx +docs/integrations/stores/in_memory.md +docs/integrations/stores/in_memory.mdx +docs/integrations/stores/file_system.md +docs/integrations/stores/file_system.mdx docs/integrations/retrievers/tavily.md docs/integrations/retrievers/tavily.mdx docs/integrations/retrievers/kendra-retriever.md docs/integrations/retrievers/kendra-retriever.mdx docs/integrations/retrievers/exa.md docs/integrations/retrievers/exa.mdx +docs/integrations/retrievers/bm25.md +docs/integrations/retrievers/bm25.mdx docs/integrations/retrievers/bedrock-knowledge-bases.md docs/integrations/retrievers/bedrock-knowledge-bases.mdx docs/integrations/llms/together.md @@ -304,10 +314,10 @@ docs/integrations/llms/cloudflare_workersai.md docs/integrations/llms/cloudflare_workersai.mdx docs/integrations/llms/bedrock.md docs/integrations/llms/bedrock.mdx -docs/integrations/llms/arcjet.md -docs/integrations/llms/arcjet.mdx docs/integrations/llms/azure.md docs/integrations/llms/azure.mdx +docs/integrations/llms/arcjet.md +docs/integrations/llms/arcjet.mdx docs/integrations/chat/togetherai.md docs/integrations/chat/togetherai.mdx docs/integrations/chat/openai.md @@ -332,10 +342,10 @@ docs/integrations/chat/bedrock_converse.md docs/integrations/chat/bedrock_converse.mdx docs/integrations/chat/bedrock.md docs/integrations/chat/bedrock.mdx -docs/integrations/chat/arcjet.md -docs/integrations/chat/arcjet.mdx docs/integrations/chat/azure.md docs/integrations/chat/azure.mdx +docs/integrations/chat/arcjet.md +docs/integrations/chat/arcjet.mdx docs/integrations/chat/anthropic.md docs/integrations/chat/anthropic.mdx docs/integrations/retrievers/self_query/weaviate.md @@ -375,4 +385,4 @@ docs/integrations/document_loaders/file_loaders/pdf.mdx docs/integrations/document_loaders/file_loaders/directory.md docs/integrations/document_loaders/file_loaders/directory.mdx docs/integrations/document_loaders/file_loaders/csv.md -docs/integrations/document_loaders/file_loaders/csv.mdx +docs/integrations/document_loaders/file_loaders/csv.mdx \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/chatbots_memory.ipynb b/docs/core_docs/docs/how_to/chatbots_memory.ipynb index e41b3f1bb465..197a11eab4c5 100644 --- a/docs/core_docs/docs/how_to/chatbots_memory.ipynb +++ b/docs/core_docs/docs/how_to/chatbots_memory.ipynb @@ -1,18 +1,19 @@ { "cells": [ + { + "cell_type": "raw", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 1\n", + "---" + ] + }, { "cell_type": "markdown", "metadata": {}, "source": [ - "# How to manage memory\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following:\n", - "\n", - "- [Chatbots](/docs/tutorials/chatbot)\n", - "\n", - ":::\n", + "# How to add memory to chatbots\n", "\n", "A key feature of chatbots is their ability to use content of previous conversation turns as context. 
This state management can take several forms, including:\n",
 "\n",
@@ -20,18 +21,29 @@
 "- The above, but trimming old messages to reduce the amount of distracting information the model has to deal with.\n",
 "- More complex modifications like synthesizing summaries for long running conversations.\n",
 "\n",
- "We’ll go into more detail on a few techniques below!"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
+ "We'll go into more detail on a few techniques below!\n",
+ "\n",
+ ":::note\n",
+ "\n",
+ "This how-to guide previously built a chatbot using [RunnableWithMessageHistory](https://v03.api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html). You can access this version of the tutorial in the [v0.2 docs](https://js.langchain.com/v0.2/docs/how_to/chatbots_memory/).\n",
+ "\n",
+ "The LangGraph implementation offers a number of advantages over `RunnableWithMessageHistory`, including the ability to persist arbitrary components of an application's state (instead of only messages).\n",
+ "\n",
+ ":::\n",
+ "\n",
 "## Setup\n",
 "\n",
- "You’ll need to install a few packages, and set any LLM API keys:\n",
+ "You'll need to install a few packages, select your chat model, and set its environment variable.\n",
+ "\n",
+ "```{=mdx}\n",
+ "import Npm2Yarn from \"@theme/Npm2Yarn\"\n",
+ "\n",
+ "<Npm2Yarn>\n",
+ "  @langchain/core @langchain/langgraph\n",
+ "</Npm2Yarn>\n",
+ "```\n",
 "\n",
- "Let’s also set up a chat model that we’ll use for the below examples:\n",
+ "Let's set up a chat model that we'll use for the below examples.\n",
 "\n",
 "```{=mdx}\n",
 "import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
@@ -46,42 +58,53 @@
 "source": [
 "## Message passing\n",
 "\n",
- "The simplest form of memory is simply passing chat history messages into a chain. Here’s an example:"
+ "The simplest form of memory is simply passing chat history messages into a chain. 
Here's an example:" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({ model: \"gpt-4o\" })" + ] + }, + { + "cell_type": "code", + "execution_count": 23, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m`I said \"J'adore la programmation,\" which means \"I love programming\" in French.`\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m`I said \"J'adore la programmation,\" which means \"I love programming\" in French.`\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m21\u001b[39m, promptTokens: \u001b[33m61\u001b[39m, totalTokens: \u001b[33m82\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABSxUXVIBitFRBh9MpasB5jeEHfCA\",\n", + " \"content\": \"I said \\\"J'adore la programmation,\\\" which means \\\"I love programming\\\" in French.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 18,\n", + " \"promptTokens\": 58,\n", + " \"totalTokens\": 76\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 58,\n", + " \"output_tokens\": 18,\n", + " \"total_tokens\": 76\n", + " }\n", + "}\n" + ] } ], "source": [ @@ -119,303 +142,191 @@ "We can see that by passing the previous conversation into a chain, it can use it as context to answer questions. This is the basic concept underpinning chatbot memory - the rest of the guide will demonstrate convenient techniques for passing or reformatting messages." ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Chat history\n", - "\n", - "It’s perfectly fine to store and pass messages directly as an array, but we can use LangChain’s built-in message history class to store and load messages as well. Instances of this class are responsible for storing and loading chat messages from persistent storage. 
LangChain integrates with many providers but for this demo we will use an ephemeral demo class.\n", - "\n", - "Here’s an example of the API:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Translate this sentence from English to French: I love programming.\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Translate this sentence from English to French: I love programming.\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"J'adore la programmation.\"\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"J'adore la programmation.\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " }\n", - "]" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { ChatMessageHistory } from \"langchain/stores/message/in_memory\";\n", - "\n", - "const demoEphemeralChatMessageHistory = new ChatMessageHistory();\n", - "\n", - "await demoEphemeralChatMessageHistory.addMessage(\n", - " new HumanMessage(\n", - " \"Translate this sentence from English to French: I love programming.\"\n", - " )\n", - ");\n", - "\n", - "await demoEphemeralChatMessageHistory.addMessage(\n", - " new AIMessage(\"J'adore la programmation.\")\n", - ");\n", - "\n", - "await demoEphemeralChatMessageHistory.getMessages();" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can use it directly to store conversation turns for our chain:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m'You just asked me to translate the sentence \"I love programming\" from English to French.'\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m'You just asked me to translate the sentence \"I love programming\" from English to French.'\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m18\u001b[39m, promptTokens: \u001b[33m73\u001b[39m, totalTokens: \u001b[33m91\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " 
invalid_tool_calls: []\n", - "}" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await demoEphemeralChatMessageHistory.clear();\n", - "\n", - "const input1 =\n", - " \"Translate this sentence from English to French: I love programming.\";\n", - "\n", - "await demoEphemeralChatMessageHistory.addMessage(new HumanMessage(input1));\n", - "\n", - "const response = await chain.invoke({\n", - " messages: await demoEphemeralChatMessageHistory.getMessages(),\n", - "});\n", - "\n", - "await demoEphemeralChatMessageHistory.addMessage(response);\n", - "\n", - "const input2 = \"What did I just ask you?\";\n", - "\n", - "await demoEphemeralChatMessageHistory.addMessage(new HumanMessage(input2));\n", - "\n", - "await chain.invoke({\n", - " messages: await demoEphemeralChatMessageHistory.getMessages(),\n", - "});" - ] - }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Automatic history management\n", "\n", - "The previous examples pass messages to the chain explicitly. This is a completely acceptable approach, but it does require external management of new messages. LangChain also includes an wrapper for LCEL chains that can handle this process automatically called `RunnableWithMessageHistory`.\n", - "\n", - "To show how it works, let’s slightly modify the above prompt to take a final `input` variable that populates a `HumanMessage` template after the chat history. This means that we will expect a `chat_history` parameter that contains all messages BEFORE the current messages instead of all messages:" + "The previous examples pass messages to the chain (and model) explicitly. This is a completely acceptable approach, but it does require external management of new messages. LangChain also provides a way to build applications that have memory using LangGraph's persistence. You can enable persistence in LangGraph applications by providing a `checkpointer` when compiling the graph." ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 24, "metadata": {}, "outputs": [], "source": [ - "const runnableWithMessageHistoryPrompt = ChatPromptTemplate.fromMessages([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n", - " ],\n", - " new MessagesPlaceholder(\"chat_history\"),\n", - " [\"human\", \"{input}\"],\n", - "]);\n", + "import { START, END, MessagesAnnotation, StateGraph, MemorySaver } from \"@langchain/langgraph\";\n", "\n", - "const chain2 = runnableWithMessageHistoryPrompt.pipe(llm);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We’ll pass the latest input to the conversation here and let the `RunnableWithMessageHistory` class wrap our chain and do the work of appending that `input` variable to the chat history.\n", "\n", - "Next, let’s declare our wrapped chain:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "import { RunnableWithMessageHistory } from \"@langchain/core/runnables\";\n", + "// Define the function that calls the model\n", + "const callModel = async (state: typeof MessagesAnnotation.State) => {\n", + " const systemPrompt = \n", + " \"You are a helpful assistant. 
\" +\n", + " \"Answer all questions to the best of your ability.\";\n", + " const messages = [{ role: \"system\", content: systemPrompt }, ...state.messages];\n", + " const response = await llm.invoke(messages);\n", + " return { messages: response };\n", + "};\n", "\n", - "const demoEphemeralChatMessageHistoryForChain = new ChatMessageHistory();\n", + "const workflow = new StateGraph(MessagesAnnotation)\n", + "// Define the node and edge\n", + " .addNode(\"model\", callModel)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", "\n", - "const chainWithMessageHistory = new RunnableWithMessageHistory({\n", - " runnable: chain2,\n", - " getMessageHistory: (_sessionId) => demoEphemeralChatMessageHistoryForChain,\n", - " inputMessagesKey: \"input\",\n", - " historyMessagesKey: \"chat_history\",\n", - "});" + "// Add simple in-memory checkpointer\n", + "// highlight-start\n", + "const memory = new MemorySaver();\n", + "const app = workflow.compile({ checkpointer: memory });\n", + "// highlight-end" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "This class takes a few parameters in addition to the chain that we want to wrap:\n", - "\n", - "- A factory function that returns a message history for a given session id. This allows your chain to handle multiple users at once by loading different messages for different conversations.\n", - "- An `inputMessagesKey` that specifies which part of the input should be tracked and stored in the chat history. In this example, we want to track the string passed in as input.\n", - "- A `historyMessagesKey` that specifies what the previous messages should be injected into the prompt as. Our prompt has a `MessagesPlaceholder` named `chat_history`, so we specify this property to match.\n", - " (For chains with multiple outputs) an `outputMessagesKey` which specifies which output to store as history. This is the inverse of `inputMessagesKey`.\n", - "\n", - "We can invoke this new chain as normal, with an additional `configurable` field that specifies the particular `sessionId` to pass to the factory function. 
This is unused for the demo, but in real-world chains, you’ll want to return a chat history corresponding to the passed session:" + " We'll pass the latest input to the conversation here and let the LangGraph keep track of the conversation history using the checkpointer:" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 25, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m`The translation of \"I love programming\" in French is \"J'adore la programmation.\"`\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m`The translation of \"I love programming\" in French is \"J'adore la programmation.\"`\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m20\u001b[39m, promptTokens: \u001b[33m39\u001b[39m, totalTokens: \u001b[33m59\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " HumanMessage {\n", + " \"id\": \"227b82a9-4084-46a5-ac79-ab9a3faa140e\",\n", + " \"content\": \"Translate to French: I love programming.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABSxVrvztgnasTeMSFbpZQmyYqjJZ\",\n", + " \"content\": \"J'adore la programmation.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 5,\n", + " \"promptTokens\": 35,\n", + " \"totalTokens\": 40\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_52a7f40b0b\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 35,\n", + " \"output_tokens\": 5,\n", + " \"total_tokens\": 40\n", + " }\n", + " }\n", + " ]\n", + "}\n" + ] } ], "source": [ - "await chainWithMessageHistory.invoke(\n", + "await app.invoke(\n", " {\n", - " input:\n", - " \"Translate this sentence from English to French: I love programming.\",\n", + " messages: [\n", + " {\n", + " role: \"user\",\n", + " content: \"Translate to French: I love programming.\"\n", + " }\n", + " ]\n", " },\n", - " { configurable: { sessionId: \"unused\" } }\n", + " {\n", + " configurable: { thread_id: \"1\" }\n", + " }\n", ");" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 26, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m'You just asked for the translation of the sentence \"I love programming\" from English to French.'\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: 
\u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m'You just asked for the translation of the sentence \"I love programming\" from English to French.'\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m19\u001b[39m, promptTokens: \u001b[33m74\u001b[39m, totalTokens: \u001b[33m93\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " HumanMessage {\n", + " \"id\": \"1a0560a4-9dcb-47a1-b441-80717e229706\",\n", + " \"content\": \"Translate to French: I love programming.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABSxVrvztgnasTeMSFbpZQmyYqjJZ\",\n", + " \"content\": \"J'adore la programmation.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 5,\n", + " \"promptTokens\": 35,\n", + " \"totalTokens\": 40\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_52a7f40b0b\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " },\n", + " HumanMessage {\n", + " \"id\": \"4f233a7d-4b08-4f53-bb60-cf0141a59721\",\n", + " \"content\": \"What did I just ask you?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABSxVs5QnlPfbihTOmJrCVg1Dh7Ol\",\n", + " \"content\": \"You asked me to translate \\\"I love programming\\\" into French.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 13,\n", + " \"promptTokens\": 55,\n", + " \"totalTokens\": 68\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_9f2bfdaa89\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 55,\n", + " \"output_tokens\": 13,\n", + " \"total_tokens\": 68\n", + " }\n", + " }\n", + " ]\n", + "}\n" + ] } ], "source": [ - "await chainWithMessageHistory.invoke(\n", + "await app.invoke(\n", " {\n", - " input: \"What did I just ask you?\",\n", + " messages: [\n", + " {\n", + " role: \"user\",\n", + " content: \"What did I just ask you?\"\n", + " }\n", + " ]\n", " },\n", - " { configurable: { sessionId: \"unused\" } }\n", + " {\n", + " configurable: { thread_id: \"1\" }\n", + " }\n", ");" ] }, @@ -429,159 +340,98 @@ "\n", "### Trimming messages\n", "\n", - "LLMs and chat models have limited context windows, and even if you’re not directly hitting limits, you may want to limit the amount of distraction the model has to deal with. One solution is to only load and store the most recent `n` messages. 
Let’s use an example history with some preloaded messages:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Hey there! I'm Nemo.\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Hey there! I'm Nemo.\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Hello!\"\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Hello!\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"How are you today?\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"How are you today?\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Fine thanks!\"\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Fine thanks!\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " }\n", - "]" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await demoEphemeralChatMessageHistory.clear();\n", - "\n", - "await demoEphemeralChatMessageHistory.addMessage(\n", - " new HumanMessage(\"Hey there! I'm Nemo.\")\n", - ");\n", - "\n", - "await demoEphemeralChatMessageHistory.addMessage(new AIMessage(\"Hello!\"));\n", - "\n", - "await demoEphemeralChatMessageHistory.addMessage(\n", - " new HumanMessage(\"How are you today?\")\n", - ");\n", - "\n", - "await demoEphemeralChatMessageHistory.addMessage(new AIMessage(\"Fine thanks!\"));\n", - "\n", - "await demoEphemeralChatMessageHistory.getMessages();" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let’s use this message history with the `RunnableWithMessageHistory` chain we declared above:" + "LLMs and chat models have limited context windows, and even if you're not directly hitting limits, you may want to limit the amount of distraction the model has to deal with. One solution is trim the history messages before passing them to the model. 
Let's use an example history with the `app` we declared above:" ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 27, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Your name is Nemo!\"\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Your name is Nemo!\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m6\u001b[39m, promptTokens: \u001b[33m66\u001b[39m, totalTokens: \u001b[33m72\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " HumanMessage {\n", + " \"id\": \"63057c3d-f980-4640-97d6-497a9f83ddee\",\n", + " \"content\": \"Hey there! I'm Nemo.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"c9f0c20a-8f55-4909-b281-88f2a45c4f05\",\n", + " \"content\": \"Hello!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " },\n", + " HumanMessage {\n", + " \"id\": \"fd7fb3a0-7bc7-4e84-99a9-731b30637b55\",\n", + " \"content\": \"How are you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"09b0debb-1d4a-4856-8821-b037f5d96ecf\",\n", + " \"content\": \"Fine thanks!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " },\n", + " HumanMessage {\n", + " \"id\": \"edc13b69-25a0-40ac-81b3-175e65dc1a9a\",\n", + " \"content\": \"What's my name?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABSxWKCTdRuh2ZifXsvFHSo5z5I0J\",\n", + " \"content\": \"Your name is Nemo! How can I assist you today, Nemo?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 14,\n", + " \"promptTokens\": 63,\n", + " \"totalTokens\": 77\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_a5d11b2ef2\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 63,\n", + " \"output_tokens\": 14,\n", + " \"total_tokens\": 77\n", + " }\n", + " }\n", + " ]\n", + "}\n" + ] } ], "source": [ - "const chainWithMessageHistory2 = new RunnableWithMessageHistory({\n", - " runnable: chain2,\n", - " getMessageHistory: (_sessionId) => demoEphemeralChatMessageHistory,\n", - " inputMessagesKey: \"input\",\n", - " historyMessagesKey: \"chat_history\",\n", - "});\n", + "const demoEphemeralChatHistory = [\n", + " { role: \"user\", content: \"Hey there! 
I'm Nemo.\" },\n", + " { role: \"assistant\", content: \"Hello!\" },\n", + " { role: \"user\", content: \"How are you today?\" },\n", + " { role: \"assistant\", content: \"Fine thanks!\" },\n", + "];\n", "\n", - "await chainWithMessageHistory2.invoke(\n", + "await app.invoke(\n", " {\n", - " input: \"What's my name?\",\n", + " messages: [\n", + " ...demoEphemeralChatHistory,\n", + " { role: \"user\", content: \"What's my name?\" }\n", + " ]\n", " },\n", - " { configurable: { sessionId: \"unused\" } }\n", + " {\n", + " configurable: { thread_id: \"2\" }\n", + " }\n", ");" ] }, @@ -589,551 +439,324 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We can see the chain remembers the preloaded name.\n", + "We can see the app remembers the preloaded name.\n", "\n", - "But let’s say we have a very small context window, and we want to trim the number of messages passed to the chain to only the 2 most recent ones. We can use the `clear` method to remove messages and re-add them to the history. We don’t have to, but let’s put this method at the front of our chain to ensure it’s always called:" + "But let's say we have a very small context window, and we want to trim the number of messages passed to the model to only the 2 most recent ones. We can use the built in [trimMessages](/docs/how_to/trim_messages/) util to trim messages based on their token count before they reach our prompt. In this case we'll count each message as 1 \"token\" and keep only the last two messages:" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 28, "metadata": {}, "outputs": [], "source": [ - "import {\n", - " RunnablePassthrough,\n", - " RunnableSequence,\n", - "} from \"@langchain/core/runnables\";\n", - "\n", - "const trimMessages = async (_chainInput: Record) => {\n", - " const storedMessages = await demoEphemeralChatMessageHistory.getMessages();\n", - " if (storedMessages.length <= 2) {\n", - " return false;\n", - " }\n", - " await demoEphemeralChatMessageHistory.clear();\n", - " for (const message of storedMessages.slice(-2)) {\n", - " demoEphemeralChatMessageHistory.addMessage(message);\n", - " }\n", - " return true;\n", + "import { START, END, MessagesAnnotation, StateGraph, MemorySaver } from \"@langchain/langgraph\";\n", + "import { trimMessages } from \"@langchain/core/messages\";\n", + "\n", + "// Define trimmer\n", + "// highlight-start\n", + "// count each message as 1 \"token\" (tokenCounter: (msgs) => msgs.length) and keep only the last two messages\n", + "const trimmer = trimMessages({ strategy: \"last\", maxTokens: 2, tokenCounter: (msgs) => msgs.length });\n", + "// highlight-end\n", + "\n", + "// Define the function that calls the model\n", + "const callModel2 = async (state: typeof MessagesAnnotation.State) => {\n", + " // highlight-start\n", + " const trimmedMessages = await trimmer.invoke(state.messages);\n", + " const systemPrompt = \n", + " \"You are a helpful assistant. 
\" +\n", + " \"Answer all questions to the best of your ability.\";\n", + " const messages = [{ role: \"system\", content: systemPrompt }, ...trimmedMessages];\n", + " // highlight-end\n", + " const response = await llm.invoke(messages);\n", + " return { messages: response };\n", "};\n", "\n", - "const chainWithTrimming = RunnableSequence.from([\n", - " RunnablePassthrough.assign({ messages_trimmed: trimMessages }),\n", - " chainWithMessageHistory2,\n", - "]);" + "const workflow2 = new StateGraph(MessagesAnnotation)\n", + " // Define the node and edge\n", + " .addNode(\"model\", callModel2)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", + "\n", + "// Add simple in-memory checkpointer\n", + "const app2 = workflow2.compile({ checkpointer: new MemorySaver() });" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Let’s call this new chain and check the messages afterwards:" + "Let's call this new app and check the response" ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 29, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m'P. Sherman is a fictional character who lives at 42 Wallaby Way, Sydney, from the movie \"Finding Nem'\u001b[39m... 3 more characters,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m'P. Sherman is a fictional character who lives at 42 Wallaby Way, Sydney, from the movie \"Finding Nem'\u001b[39m... 3 more characters,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m26\u001b[39m, promptTokens: \u001b[33m53\u001b[39m, totalTokens: \u001b[33m79\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " HumanMessage {\n", + " \"id\": \"0d9330a0-d9d1-4aaf-8171-ca1ac6344f7c\",\n", + " \"content\": \"What is my name?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"3a24e88b-7525-4797-9fcd-d751a378d22c\",\n", + " \"content\": \"Fine thanks!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " },\n", + " HumanMessage {\n", + " \"id\": \"276039c8-eba8-4c68-b015-81ec7704140d\",\n", + " \"content\": \"How are you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"2ad4f461-20e1-4982-ba3b-235cb6b02abd\",\n", + " \"content\": \"Hello!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " },\n", + " HumanMessage {\n", + " \"id\": \"52213cae-953a-463d-a4a0-a7368c9ee4db\",\n", + " \"content\": \"Hey there! 
I'm Nemo.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABSxWe9BRDl1pmzkNIDawWwU3hvKm\",\n", + " \"content\": \"I'm sorry, but I don't have access to personal information about you unless you've shared it with me during our conversation. How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 30,\n", + " \"promptTokens\": 39,\n", + " \"totalTokens\": 69\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_3537616b13\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 39,\n", + " \"output_tokens\": 30,\n", + " \"total_tokens\": 69\n", + " }\n", + " }\n", + " ]\n", + "}\n" + ] } ], "source": [ - "await chainWithTrimming.invoke(\n", + "await app2.invoke(\n", " {\n", - " input: \"Where does P. Sherman live?\",\n", + " messages: [\n", + " ...demoEphemeralChatHistory,\n", + " { role: \"user\", content: \"What is my name?\" }\n", + " ]\n", " },\n", - " { configurable: { sessionId: \"unused\" } }\n", - ");" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"What's my name?\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"What's my name?\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Your name is Nemo!\"\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Your name is Nemo!\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m6\u001b[39m, promptTokens: \u001b[33m66\u001b[39m, totalTokens: \u001b[33m72\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Where does P. Sherman live?\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Where does P. Sherman live?\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m'P. 
Sherman is a fictional character who lives at 42 Wallaby Way, Sydney, from the movie \"Finding Nem'\u001b[39m... 3 more characters,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m'P. Sherman is a fictional character who lives at 42 Wallaby Way, Sydney, from the movie \"Finding Nem'\u001b[39m... 3 more characters,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m26\u001b[39m, promptTokens: \u001b[33m53\u001b[39m, totalTokens: \u001b[33m79\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " }\n", - "]" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await demoEphemeralChatMessageHistory.getMessages();" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And we can see that our history has removed the two oldest messages while still adding the most recent conversation at the end. The next time the chain is called, `trimMessages` will be called again, and only the two most recent messages will be passed to the model. In this case, this means that the model will forget the name we gave it the next time we invoke it:" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"I'm sorry, I don't have access to your personal information. Can I help you with anything else?\"\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"I'm sorry, I don't have access to your personal information. Can I help you with anything else?\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m22\u001b[39m, promptTokens: \u001b[33m73\u001b[39m, totalTokens: \u001b[33m95\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await chainWithTrimming.invoke(\n", " {\n", - " input: \"What is my name?\",\n", - " },\n", - " { configurable: { sessionId: \"unused\" } }\n", + " configurable: { thread_id: \"3\" }\n", + " }\n", ");" ] }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Where does P. 
Sherman live?\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Where does P. Sherman live?\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m'P. Sherman is a fictional character who lives at 42 Wallaby Way, Sydney, from the movie \"Finding Nem'\u001b[39m... 3 more characters,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m'P. Sherman is a fictional character who lives at 42 Wallaby Way, Sydney, from the movie \"Finding Nem'\u001b[39m... 3 more characters,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m26\u001b[39m, promptTokens: \u001b[33m53\u001b[39m, totalTokens: \u001b[33m79\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"What is my name?\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"What is my name?\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"I'm sorry, I don't have access to your personal information. Can I help you with anything else?\"\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"I'm sorry, I don't have access to your personal information. 
Can I help you with anything else?\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m22\u001b[39m, promptTokens: \u001b[33m73\u001b[39m, totalTokens: \u001b[33m95\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " }\n", - "]" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await demoEphemeralChatMessageHistory.getMessages();" - ] - }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Summary memory\n", + "We can see that `trimMessages` was called and only the two most recent messages will be passed to the model. In this case, this means that the model forgot the name we gave it.\n", "\n", - "We can use this same pattern in other ways too. For example, we could use an additional LLM call to generate a summary of the conversation before calling our chain. Let’s recreate our chat history and chatbot chain:" + "Check out our [how to guide on trimming messages](/docs/how_to/trim_messages/) for more." ] }, { - "cell_type": "code", - "execution_count": 17, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "await demoEphemeralChatMessageHistory.clear();\n", - "\n", - "await demoEphemeralChatMessageHistory.addMessage(\n", - " new HumanMessage(\"Hey there! I'm Nemo.\")\n", - ");\n", - "\n", - "await demoEphemeralChatMessageHistory.addMessage(new AIMessage(\"Hello!\"));\n", - "\n", - "await demoEphemeralChatMessageHistory.addMessage(\n", - " new HumanMessage(\"How are you today?\")\n", - ");\n", + "### Summary memory\n", "\n", - "await demoEphemeralChatMessageHistory.addMessage(new AIMessage(\"Fine thanks!\"));" + "We can use this same pattern in other ways too. For example, we could use an additional LLM call to generate a summary of the conversation before calling our app. Let's recreate our chat history:" ] }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 30, "metadata": {}, "outputs": [], "source": [ - "const runnableWithSummaryMemoryPrompt = ChatPromptTemplate.fromMessages([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant. Answer all questions to the best of your ability. The provided chat history includes facts about the user you are speaking with.\",\n", - " ],\n", - " new MessagesPlaceholder(\"chat_history\"),\n", - " [\"human\", \"{input}\"],\n", - "]);\n", - "\n", - "const summaryMemoryChain = runnableWithSummaryMemoryPrompt.pipe(llm);\n", - "\n", - "const chainWithMessageHistory3 = new RunnableWithMessageHistory({\n", - " runnable: summaryMemoryChain,\n", - " getMessageHistory: (_sessionId) => demoEphemeralChatMessageHistory,\n", - " inputMessagesKey: \"input\",\n", - " historyMessagesKey: \"chat_history\",\n", - "});" + "const demoEphemeralChatHistory2 = [\n", + " { role: \"user\", content: \"Hey there! I'm Nemo.\" },\n", + " { role: \"assistant\", content: \"Hello!\" },\n", + " { role: \"user\", content: \"How are you today?\" },\n", + " { role: \"assistant\", content: \"Fine thanks!\" },\n", + "];" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "And now, let’s create a function that will distill previous interactions into a summary. 
We can add this one to the front of the chain too:" + "And now, let's update the model-calling function to distill previous interactions into a summary:" ] }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 31, "metadata": {}, "outputs": [], "source": [ - "const summarizeMessages = async (_chainInput: Record) => {\n", - " const storedMessages = await demoEphemeralChatMessageHistory.getMessages();\n", - " if (storedMessages.length === 0) {\n", - " return false;\n", + "import { START, END, MessagesAnnotation, StateGraph, MemorySaver } from \"@langchain/langgraph\";\n", + "import { RemoveMessage } from \"@langchain/core/messages\";\n", + "\n", + "\n", + "// Define the function that calls the model\n", + "const callModel3 = async (state: typeof MessagesAnnotation.State) => {\n", + " const systemPrompt = \n", + " \"You are a helpful assistant. \" +\n", + " \"Answer all questions to the best of your ability. \" +\n", + " \"The provided chat history includes a summary of the earlier conversation.\";\n", + " const systemMessage = { role: \"system\", content: systemPrompt };\n", + " const messageHistory = state.messages.slice(0, -1); // exclude the most recent user input\n", + " \n", + " // Summarize the messages if the chat history reaches a certain size\n", + " if (messageHistory.length >= 4) {\n", + " const lastHumanMessage = state.messages[state.messages.length - 1];\n", + " // Invoke the model to generate conversation summary\n", + " const summaryPrompt = \n", + " \"Distill the above chat messages into a single summary message. \" +\n", + " \"Include as many specific details as you can.\";\n", + " const summaryMessage = await llm.invoke([\n", + " ...messageHistory,\n", + " { role: \"user\", content: summaryPrompt }\n", + " ]);\n", + "\n", + " // Delete messages that we no longer want to show up\n", + " const deleteMessages = state.messages.map(m => new RemoveMessage({ id: m.id }));\n", + " // Re-add user message\n", + " const humanMessage = { role: \"user\", content: lastHumanMessage.content };\n", + " // Call the model with summary & response\n", + " const response = await llm.invoke([systemMessage, summaryMessage, humanMessage]);\n", + " return { messages: [summaryMessage, humanMessage, response, ...deleteMessages] };\n", + " } else {\n", + " const response = await llm.invoke([systemMessage, ...state.messages]);\n", + " return { messages: response };\n", " }\n", - " const summarizationPrompt = ChatPromptTemplate.fromMessages([\n", - " new MessagesPlaceholder(\"chat_history\"),\n", - " [\n", - " \"user\",\n", - " \"Distill the above chat messages into a single summary message. 
Include as many specific details as you can.\",\n", - " ],\n", - " ]);\n", - " const summarizationChain = summarizationPrompt.pipe(llm);\n", - " const summaryMessage = await summarizationChain.invoke({\n", - " chat_history: storedMessages,\n", - " });\n", - " await demoEphemeralChatMessageHistory.clear();\n", - " demoEphemeralChatMessageHistory.addMessage(summaryMessage);\n", - " return true;\n", "};\n", "\n", - "const chainWithSummarization = RunnableSequence.from([\n", - " RunnablePassthrough.assign({\n", - " messages_summarized: summarizeMessages,\n", - " }),\n", - " chainWithMessageHistory3,\n", - "]);" + "const workflow3 = new StateGraph(MessagesAnnotation)\n", + " // Define the node and edge\n", + " .addNode(\"model\", callModel3)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", + "\n", + "// Add simple in-memory checkpointer\n", + "const app3 = workflow3.compile({ checkpointer: new MemorySaver() });" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Let’s see if it remembers the name we gave it:" + "Let's see if it remembers the name we gave it:" ] }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 32, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m'You introduced yourself as \"Nemo.\"'\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m'You introduced yourself as \"Nemo.\"'\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m8\u001b[39m, promptTokens: \u001b[33m87\u001b[39m, totalTokens: \u001b[33m95\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABSxXjFDj6WRo7VLSneBtlAxUumPE\",\n", + " \"content\": \"Nemo greeted the assistant and asked how it was doing, to which the assistant responded that it was fine.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 22,\n", + " \"promptTokens\": 60,\n", + " \"totalTokens\": 82\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 60,\n", + " \"output_tokens\": 22,\n", + " \"total_tokens\": 82\n", + " }\n", + " },\n", + " HumanMessage {\n", + " \"id\": \"8b1309b7-c09e-47fb-9ab3-34047f6973e3\",\n", + " \"content\": \"What did I say my name was?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABSxYAQKiBsQ6oVypO4CLFDsi1HRH\",\n", + " \"content\": \"You mentioned that your name is Nemo.\",\n", + " \"additional_kwargs\": {},\n", + " 
\"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 73,\n", + " \"totalTokens\": 81\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_52a7f40b0b\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 73,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 81\n", + " }\n", + " }\n", + " ]\n", + "}\n" + ] } ], "source": [ - "await chainWithSummarization.invoke(\n", + "await app3.invoke(\n", " {\n", - " input: \"What did I say my name was?\",\n", + " messages: [\n", + " ...demoEphemeralChatHistory2,\n", + " { role: \"user\", content: \"What did I say my name was?\" }\n", + " ]\n", " },\n", " {\n", - " configurable: { sessionId: \"unused\" },\n", + " configurable: { thread_id: \"4\" }\n", " }\n", ");" ] }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[\n", - " AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"The conversation consists of a greeting from someone named Nemo and a general inquiry about their we\"\u001b[39m... 86 more characters,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"The conversation consists of a greeting from someone named Nemo and a general inquiry about their we\"\u001b[39m... 86 more characters,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m34\u001b[39m, promptTokens: \u001b[33m62\u001b[39m, totalTokens: \u001b[33m96\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"What did I say my name was?\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"What did I say my name was?\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m'You introduced yourself as \"Nemo.\"'\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m'You introduced yourself as \"Nemo.\"'\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: 
\u001b[33m8\u001b[39m, promptTokens: \u001b[33m87\u001b[39m, totalTokens: \u001b[33m95\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " }\n", - "]" - ] - }, - "execution_count": 24, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await demoEphemeralChatMessageHistory.getMessages();" - ] - }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Note that invoking the chain again will generate another summary generated from the initial summary plus new messages and so on. You could also design a hybrid approach where a certain number of messages are retained in chat history while others are summarized.\n", - "\n", - "## Next steps\n", - "\n", - "You've now learned how to manage memory in your chatbots\n", - "\n", - "Next, check out some of the other guides in this section, such as [how to add retrieval to your chatbot](/docs/how_to/chatbots_retrieval)." + "Note that invoking the app again will keep accumulating the history until it reaches the specified number of messages (four in our case). At that point we will generate another summary generated from the initial summary plus new messages and so on." ] } ], @@ -1144,14 +767,17 @@ "name": "deno" }, "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, "file_extension": ".ts", - "mimetype": "text/x.typescript", + "mimetype": "text/typescript", "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" + "version": "3.7.2" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/docs/core_docs/docs/how_to/chatbots_retrieval.ipynb b/docs/core_docs/docs/how_to/chatbots_retrieval.ipynb index a3af18fed5b4..eed68bdb0bc3 100644 --- a/docs/core_docs/docs/how_to/chatbots_retrieval.ipynb +++ b/docs/core_docs/docs/how_to/chatbots_retrieval.ipynb @@ -45,6 +45,7 @@ "outputs": [], "source": [ "// @lc-docs-hide-cell\n", + "\n", "import { ChatOpenAI } from \"@langchain/openai\";\n", "\n", "const llm = new ChatOpenAI({\n", diff --git a/docs/core_docs/docs/how_to/chatbots_tools.ipynb b/docs/core_docs/docs/how_to/chatbots_tools.ipynb index b5a3b2d0cdd1..d9f8ff25e52f 100644 --- a/docs/core_docs/docs/how_to/chatbots_tools.ipynb +++ b/docs/core_docs/docs/how_to/chatbots_tools.ipynb @@ -4,67 +4,107 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# How to use tools\n", + "# How to add tools to chatbots\n", "\n", ":::info Prerequisites\n", "\n", "This guide assumes familiarity with the following concepts:\n", "\n", "- [Chatbots](/docs/concepts/#messages)\n", - "- [Agents](https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/)\n", + "- [Agents](https://langchain-ai.github.io/langgraphjs/tutorials/multi_agent/agent_supervisor/)\n", "- [Chat history](/docs/concepts/#chat-history)\n", "\n", ":::\n", "\n", "This section will cover how to create conversational agents: chatbots that can interact with other systems and APIs using tools.\n", "\n", - "## Setup\n", + ":::note\n", + "\n", + "This how-to guide previously built a chatbot using [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html). 
You can access this version of the tutorial in the [v0.2 docs](https://js.langchain.com/v0.2/docs/how_to/chatbots_tools/).\n", + "\n", + "The LangGraph implementation offers a number of advantages over `RunnableWithMessageHistory`, including the ability to persist arbitrary components of an application's state (instead of only messages).\n", + "\n", + ":::\n", "\n", - "For this guide, we’ll be using an [tool calling agent](/docs/how_to/agent_executor) with a single tool for searching the web. The default will be powered by [Tavily](/docs/integrations/tools/tavily_search), but you can switch it out for any similar tool. The rest of this section will assume you’re using Tavily.\n", + "## Setup\n", "\n", - "You’ll need to [sign up for an account on the Tavily website](https://tavily.com), and install the following packages:\n", + "For this guide, we'll be using a [tool calling agent](https://langchain-ai.github.io/langgraphjs/concepts/agentic_concepts/#tool-calling-agent) with a single tool for searching the web. The default will be powered by [Tavily](/docs/integrations/tools/tavily_search), but you can switch it out for any similar tool. The rest of this section will assume you're using Tavily.\n", "\n", + "You'll need to [sign up for an account](https://tavily.com/) on the Tavily website, and install the following packages:\n", "\n", "```{=mdx}\n", "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", "\n", "\n", - " @langchain/openai langchain @langchain/core\n", + " @langchain/core @langchain/langgraph @langchain/community\n", "\n", + "```\n", + "\n", + "Let’s also set up a chat model that we’ll use for the below examples.\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```\n", + "\n", + "```typescript\n", + "process.env.TAVILY_API_KEY = \"YOUR_API_KEY\";\n", "```" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating an agent\n", + "\n", + "Our end goal is to create an agent that can respond conversationally to user questions while looking up information as needed.\n", + "\n", + "First, let's initialize Tavily and an OpenAI chat model capable of tool calling:" + ] + }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ - "import { TavilySearchResults } from \"@langchain/community/tools/tavily_search\";\n", + "// @lc-docs-hide-cell\n", + "\n", "import { ChatOpenAI } from \"@langchain/openai\";\n", "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " temperature: 0,\n", + "});" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import { TavilySearchResults } from \"@langchain/community/tools/tavily_search\";\n", + "\n", "const tools = [\n", " new TavilySearchResults({\n", " maxResults: 1,\n", " }),\n", - "];\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-3.5-turbo-1106\",\n", - " temperature: 0,\n", - "});" + "];" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To make our agent conversational, we must also choose a prompt with a placeholder for our chat history. Here’s an example:\n" + "To make our agent conversational, we can also specify a prompt. Here's an example:" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -78,8 +118,6 @@ " \"system\",\n", " \"You are a helpful assistant. 
You may not need to use tools for every query - the user may just want to chat!\",\n", " ],\n", - " [\"placeholder\", \"{messages}\"],\n", - " [\"placeholder\", \"{agent_scratchpad}\"],\n", "]);" ] }, @@ -87,30 +125,21 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Great! Now let’s assemble our agent:\n", - "\n", - "```{=mdx}\n", - ":::tip\n", - "As of `langchain` version `0.2.8`, the `createOpenAIToolsAgent` function now supports [OpenAI-formatted tools](https://api.js.langchain.com/interfaces/langchain_core.language_models_base.ToolDefinition.html).\n", - ":::\n", - "```\n" + "Great! Now let's assemble our agent using LangGraph's prebuilt [createReactAgent](https://langchain-ai.github.io/langgraphjs/reference/functions/langgraph_prebuilt.createReactAgent.html), which allows you to create a [tool-calling agent](https://langchain-ai.github.io/langgraphjs/concepts/agentic_concepts/#tool-calling-agent):" ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ - "import { AgentExecutor, createToolCallingAgent } from \"langchain/agents\";\n", - "\n", - "const agent = await createToolCallingAgent({\n", - " llm,\n", - " tools,\n", - " prompt,\n", - "});\n", + "import { createReactAgent } from \"@langchain/langgraph/prebuilt\"\n", "\n", - "const agentExecutor = new AgentExecutor({ agent, tools });" + "// messageModifier allows you to preprocess the inputs to the model inside ReAct agent\n", + "// in this case, since we're passing a prompt string, we'll just always add a SystemMessage\n", + "// with this prompt string before any other messages sent to the model\n", + "const agent = createReactAgent({ llm, tools, messageModifier: prompt })" ] }, { @@ -119,98 +148,108 @@ "source": [ "## Running the agent\n", "\n", - "Now that we’ve set up our agent, let’s try interacting with it! It can handle both trivial queries that require no lookup:\n" + "Now that we've set up our agent, let's try interacting with it! It can handle both trivial queries that require no lookup:" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 7, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "{\n", - " messages: [\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"I'm Nemo!\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"I'm Nemo!\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " }\n", - " ],\n", - " output: \u001b[32m\"Hi Nemo! It's great to meet you. 
How can I assist you today?\"\u001b[39m\n", - "}" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " HumanMessage {\n", + " \"id\": \"8c5fa465-e8d8-472a-9434-f574bf74537f\",\n", + " \"content\": \"I'm Nemo!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABTKLLriRcZin65zLAMB3WUf9Sg1t\",\n", + " \"content\": \"How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 93,\n", + " \"totalTokens\": 101\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_3537616b13\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 93,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 101\n", + " }\n", + " }\n", + " ]\n", + "}\n" + ] } ], "source": [ - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "\n", - "await agentExecutor.invoke({\n", - " messages: [new HumanMessage(\"I'm Nemo!\")],\n", - "});" + "await agent.invoke({ messages: [{ role: \"user\", content: \"I'm Nemo!\" }]})" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Or, it can use of the passed search tool to get up to date information if needed:\n" + "Or, it can use of the passed search tool to get up to date information if needed:" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 8, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "{\n", - " messages: [\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"What is the current conservation status of the Great Barrier Reef?\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"What is the current conservation status of the Great Barrier Reef?\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " }\n", - " ],\n", - " output: \u001b[32m\"The Great Barrier Reef has recorded its highest amount of coral cover since the Australian Institute\"\u001b[39m... 
688 more characters\n", - "}" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " HumanMessage {\n", + " \"id\": \"65c315b6-2433-4cb1-97c7-b60b5546f518\",\n", + " \"content\": \"What is the current conservation status of the Great Barrier Reef?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABTKLQn1e4axRhqIhpKMyzWWTGauO\",\n", + " \"content\": \"How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 93,\n", + " \"totalTokens\": 101\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_3537616b13\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 93,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 101\n", + " }\n", + " }\n", + " ]\n", + "}\n" + ] } ], "source": [ - "await agentExecutor.invoke({\n", - " messages: [\n", - " new HumanMessage(\n", - " \"What is the current conservation status of the Great Barrier Reef?\"\n", - " ),\n", - " ],\n", - "});" + "await agent.invoke({ messages: [{ role: \"user\", content: \"What is the current conservation status of the Great Barrier Reef?\" }]})" ] }, { @@ -219,246 +258,233 @@ "source": [ "## Conversational responses\n", "\n", - "Because our prompt contains a placeholder for chat history messages, our agent can also take previous interactions into account and respond conversationally like a standard chatbot:\n" + "Because our prompt contains a placeholder for chat history messages, our agent can also take previous interactions into account and respond conversationally like a standard chatbot:" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 9, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "{\n", - " messages: [\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"I'm Nemo!\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"I'm Nemo!\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Hello Nemo! How can I assist you today?\"\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Hello Nemo! 
How can I assist you today?\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " usage_metadata: \u001b[90mundefined\u001b[39m\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"What is my name?\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"What is my name?\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " }\n", - " ],\n", - " output: \u001b[32m\"Your name is Nemo!\"\u001b[39m\n", - "}" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " HumanMessage {\n", + " \"id\": \"6433afc5-31bd-44b3-b34c-f11647e1677d\",\n", + " \"content\": \"I'm Nemo!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " HumanMessage {\n", + " \"id\": \"f163b5f1-ea29-4d7a-9965-7c7c563d9cea\",\n", + " \"content\": \"Hello Nemo! How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " HumanMessage {\n", + " \"id\": \"382c3354-d02b-4888-98d8-44d75d045044\",\n", + " \"content\": \"What is my name?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABTKMKu7ThZDZW09yMIPTq2N723Cj\",\n", + " \"content\": \"How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 93,\n", + " \"totalTokens\": 101\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 93,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 101\n", + " }\n", + " }\n", + " ]\n", + "}\n" + ] } ], "source": [ - "import { AIMessage } from \"@langchain/core/messages\";\n", - "\n", - "await agentExecutor.invoke({\n", + "await agent.invoke({\n", " messages: [\n", - " new HumanMessage(\"I'm Nemo!\"),\n", - " new AIMessage(\"Hello Nemo! How can I assist you today?\"),\n", - " new HumanMessage(\"What is my name?\"),\n", - " ],\n", - "});" + " { role: \"user\", content: \"I'm Nemo!\" },\n", + " { role: \"user\", content: \"Hello Nemo! How can I assist you today?\" },\n", + " { role: \"user\", content: \"What is my name?\" }\n", + " ]\n", + "})" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "If preferred, you can also wrap the agent executor in a [`RunnableWithMessageHistory`](/docs/how_to/message_history/) class to internally manage history messages. Let's redeclare it this way:" + "If preferred, you can also add memory to the LangGraph agent to manage the history of messages. 
Let's redeclare it this way:" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ - "const agent2 = await createToolCallingAgent({\n", - " llm,\n", - " tools,\n", - " prompt,\n", - "});\n", + "import { MemorySaver } from \"@langchain/langgraph\"\n", "\n", - "const agentExecutor2 = new AgentExecutor({ agent: agent2, tools });" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then, because our agent executor has multiple outputs, we also have to set the `outputMessagesKey` property when initializing the wrapper:\n" + "// highlight-start\n", + "const memory = new MemorySaver()\n", + "const agent2 = createReactAgent({ llm, tools, messageModifier: prompt, checkpointSaver: memory })\n", + "// highlight-end" ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 13, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "{\n", - " messages: [\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"I'm Nemo!\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"I'm Nemo!\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " }\n", - " ],\n", - " output: \u001b[32m\"Hi Nemo! It's great to meet you. How can I assist you today?\"\u001b[39m\n", - "}" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " HumanMessage {\n", + " \"id\": \"a4a4f663-8192-4179-afcc-88d9d186aa80\",\n", + " \"content\": \"I'm Nemo!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABTKi4tBzOWMh3hgA46xXo7bJzb8r\",\n", + " \"content\": \"How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 93,\n", + " \"totalTokens\": 101\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 93,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 101\n", + " }\n", + " }\n", + " ]\n", + "}\n" + ] } ], "source": [ - "import { ChatMessageHistory } from \"langchain/stores/message/in_memory\";\n", - "import { RunnableWithMessageHistory } from \"@langchain/core/runnables\";\n", - "\n", - "const demoEphemeralChatMessageHistory = new ChatMessageHistory();\n", - "\n", - "const conversationalAgentExecutor = new RunnableWithMessageHistory({\n", - " runnable: agentExecutor2,\n", - " getMessageHistory: (_sessionId) => demoEphemeralChatMessageHistory,\n", - " inputMessagesKey: \"messages\",\n", - " outputMessagesKey: \"output\",\n", - "});\n", - "\n", - "await conversationalAgentExecutor.invoke(\n", - " { messages: [new HumanMessage(\"I'm Nemo!\")] },\n", - " { configurable: { sessionId: \"unused\" } }\n", - ");" + "await agent2.invoke({ messages: [{ role: \"user\", content: \"I'm Nemo!\" }]}, { configurable: { thread_id: \"1\" } })" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And then if we rerun our wrapped 
agent executor:" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 14, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "{\n", - " messages: [\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"I'm Nemo!\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"I'm Nemo!\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Hi Nemo! It's great to meet you. How can I assist you today?\"\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Hi Nemo! It's great to meet you. How can I assist you today?\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " usage_metadata: \u001b[90mundefined\u001b[39m\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"What is my name?\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"What is my name?\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " }\n", - " ],\n", - " output: \u001b[32m\"Your name is Nemo!\"\u001b[39m\n", - "}" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " HumanMessage {\n", + " \"id\": \"c5fd303c-eb49-41a0-868e-bc8c5aa02cf6\",\n", + " \"content\": \"I'm Nemo!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABTKi4tBzOWMh3hgA46xXo7bJzb8r\",\n", + " \"content\": \"How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 93,\n", + " \"totalTokens\": 101\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " },\n", + " HumanMessage {\n", + " \"id\": \"635b17b9-2ec7-412f-bf45-85d0e9944430\",\n", + " \"content\": \"What is my name?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABTKjBbmFlPb5t37aJ8p4NtoHb8YG\",\n", + " \"content\": \"How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 93,\n", + " \"totalTokens\": 101\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " 
\"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 93,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 101\n", + " }\n", + " }\n", + " ]\n", + "}\n" + ] } ], "source": [ - "await conversationalAgentExecutor.invoke(\n", - " { messages: [new HumanMessage(\"What is my name?\")] },\n", - " { configurable: { sessionId: \"unused\" } }\n", - ");" + "await agent2.invoke({ messages: [{ role: \"user\", content: \"What is my name?\" }]}, { configurable: { thread_id: \"1\" } })" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Next steps\n", + "This [LangSmith trace](https://smith.langchain.com/public/16cbcfa5-5ef1-4d4c-92c9-538a6e71f23d/r) shows what's going on under the hood.\n", + "\n", + "## Further reading\n", + "\n", + "For more on how to build agents, check these [LangGraph](https://langchain-ai.github.io/langgraphjs/) guides:\n", "\n", - "You've now learned how to create chatbots with tool-use capabilities.\n", + "* [agents conceptual guide](https://langchain-ai.github.io/langgraphjs/concepts/agentic_concepts/)\n", + "* [agents tutorials](https://langchain-ai.github.io/langgraphjs/tutorials/multi_agent/multi_agent_collaboration/)\n", + "* [createReactAgent](https://langchain-ai.github.io/langgraphjs/how-tos/create-react-agent/)\n", "\n", - "For more, check out the other guides in this section, including [how to add history to your chatbots](/docs/how_to/chatbots_memory)." + "For more on tool usage, you can also check out [this use case section](/docs/how_to#tools)." ] } ], @@ -469,14 +495,17 @@ "name": "deno" }, "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, "file_extension": ".ts", - "mimetype": "text/x.typescript", + "mimetype": "text/typescript", "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" + "version": "3.7.2" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/docs/core_docs/docs/how_to/message_history.ipynb b/docs/core_docs/docs/how_to/message_history.ipynb new file mode 100644 index 000000000000..dbca922041ff --- /dev/null +++ b/docs/core_docs/docs/how_to/message_history.ipynb @@ -0,0 +1,586 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "8165bd4c", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "keywords: [memory]\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "f47033eb", + "metadata": {}, + "source": [ + "# How to add message history\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chaining runnables](/docs/how_to/sequence/)\n", + "- [Prompt templates](/docs/concepts/#prompt-templates)\n", + "- [Chat Messages](/docs/concepts/#message-types)\n", + "\n", + ":::\n", + "\n", + "```{=mdx}\n", + ":::note\n", + "\n", + "This guide previously covered the [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html) abstraction. 
You can access this version of the guide in the [v0.2 docs](https://js.langchain.com/v0.2/docs/how_to/message_history/).\n", + "\n", + "The LangGraph implementation offers a number of advantages over `RunnableWithMessageHistory`, including the ability to persist arbitrary components of an application's state (instead of only messages).\n", + "\n", + ":::\n", + "```\n", + "\n", + "\n", + "Passing conversation state into and out a chain is vital when building a chatbot. LangGraph implements a built-in persistence layer, allowing chain states to be automatically persisted in memory, or external backends such as SQLite, Postgres or Redis. Details can be found in the LangGraph persistence documentation.\n", + "\n", + "In this guide we demonstrate how to add persistence to arbitrary LangChain runnables by wrapping them in a minimal LangGraph application. This lets us persist the message history and other elements of the chain's state, simplifying the development of multi-turn applications. It also supports multiple threads, enabling a single application to interact separately with multiple users.\n", + "\n", + "## Setup\n", + "\n", + "```{=mdx}\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + " @langchain/core @langchain/langgraph\n", + "\n", + "```\n", + "\n", + "Let’s also set up a chat model that we’ll use for the below examples.\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```\n" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "id": "8a4e4708", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " temperature: 0,\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "1f6121bc-2080-4ccc-acf0-f77de4bc951d", + "metadata": {}, + "source": [ + "## Example: message inputs\n", + "\n", + "Adding memory to a [chat model](/docs/concepts/#chat-models) provides a simple example. Chat models accept a list of messages as input and output a message. LangGraph includes a built-in `MessagesState` that we can use for this purpose.\n", + "\n", + "Below, we:\n", + "1. Define the graph state to be a list of messages;\n", + "2. Add a single node to the graph that calls a chat model;\n", + "3. 
Compile the graph with an in-memory checkpointer to store messages between runs.\n", + "\n", + ":::info\n", + "\n", + "The output of a LangGraph application is its [state](https://langchain-ai.github.io/langgraphjs/concepts/low_level/).\n", + "\n", + ":::" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "id": "f691a73a-a866-4354-9fff-8315605e2b8f", + "metadata": {}, + "outputs": [], + "source": [ + "import { START, END, MessagesAnnotation, StateGraph, MemorySaver } from \"@langchain/langgraph\";\n", + "\n", + "// Define the function that calls the model\n", + "const callModel = async (state: typeof MessagesAnnotation.State) => {\n", + " const response = await llm.invoke(state.messages);\n", + " // Update message history with response:\n", + " return { messages: response };\n", + "};\n", + "\n", + "// Define a new graph\n", + "const workflow = new StateGraph(MessagesAnnotation)\n", + " // Define the (single) node in the graph\n", + " .addNode(\"model\", callModel)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", + "\n", + "// Add memory\n", + "const memory = new MemorySaver();\n", + "const app = workflow.compile({ checkpointer: memory });" + ] + }, + { + "cell_type": "markdown", + "id": "c0b396a8-f81e-4139-b4b2-75adf61d8179", + "metadata": {}, + "source": [ + "When we run the application, we pass in a configuration object that specifies a `thread_id`. This ID is used to distinguish conversational threads (e.g., between different users)." + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "id": "e4309511-2140-4d91-8f5f-ea3661e6d179", + "metadata": {}, + "outputs": [], + "source": [ + "import { v4 as uuidv4 } from \"uuid\";\n", + "\n", + "const config = { configurable: { thread_id: uuidv4() } }" + ] + }, + { + "cell_type": "markdown", + "id": "108c45a2-4971-4120-ba64-9a4305a414bb", + "metadata": {}, + "source": [ + "We can then invoke the application:" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "id": "72a5ff6c-501f-4151-8dd9-f600f70554be", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABTqCeKnMQmG9IH8dNF5vPjsgXtcM\",\n", + " \"content\": \"Hi Bob! How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 10,\n", + " \"promptTokens\": 12,\n", + " \"totalTokens\": 22\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 12,\n", + " \"output_tokens\": 10,\n", + " \"total_tokens\": 22\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const input = [\n", + " {\n", + " role: \"user\",\n", + " content: \"Hi! I'm Bob.\",\n", + " }\n", + "]\n", + "const output = await app.invoke({ messages: input }, config)\n", + "// The output contains all messages in the state.\n", + "// This will long the last message in the conversation.\n", + "console.log(output.messages[output.messages.length - 1]);" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "id": "5931fb35-0fac-40e7-8ac6-b14cb4e926cd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABTqD5jrJXeKCpvoIDp47fvgw2OPn\",\n", + " \"content\": \"Your name is Bob. 
How can I help you today, Bob?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 14,\n", + " \"promptTokens\": 34,\n", + " \"totalTokens\": 48\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 34,\n", + " \"output_tokens\": 14,\n", + " \"total_tokens\": 48\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const input2 = [\n", + " {\n", + " role: \"user\",\n", + " content: \"What's my name?\",\n", + " }\n", + "]\n", + "const output2 = await app.invoke({ messages: input2 }, config)\n", + "console.log(output2.messages[output2.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "id": "91de6d12-881d-4d23-a421-f2e3bf829b79", + "metadata": {}, + "source": [ + "Note that states are separated for different threads. If we issue the same query to a thread with a new `thread_id`, the model indicates that it does not know the answer:" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "id": "6f12c26f-8913-4484-b2c5-b49eda2e6d7d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABTqDkctxwmXjeGOZpK6Km8jdCqdl\",\n", + " \"content\": \"I'm sorry, but I don't have access to personal information about users. How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 21,\n", + " \"promptTokens\": 11,\n", + " \"totalTokens\": 32\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_52a7f40b0b\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 11,\n", + " \"output_tokens\": 21,\n", + " \"total_tokens\": 32\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const config2 = { configurable: { thread_id: uuidv4() } }\n", + "const input3 = [\n", + " {\n", + " role: \"user\",\n", + " content: \"What's my name?\",\n", + " }\n", + "]\n", + "const output3 = await app.invoke({ messages: input3 }, config2)\n", + "console.log(output3.messages[output3.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "id": "6749ea95-3382-4843-bb96-cfececb9e4e5", + "metadata": {}, + "source": [ + "## Example: object inputs\n", + "\n", + "LangChain runnables often accept multiple inputs via separate keys in a single object argument. A common example is a prompt template with multiple parameters.\n", + "\n", + "Whereas before our runnable was a chat model, here we chain together a prompt template and chat model." + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "id": "6e7a402a-0994-4fc5-a607-fb990a248aa4", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"Answer in {language}.\"],\n", + " new MessagesPlaceholder(\"messages\"),\n", + "])\n", + "\n", + "const runnable = prompt.pipe(llm);" + ] + }, + { + "cell_type": "markdown", + "id": "f83107bd-ae61-45e1-a57e-94ab043aad4b", + "metadata": {}, + "source": [ + "For this scenario, we define the graph state to include these parameters (in addition to the message history). 
We then define a single-node graph in the same way as before.\n", + "\n", + "Note that in the below state:\n", + "- Updates to the `messages` list will append messages;\n", + "- Updates to the `language` string will overwrite the string." + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "id": "267429ea-be0f-4f80-8daf-c63d881a1436", + "metadata": {}, + "outputs": [], + "source": [ + "import { START, END, StateGraph, MemorySaver, MessagesAnnotation, Annotation } from \"@langchain/langgraph\";\n", + "\n", + "// Define the State\n", + "// highlight-next-line\n", + "const GraphAnnotation = Annotation.Root({\n", + " // highlight-next-line\n", + " language: Annotation(),\n", + " // Spread `MessagesAnnotation` into the state to add the `messages` field.\n", + " // highlight-next-line\n", + " ...MessagesAnnotation.spec,\n", + "})\n", + "\n", + "\n", + "// Define the function that calls the model\n", + "const callModel2 = async (state: typeof GraphAnnotation.State) => {\n", + " const response = await runnable.invoke(state);\n", + " // Update message history with response:\n", + " return { messages: [response] };\n", + "};\n", + "\n", + "const workflow2 = new StateGraph(GraphAnnotation)\n", + " .addNode(\"model\", callModel2)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", + "\n", + "const app2 = workflow2.compile({ checkpointer: new MemorySaver() });" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "id": "f3844fb4-58d7-43c8-b427-6d9f64d7411b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABTqFnCASRB5UhZ7XAbbf5T0Bva4U\",\n", + " \"content\": \"Lo siento, pero no tengo suficiente información para saber tu nombre. ¿Cómo te llamas?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 19,\n", + " \"promptTokens\": 19,\n", + " \"totalTokens\": 38\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 19,\n", + " \"output_tokens\": 19,\n", + " \"total_tokens\": 38\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const config3 = { configurable: { thread_id: uuidv4() } }\n", + "const input4 = {\n", + " messages: [\n", + " {\n", + " role: \"user\",\n", + " content: \"What's my name?\",\n", + " }\n", + " ],\n", + " language: \"Spanish\",\n", + "} \n", + "const output4 = await app2.invoke(input4, config3)\n", + "console.log(output4.messages[output4.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "id": "7df47824-ef18-4a6e-a416-345ec9203f88", + "metadata": {}, + "source": [ + "## Managing message history\n", + "\n", + "The message history (and other elements of the application state) can be accessed via `.getState`:" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "id": "1cbd6d82-43c1-4d11-98af-5c3ad9cd9b3b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Language: Spanish\n", + "[\n", + " HumanMessage {\n", + " \"content\": \"What's my name?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABTqFnCASRB5UhZ7XAbbf5T0Bva4U\",\n", + " \"content\": \"Lo siento, pero no tengo suficiente información para saber tu nombre. 
¿Cómo te llamas?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 19,\n", + " \"promptTokens\": 19,\n", + " \"totalTokens\": 38\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const state = (await app2.getState(config3)).values\n", + "\n", + "console.log(`Language: ${state.language}`);\n", + "console.log(state.messages)" + ] + }, + { + "cell_type": "markdown", + "id": "acfbccda-0bd6-4c4d-ae6e-8118520314e1", + "metadata": {}, + "source": [ + "We can also update the state via `.updateState`. For example, we can manually append a new message:" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "id": "e98310d7-8ab1-461d-94a7-dd419494ab8d", + "metadata": {}, + "outputs": [], + "source": [ + "const _ = await app2.updateState(config3, { messages: [{ role: \"user\", content: \"test\" }]})" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "id": "74ab3691-6f3b-49c5-aad0-2a90fc2a1e6a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Language: Spanish\n", + "[\n", + " HumanMessage {\n", + " \"content\": \"What's my name?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABTqFnCASRB5UhZ7XAbbf5T0Bva4U\",\n", + " \"content\": \"Lo siento, pero no tengo suficiente información para saber tu nombre. ¿Cómo te llamas?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 19,\n", + " \"promptTokens\": 19,\n", + " \"totalTokens\": 38\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " },\n", + " HumanMessage {\n", + " \"content\": \"test\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const state2 = (await app2.getState(config3)).values\n", + "\n", + "console.log(`Language: ${state2.language}`);\n", + "console.log(state2.messages)" + ] + }, + { + "cell_type": "markdown", + "id": "e4a1ea00-d7ff-4f18-b9ec-9aec5909d027", + "metadata": {}, + "source": [ + "For details on managing state, including deleting messages, see the LangGraph documentation:\n", + "\n", + "- [How to delete messages](https://langchain-ai.github.io/langgraphjs/how-tos/delete-messages/)\n", + "- [How to view and update past graph state](https://langchain-ai.github.io/langgraphjs/how-tos/time-travel/)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/how_to/message_history.mdx b/docs/core_docs/docs/how_to/message_history.mdx deleted file mode 100644 index 2712135ff482..000000000000 --- a/docs/core_docs/docs/how_to/message_history.mdx +++ /dev/null @@ -1,206 +0,0 @@ -# How to add message history - -:::info Prerequisites - -This guide assumes familiarity with the following concepts: - -- 
[LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language) -- [Chaining runnables](/docs/how_to/sequence/) -- [Configuring chain parameters at runtime](/docs/how_to/binding) -- [Prompt templates](/docs/concepts/#prompt-templates) -- [Chat Messages](/docs/concepts/#message-types) - -::: - -The `RunnableWithMessageHistory` lets us add message history to certain types of chains. - -Specifically, it can be used for any Runnable that takes as input one of - -- a sequence of [`BaseMessages`](/docs/concepts/#message-types) -- a dict with a key that takes a sequence of `BaseMessage` -- a dict with a key that takes the latest message(s) as a string or sequence of `BaseMessage`, and a separate key that takes historical messages - -And returns as output one of - -- a string that can be treated as the contents of an `AIMessage` -- a sequence of `BaseMessage` -- a dict with a key that contains a sequence of `BaseMessage` - -Let's take a look at some examples to see how it works. - -## Setup - -We'll use Upstash to store our chat message histories and Anthropic's claude-2 model so we'll need to install the following dependencies: - -```bash npm2yarn -npm install @langchain/anthropic @langchain/community @langchain/core @upstash/redis -``` - -You'll need to set environment variables for `ANTHROPIC_API_KEY` and grab your Upstash REST url and secret token. - -### [LangSmith](https://smith.langchain.com/) - -LangSmith is especially useful for something like message history injection, where it can be hard to otherwise understand what the inputs are to various parts of the chain. - -Note that LangSmith is not needed, but it is helpful. -If you do want to use LangSmith, after you sign up at the link above, make sure to uncoment the below and set your environment variables to start logging traces: - -```bash -export LANGCHAIN_TRACING_V2="true" -export LANGCHAIN_API_KEY="" - -# Reduce tracing latency if you are not in a serverless environment -# export LANGCHAIN_CALLBACKS_BACKGROUND=true -``` - -Let's create a simple runnable that takes a dict as input and returns a `BaseMessage`. - -In this case the `"question"` key in the input represents our input message, and the `"history"` key is where our historical messages will be injected. - -```typescript -import { - ChatPromptTemplate, - MessagesPlaceholder, -} from "@langchain/core/prompts"; -import { ChatAnthropic } from "@langchain/anthropic"; -import { UpstashRedisChatMessageHistory } from "@langchain/community/stores/message/upstash_redis"; -// For demos, you can also use an in-memory store: -// import { ChatMessageHistory } from "langchain/stores/message/in_memory"; - -const prompt = ChatPromptTemplate.fromMessages([ - ["system", "You're an assistant who's good at {ability}"], - new MessagesPlaceholder("history"), - ["human", "{question}"], -]); - -const chain = prompt.pipe( - new ChatAnthropic({ model: "claude-3-sonnet-20240229" }) -); -``` - -### Adding message history - -To add message history to our original chain we wrap it in the `RunnableWithMessageHistory` class. - -Crucially, we also need to define a `getMessageHistory()` method that takes a `sessionId` string and based on it returns a `BaseChatMessageHistory`. Given the same input, this method should return an equivalent output. - -In this case, we'll also want to specify `inputMessagesKey` (the key to be treated as the latest input message) and `historyMessagesKey` (the key to add historical messages to). 
- -```typescript -import { RunnableWithMessageHistory } from "@langchain/core/runnables"; - -const chainWithHistory = new RunnableWithMessageHistory({ - runnable: chain, - getMessageHistory: (sessionId) => - new UpstashRedisChatMessageHistory({ - sessionId, - config: { - url: process.env.UPSTASH_REDIS_REST_URL!, - token: process.env.UPSTASH_REDIS_REST_TOKEN!, - }, - }), - inputMessagesKey: "question", - historyMessagesKey: "history", -}); -``` - -## Invoking with config - -Whenever we call our chain with message history, we need to include an additional config object that contains the `session_id` - -```typescript -{ - configurable: { - sessionId: ""; - } -} -``` - -Given the same configuration, our chain should be pulling from the same chat message history. - -```typescript -const result = await chainWithHistory.invoke( - { - ability: "math", - question: "What does cosine mean?", - }, - { - configurable: { - sessionId: "foobarbaz", - }, - } -); - -console.log(result); - -/* - AIMessage { - content: 'Cosine refers to one of the basic trigonometric functions. Specifically:\n' + - '\n' + - '- Cosine is one of the three main trigonometric functions, along with sine and tangent. It is often abbreviated as cos.\n' + - '\n' + - '- For a right triangle with sides a, b, and c (where c is the hypotenuse), cosine represents the ratio of the length of the adjacent side (a) to the length of the hypotenuse (c). So cos(A) = a/c, where A is the angle opposite side a.\n' + - '\n' + - '- On the Cartesian plane, cosine represents the x-coordinate of a point on the unit circle for a given angle. So if you take an angle θ on the unit circle, the cosine of θ gives you the x-coordinate of where the terminal side of that angle intersects the circle.\n' + - '\n' + - '- The cosine function has a periodic waveform that oscillates between 1 and -1. Its graph forms a cosine wave.\n' + - '\n' + - 'So in essence, cosine helps relate an angle in a right triangle to the ratio of two of its sides. Along with sine and tangent, it is foundational to trigonometry and mathematical modeling of periodic functions.', - name: undefined, - additional_kwargs: { - id: 'msg_01QnnAkKEz7WvhJrwLWGbLBm', - type: 'message', - role: 'assistant', - model: 'claude-3-sonnet-20240229', - stop_reason: 'end_turn', - stop_sequence: null - } - } -*/ - -const result2 = await chainWithHistory.invoke( - { - ability: "math", - question: "What's its inverse?", - }, - { - configurable: { - sessionId: "foobarbaz", - }, - } -); - -console.log(result2); - -/* - AIMessage { - content: 'The inverse of the cosine function is the arcsine or inverse sine function, often written as sin−1(x) or sin^{-1}(x).\n' + - '\n' + - 'Some key properties of the inverse cosine function:\n' + - '\n' + - '- It accepts values between -1 and 1 as inputs and returns angles from 0 to π radians (0 to 180 degrees). This is the inverse of the regular cosine function, which takes angles and returns the cosine ratio.\n' + - '\n' + - '- It is also called cos−1(x) or cos^{-1}(x) (read as "cosine inverse of x").\n' + - '\n' + - '- The notation sin−1(x) is usually preferred over cos−1(x) since it relates more directly to the unit circle definition of cosine. sin−1(x) gives the angle whose sine equals x.\n' + - '\n' + - '- The arcsine function is one-to-one on the domain [-1, 1]. This means every output angle maps back to exactly one input ratio x. 
This one-to-one mapping is what makes it the mathematical inverse of cosine.\n' + - '\n' + - 'So in summary, arcsine or inverse sine, written as sin−1(x) or sin^{-1}(x), gives you the angle whose cosine evaluates to the input x, undoing the cosine function. It is used throughout trigonometry and calculus.', - additional_kwargs: { - id: 'msg_01PYRhpoUudApdJvxug6R13W', - type: 'message', - role: 'assistant', - model: 'claude-3-sonnet-20240229', - stop_reason: 'end_turn', - stop_sequence: null - } - } -*/ -``` - -:::tip -[Langsmith trace](https://smith.langchain.com/public/50377a89-d0b8-413b-8cd7-8e6618835e00/r) -::: - -Looking at the Langsmith trace for the second call, we can see that when constructing the prompt, a "history" variable has been injected which is a list of two messages (our first input and first output). diff --git a/docs/core_docs/docs/how_to/qa_chat_history_how_to.ipynb b/docs/core_docs/docs/how_to/qa_chat_history_how_to.ipynb index cead1afc3fa2..4ed8e4bd849e 100644 --- a/docs/core_docs/docs/how_to/qa_chat_history_how_to.ipynb +++ b/docs/core_docs/docs/how_to/qa_chat_history_how_to.ipynb @@ -4,24 +4,29 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# How to add chat history to a question-answering chain\n", + "# How to add chat history\n", "\n", - ":::info Prerequisites\n", "\n", - "This guide assumes familiarity with the following:\n", + ":::note\n", "\n", - "- [Retrieval-augmented generation](/docs/tutorials/rag/)\n", + "This tutorial previously built a chatbot using [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html). You can access this version of the tutorial in the [v0.2 docs](https://js.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/).\n", + "\n", + "The LangGraph implementation offers a number of advantages over `RunnableWithMessageHistory`, including the ability to persist arbitrary components of an application's state (instead of only messages).\n", "\n", ":::\n", "\n", "In many Q&A applications we want to allow the user to have a back-and-forth conversation, meaning the application needs some sort of \"memory\" of past questions and answers, and some logic for incorporating those into its current thinking.\n", "\n", - "In this guide we focus on **adding logic for incorporating historical messages, and NOT on chat history management.** Chat history management is [covered here](/docs/how_to/message_history).\n", + "In this guide we focus on **adding logic for incorporating historical messages.**\n", + "\n", + "This is largely a condensed version of the [Conversational RAG tutorial](/docs/tutorials/qa_chat_history).\n", + "\n", + "We will cover two approaches:\n", "\n", - "We'll work off of the Q&A app we built over the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng. We'll need to update two things about our existing app:\n", + "1. [Chains](/docs/how_to/qa_chat_history_how_to#chains), in which we always execute a retrieval step;\n", + "2. [Agents](/docs/how_to/qa_chat_history_how_to#agents), in which we give an LLM discretion over whether and how to execute a retrieval step (or multiple steps).\n", "\n", - "1. **Prompt**: Update our prompt to support historical messages as an input.\n", - "2. **Contextualizing questions**: Add a sub-chain that takes the latest user question and reformulates it in the context of the chat history. 
This is needed in case the latest question references some context from past messages. For example, if a user asks a follow-up question like \"Can you elaborate on the second point?\", this cannot be understood without the context of the previous message. Therefore we can't effectively perform retrieval with a question like this." + "For the external knowledge source, we will use the same [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng from the [RAG tutorial](/docs/tutorials/rag)." ] }, { @@ -36,7 +41,7 @@ "We’ll use the following packages:\n", "\n", "```bash\n", - "npm install --save langchain @langchain/openai cheerio\n", + "npm install --save langchain @langchain/openai langchain cheerio uuid\n", "```\n", "\n", "We need to set environment variable `OPENAI_API_KEY`:\n", @@ -66,6 +71,43 @@ "```" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Chains {#chains}\n", + "\n", + "In a conversational RAG application, queries issued to the retriever should be informed by the context of the conversation. LangChain provides a [createHistoryAwareRetriever](https://api.js.langchain.com/functions/langchain.chains_history_aware_retriever.createHistoryAwareRetriever.html) constructor to simplify this. It constructs a chain that accepts keys `input` and `chat_history` as input, and has the same output schema as a retriever. `createHistoryAwareRetriever` requires as inputs: \n", + "\n", + "1. LLM;\n", + "2. Retriever;\n", + "3. Prompt.\n", + "\n", + "First we obtain these objects:\n", + "\n", + "### LLM\n", + "\n", + "We can use any supported chat model:\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\"\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({ model: \"gpt-4o\" });" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -75,21 +117,14 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ - "import \"cheerio\";\n", "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n", "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", "import { MemoryVectorStore } from \"langchain/vectorstores/memory\"\n", - "import { OpenAIEmbeddings, ChatOpenAI } from \"@langchain/openai\";\n", - "import { pull } from \"langchain/hub\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { RunnableSequence, RunnablePassthrough } from \"@langchain/core/runnables\";\n", - "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "\n", - "import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", "\n", "const loader = new CheerioWebBaseLoader(\n", " \"https://lilianweng.github.io/posts/2023-06-23-agent/\"\n", @@ -102,14 +137,112 @@ "const vectorStore = await MemoryVectorStore.fromDocuments(splits, new OpenAIEmbeddings());\n", "\n", "// Retrieve and generate using the relevant snippets of the blog.\n", - "const retriever = vectorStore.asRetriever();\n", - "// Tip - you can edit this!\n", - "const prompt = await pull(\"rlm/rag-prompt\");\n", - "const llm = new ChatOpenAI({ model: \"gpt-3.5-turbo\", temperature: 0 });\n", - 
"const ragChain = await createStuffDocumentsChain({\n", + "const retriever = vectorStore.asRetriever();" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Prompt\n", + "\n", + "We'll use a prompt that includes a `MessagesPlaceholder` variable under the name \"chat_history\". This allows us to pass in a list of Messages to the prompt using the \"chat_history\" input key, and these messages will be inserted after the system message and before the human message containing the latest question." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n", + "\n", + "const contextualizeQSystemPrompt = (\n", + " \"Given a chat history and the latest user question \" +\n", + " \"which might reference context in the chat history, \" +\n", + " \"formulate a standalone question which can be understood \" +\n", + " \"without the chat history. Do NOT answer the question, \" +\n", + " \"just reformulate it if needed and otherwise return it as is.\"\n", + ")\n", + "\n", + "const contextualizeQPrompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\"system\", contextualizeQSystemPrompt],\n", + " new MessagesPlaceholder(\"chat_history\"),\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Assembling the chain\n", + "\n", + "We can then instantiate the history-aware retriever:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "import { createHistoryAwareRetriever } from \"langchain/chains/history_aware_retriever\";\n", + "\n", + "const historyAwareRetriever = await createHistoryAwareRetriever({\n", " llm,\n", - " prompt,\n", - " outputParser: new StringOutputParser(),\n", + " retriever,\n", + " rephrasePrompt: contextualizeQPrompt\n", + "});\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This chain prepends a rephrasing of the input query to our retriever, so that the retrieval incorporates the context of the conversation.\n", + "\n", + "Now we can build our full QA chain.\n", + "\n", + "As in the [RAG tutorial](/docs/tutorials/rag), we will use [createStuffDocumentsChain](https://api.js.langchain.com/functions/langchain.chains_combine_documents.createStuffDocumentsChain.html) to generate a `questionAnswerChain`, with input keys `context`, `chat_history`, and `input`-- it accepts the retrieved context alongside the conversation history and query to generate an answer.\n", + "\n", + "We build our final `ragChain` with [createRetrievalChain](https://api.js.langchain.com/functions/langchain.chains_retrieval.createRetrievalChain.html). This chain applies the `historyAwareRetriever` and `questionAnswerChain` in sequence, retaining intermediate outputs such as the retrieved context for convenience. It has input keys `input` and `chat_history`, and includes `input`, `chat_history`, `context`, and `answer` in its output." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n", + "import { createRetrievalChain } from \"langchain/chains/retrieval\";\n", + "\n", + "const systemPrompt = \n", + " \"You are an assistant for question-answering tasks. 
\" +\n", + " \"Use the following pieces of retrieved context to answer \" +\n", + " \"the question. If you don't know the answer, say that you \" +\n", + " \"don't know. Use three sentences maximum and keep the \" +\n", + " \"answer concise.\" +\n", + " \"\\n\\n\" +\n", + " \"{context}\";\n", + "\n", + "const qaPrompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", systemPrompt],\n", + " new MessagesPlaceholder(\"chat_history\"),\n", + " [\"human\", \"{input}\"],\n", + "]);\n", + "\n", + "const questionAnswerChain = await createStuffDocumentsChain({\n", + " llm,\n", + " prompt: qaPrompt,\n", + "});\n", + "\n", + "const ragChain = await createRetrievalChain({\n", + " retriever: historyAwareRetriever,\n", + " combineDocsChain: questionAnswerChain,\n", "});" ] }, @@ -117,278 +250,885 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Let's see what this prompt actually looks like" + "### Stateful Management of chat history\n", + "\n", + "We have added application logic for incorporating chat history, but we are still manually plumbing it through our application. In production, the Q&A application we usually persist the chat history into a database, and be able to read and update it appropriately.\n", + "\n", + "[LangGraph](https://langchain-ai.github.io/langgraphjs/) implements a built-in [persistence layer](https://langchain-ai.github.io/langgraphjs/concepts/persistence/), making it ideal for chat applications that support multiple conversational turns.\n", + "\n", + "Wrapping our chat model in a minimal LangGraph application allows us to automatically persist the message history, simplifying the development of multi-turn applications.\n", + "\n", + "LangGraph comes with a simple [in-memory checkpointer](https://langchain-ai.github.io/langgraphjs/reference/classes/checkpoint.MemorySaver.html), which we use below. See its documentation for more detail, including how to use different persistence backends (e.g., SQLite or Postgres).\n", + "\n", + "For a detailed walkthrough of how to manage message history, head to the How to add message history (memory) guide." 
] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "import { AIMessage, BaseMessage, HumanMessage } from \"@langchain/core/messages\";\n", + "import { StateGraph, START, END, MemorySaver, messagesStateReducer, Annotation } from \"@langchain/langgraph\";\n", + "\n", + "// Define the State interface\n", + "const GraphAnnotation = Annotation.Root({\n", + " input: Annotation(),\n", + " chat_history: Annotation({\n", + " reducer: messagesStateReducer,\n", + " default: () => [],\n", + " }),\n", + " context: Annotation(),\n", + " answer: Annotation(),\n", + "})\n", + "\n", + "// Define the call_model function\n", + "async function callModel(state: typeof GraphAnnotation.State) {\n", + " const response = await ragChain.invoke(state);\n", + " return {\n", + " chat_history: [\n", + " new HumanMessage(state.input),\n", + " new AIMessage(response.answer),\n", + " ],\n", + " context: response.context,\n", + " answer: response.answer,\n", + " };\n", + "}\n", + "\n", + "// Create the workflow\n", + "const workflow = new StateGraph(GraphAnnotation)\n", + " .addNode(\"model\", callModel)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", + "\n", + "// Compile the graph with a checkpointer object\n", + "const memory = new MemorySaver();\n", + "const app = workflow.compile({ checkpointer: memory });" + ] + }, + { + "cell_type": "code", + "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\n", - "Question: {question} \n", - "Context: {context} \n", - "Answer:\n" + "Task Decomposition is the process of breaking down a complicated task into smaller, simpler, and more manageable steps. Techniques like Chain of Thought (CoT) and Tree of Thoughts expand on this by enabling agents to think step by step or explore multiple reasoning possibilities at each step. This allows for a more structured and interpretable approach to handling complex tasks.\n" ] } ], "source": [ - "console.log(prompt.promptMessages.map((msg) => msg.prompt.template).join(\"\\n\"));" + "import { v4 as uuidv4 } from \"uuid\";\n", + "\n", + "const threadId = uuidv4();\n", + "const config = { configurable: { thread_id: threadId } };\n", + "\n", + "const result = await app.invoke(\n", + " { input: \"What is Task Decomposition?\" },\n", + " config,\n", + ")\n", + "console.log(result.answer);" ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 10, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "\u001b[32m\"Task Decomposition involves breaking down complex tasks into smaller and simpler steps to make them \"\u001b[39m... 
243 more characters" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "One way of doing task decomposition is by using an LLM with simple prompting, such as asking \"Steps for XYZ.\\n1.\" or \"What are the subgoals for achieving XYZ?\" This method leverages direct prompts to guide the model in breaking down tasks.\n" + ] } ], "source": [ - "await ragChain.invoke({\n", - " context: await retriever.invoke(\"What is Task Decomposition?\"),\n", - " question: \"What is Task Decomposition?\"\n", - "});" + "const result2 = await app.invoke(\n", + " { input: \"What is one way of doing it?\" },\n", + " config,\n", + ")\n", + "console.log(result2.answer);" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Contextualizing the question\n", + "The conversation history can be inspected via the state of the application:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HumanMessage {\n", + " \"content\": \"What is Task Decomposition?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + "}\n", + "AIMessage {\n", + " \"content\": \"Task Decomposition is the process of breaking down a complicated task into smaller, simpler, and more manageable steps. Techniques like Chain of Thought (CoT) and Tree of Thoughts expand on this by enabling agents to think step by step or explore multiple reasoning possibilities at each step. This allows for a more structured and interpretable approach to handling complex tasks.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + "}\n", + "HumanMessage {\n", + " \"content\": \"What is one way of doing it?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + "}\n", + "AIMessage {\n", + " \"content\": \"One way of doing task decomposition is by using an LLM with simple prompting, such as asking \\\"Steps for XYZ.\\\\n1.\\\" or \\\"What are the subgoals for achieving XYZ?\\\" This method leverages direct prompts to guide the model in breaking down tasks.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + "}\n" + ] + } + ], + "source": [ + "const chatHistory = (await app.getState(config)).values.chat_history;\n", + "for (const message of chatHistory) {\n", + " console.log(message);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tying it together\n", "\n", - "First we'll need to define a sub-chain that takes historical messages and the latest user question, and reformulates the question if it makes reference to any information in the historical information.\n", + "![](../../static/img/conversational_retrieval_chain.png)\n", "\n", - "We'll use a prompt that includes a `MessagesPlaceholder` variable under the name \"chat_history\". This allows us to pass in a list of Messages to the prompt using the \"chat_history\" input key, and these messages will be inserted after the system message and before the human message containing the latest question." 
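To see concretely where those messages land, you can format the prompt by hand with a toy history — a quick sanity check using the `qaPrompt` defined earlier and hypothetical values, without calling a model:

```typescript
import { HumanMessage, AIMessage } from "@langchain/core/messages";

// Format the prompt only, just to inspect the resulting message order.
const previewMessages = await qaPrompt.formatMessages({
  context: "Task decomposition means breaking a task into smaller steps.",
  chat_history: [
    new HumanMessage("What is Task Decomposition?"),
    new AIMessage("Breaking a complex task into smaller, simpler steps."),
  ],
  input: "What is one way of doing it?",
});

console.log(previewMessages.map((m) => m._getType()));
// Expected shape: [ "system", "human", "ai", "human" ]
```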
+ "For convenience, we tie together all of the necessary steps in a single code cell:" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 12, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Task Decomposition is the process of breaking a complicated task into smaller, simpler steps to enhance model performance on complex tasks. Techniques like Chain of Thought (CoT) and Tree of Thoughts (ToT) are used for this, with CoT focusing on step-by-step thinking and ToT exploring multiple reasoning possibilities at each step. Decomposition can be carried out by the LLM itself, using task-specific instructions, or through human inputs.\n", + "One way of doing task decomposition is by prompting the LLM with simple instructions such as \"Steps for XYZ.\\n1.\" or \"What are the subgoals for achieving XYZ?\" This encourages the model to break down the task into smaller, manageable steps on its own.\n" + ] + } + ], "source": [ + "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n", + "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\"\n", + "import { OpenAIEmbeddings, ChatOpenAI } from \"@langchain/openai\";\n", "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n", + "import { createHistoryAwareRetriever } from \"langchain/chains/history_aware_retriever\";\n", + "import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n", + "import { createRetrievalChain } from \"langchain/chains/retrieval\";\n", + "import { AIMessage, BaseMessage, HumanMessage } from \"@langchain/core/messages\";\n", + "import { StateGraph, START, END, MemorySaver, messagesStateReducer, Annotation } from \"@langchain/langgraph\";\n", + "import { v4 as uuidv4 } from \"uuid\";\n", "\n", - "const contextualizeQSystemPrompt = `Given a chat history and the latest user question\n", - "which might reference context in the chat history, formulate a standalone question\n", - "which can be understood without the chat history. Do NOT answer the question,\n", - "just reformulate it if needed and otherwise return it as is.`;\n", + "const llm2 = new ChatOpenAI({ model: \"gpt-4o\" });\n", "\n", - "const contextualizeQPrompt = ChatPromptTemplate.fromMessages([\n", - " [\"system\", contextualizeQSystemPrompt],\n", + "const loader2 = new CheerioWebBaseLoader(\n", + " \"https://lilianweng.github.io/posts/2023-06-23-agent/\"\n", + ");\n", + "\n", + "const docs2 = await loader2.load();\n", + "\n", + "const textSplitter2 = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n", + "const splits2 = await textSplitter2.splitDocuments(docs2);\n", + "const vectorStore2 = await MemoryVectorStore.fromDocuments(splits2, new OpenAIEmbeddings());\n", + "\n", + "// Retrieve and generate using the relevant snippets of the blog.\n", + "const retriever2 = vectorStore2.asRetriever();\n", + "\n", + "const contextualizeQSystemPrompt2 =\n", + " \"Given a chat history and the latest user question \" +\n", + " \"which might reference context in the chat history, \" +\n", + " \"formulate a standalone question which can be understood \" +\n", + " \"without the chat history. 
Do NOT answer the question, \" +\n", + " \"just reformulate it if needed and otherwise return it as is.\";\n", + "\n", + "const contextualizeQPrompt2 = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\"system\", contextualizeQSystemPrompt2],\n", + " new MessagesPlaceholder(\"chat_history\"),\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const historyAwareRetriever2 = await createHistoryAwareRetriever({\n", + " llm: llm2,\n", + " retriever: retriever2,\n", + " rephrasePrompt: contextualizeQPrompt2\n", + "});\n", + "\n", + "const systemPrompt2 = \n", + " \"You are an assistant for question-answering tasks. \" +\n", + " \"Use the following pieces of retrieved context to answer \" +\n", + " \"the question. If you don't know the answer, say that you \" +\n", + " \"don't know. Use three sentences maximum and keep the \" +\n", + " \"answer concise.\" +\n", + " \"\\n\\n\" +\n", + " \"{context}\";\n", + "\n", + "const qaPrompt2 = ChatPromptTemplate.fromMessages([\n", + " [\"system\", systemPrompt2],\n", " new MessagesPlaceholder(\"chat_history\"),\n", - " [\"human\", \"{question}\"]\n", + " [\"human\", \"{input}\"],\n", "]);\n", - "const contextualizeQChain = contextualizeQPrompt.pipe(llm).pipe(new StringOutputParser());" + "\n", + "const questionAnswerChain2 = await createStuffDocumentsChain({\n", + " llm: llm2,\n", + " prompt: qaPrompt2,\n", + "});\n", + "\n", + "const ragChain2 = await createRetrievalChain({\n", + " retriever: historyAwareRetriever2,\n", + " combineDocsChain: questionAnswerChain2,\n", + "});\n", + "\n", + "// Define the State interface\n", + "const GraphAnnotation2 = Annotation.Root({\n", + " input: Annotation(),\n", + " chat_history: Annotation({\n", + " reducer: messagesStateReducer,\n", + " default: () => [],\n", + " }),\n", + " context: Annotation(),\n", + " answer: Annotation(),\n", + "})\n", + "\n", + "// Define the call_model function\n", + "async function callModel2(state: typeof GraphAnnotation2.State) {\n", + " const response = await ragChain2.invoke(state);\n", + " return {\n", + " chat_history: [\n", + " new HumanMessage(state.input),\n", + " new AIMessage(response.answer),\n", + " ],\n", + " context: response.context,\n", + " answer: response.answer,\n", + " };\n", + "}\n", + "\n", + "// Create the workflow\n", + "const workflow2 = new StateGraph(GraphAnnotation2)\n", + " .addNode(\"model\", callModel2)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", + "\n", + "// Compile the graph with a checkpointer object\n", + "const memory2 = new MemorySaver();\n", + "const app2 = workflow2.compile({ checkpointer: memory2 });\n", + "\n", + "const threadId2 = uuidv4();\n", + "const config2 = { configurable: { thread_id: threadId2 } };\n", + "\n", + "const result3 = await app2.invoke(\n", + " { input: \"What is Task Decomposition?\" },\n", + " config2,\n", + ")\n", + "console.log(result3.answer);\n", + "\n", + "const result4 = await app2.invoke(\n", + " { input: \"What is one way of doing it?\" },\n", + " config2,\n", + ")\n", + "console.log(result4.answer);" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Using this chain we can ask follow-up questions that reference past messages and have them reformulated into standalone questions:" + "## Agents {#agents}\n", + "\n", + "Agents leverage the reasoning capabilities of LLMs to make decisions during execution. Using agents allow you to offload some discretion over the retrieval process. 
Although their behavior is less predictable than chains, they offer some advantages in this context:\n", + "- Agents generate the input to the retriever directly, without necessarily needing us to explicitly build in contextualization, as we did above;\n", + "- Agents can execute multiple retrieval steps in service of a query, or refrain from executing a retrieval step altogether (e.g., in response to a generic greeting from a user).\n", + "\n", + "### Retrieval tool\n", + "\n", + "Agents can access \"tools\" and manage their execution. In this case, we will convert our retriever into a LangChain tool to be wielded by the agent:" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 13, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\u001b[32m'What is the definition of \"large\" in this context?'\u001b[39m" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n", + "import { createRetrieverTool } from \"langchain/tools/retriever\";\n", "\n", - "await contextualizeQChain.invoke({\n", - " chat_history: [\n", - " new HumanMessage(\"What does LLM stand for?\"),\n", - " new AIMessage(\"Large language model\") \n", - " ],\n", - " question: \"What is meant by large\",\n", - "})" + "const tool = createRetrieverTool(\n", + " retriever,\n", + " {\n", + " name: \"blog_post_retriever\",\n", + " description: \"Searches and returns excerpts from the Autonomous Agents blog post.\",\n", + " }\n", + ")\n", + "const tools = [tool]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Chain with chat history\n", + "### Agent constructor\n", "\n", - "And now we can build our full QA chain. \n", + "Now that we have defined the tools and the LLM, we can create the agent. We will be using [LangGraph](/docs/concepts/#langgraph) to construct the agent. \n", + "Currently we are using a high level interface to construct the agent, but the nice thing about LangGraph is that this high-level interface is backed by a low-level, highly controllable API in case you want to modify the agent logic." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n", "\n", - "Notice we add some routing functionality to only run the \"condense question chain\" when our chat history isn't empty. Here we're taking advantage of the fact that if a function in an LCEL chain returns another chain, that chain will itself be invoked." + "const agentExecutor = createReactAgent({ llm, tools })" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now try it out. Note that so far it is not stateful (we still need to add in memory)" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 17, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"Task decomposition involves breaking down a complex task into smaller and simpler steps to make it m\"... 
358 more characters,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: undefined, tool_calls: undefined },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"Task decomposition involves breaking down a complex task into smaller and simpler steps to make it m\"... 358 more characters,\n", - " name: undefined,\n", - " additional_kwargs: { function_call: undefined, tool_calls: undefined },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: 83, promptTokens: 701, totalTokens: 784 },\n", - " finish_reason: \"stop\"\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}\n" + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-AB7xlcJBGSKSp1GvgDY9FP8KvXxwB\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_Ev0nA6nzGwOeMC5upJUUxTuw\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 19,\n", + " \"promptTokens\": 66,\n", + " \"totalTokens\": 85\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_52a7f40b0b\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"blog_post_retriever\",\n", + " \"args\": {\n", + " \"query\": \"Task Decomposition\"\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_Ev0nA6nzGwOeMC5upJUUxTuw\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 66,\n", + " \"output_tokens\": 19,\n", + " \"total_tokens\": 85\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " tools: {\n", + " messages: [\n", + " ToolMessage {\n", + " \"content\": \"Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\n\\nTask decomposition can be done (1) by LLM with simple prompting like \\\"Steps for XYZ.\\\\n1.\\\", \\\"What are the subgoals for achieving XYZ?\\\", (2) by using task-specific instructions; e.g. \\\"Write a story outline.\\\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. 
In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\\n\\nAgent System Overview\\n \\n Component One: Planning\\n \\n \\n Task Decomposition\\n \\n Self-Reflection\\n \\n \\n Component Two: Memory\\n \\n \\n Types of Memory\\n \\n Maximum Inner Product Search (MIPS)\\n \\n \\n Component Three: Tool Use\\n \\n Case Studies\\n \\n \\n Scientific Discovery Agent\\n \\n Generative Agents Simulation\\n \\n Proof-of-Concept Examples\\n \\n \\n Challenges\\n \\n Citation\\n \\n References\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\",\n", + " \"name\": \"blog_post_retriever\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_Ev0nA6nzGwOeMC5upJUUxTuw\"\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-AB7xmiPNPbMX2KvZKHM2oPfcoFMnY\",\n", + " \"content\": \"**Task Decomposition** involves breaking down a complicated or large task into smaller, more manageable subtasks. Here are some insights based on current techniques and research:\\n\\n1. **Chain of Thought (CoT)**:\\n - Introduced by Wei et al. (2022), this technique prompts the model to \\\"think step by step\\\".\\n - It helps decompose hard tasks into several simpler steps.\\n - Enhances the interpretability of the model's thought process.\\n\\n2. **Tree of Thoughts (ToT)**:\\n - An extension of CoT by Yao et al. (2023).\\n - Decomposes problems into multiple thought steps and generates several possibilities at each step.\\n - Utilizes tree structures through BFS (Breadth-First Search) or DFS (Depth-First Search) with evaluation by a classifier or majority vote.\\n\\n3. **Methods of Task Decomposition**:\\n - **Simple Prompting**: Asking the model directly, e.g., \\\"Steps for XYZ.\\\\n1.\\\" or \\\"What are the subgoals for achieving XYZ?\\\".\\n - **Task-Specific Instructions**: Tailoring instructions to the task, such as \\\"Write a story outline\\\" for writing a novel.\\n - **Human Inputs**: Receiving inputs from humans to refine the process.\\n\\n4. **LLM+P Approach**:\\n - Suggested by Liu et al. (2023), combines language models with an external classical planner.\\n - Uses Planning Domain Definition Language (PDDL) for long-horizon planning:\\n 1. Translates the problem into a PDDL problem.\\n 2. Requests an external planner to generate a PDDL plan.\\n 3. 
Translates the PDDL plan back into natural language.\\n - This method offloads the planning complexity to a specialized tool, especially relevant for domains utilizing robotic setups.\\n\\nTask Decomposition is a fundamental component of planning in autonomous agent systems, aiding in the efficient accomplishment of complex tasks by breaking them into smaller, actionable steps.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 411,\n", + " \"promptTokens\": 732,\n", + " \"totalTokens\": 1143\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 732,\n", + " \"output_tokens\": 411,\n", + " \"total_tokens\": 1143\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n" ] - }, - { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Common ways of task decomposition include using simple prompting techniques like Chain of Thought (C\"\u001b[39m... 353 more characters,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Common ways of task decomposition include using simple prompting techniques like Chain of Thought (C\"\u001b[39m... 353 more characters,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m81\u001b[39m, promptTokens: \u001b[33m779\u001b[39m, totalTokens: \u001b[33m860\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" } ], "source": [ - "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\"\n", - "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", - "import { formatDocumentsAsString } from \"langchain/util/document\";\n", + "const query = \"What is Task Decomposition?\"\n", "\n", - "const qaSystemPrompt = `You are an assistant for question-answering tasks.\n", - "Use the following pieces of retrieved context to answer the question.\n", - "If you don't know the answer, just say that you don't know.\n", - "Use three sentences maximum and keep the answer concise.\n", + "for await (const s of await agentExecutor.stream(\n", + " { messages: [{ role: \"user\", content: query }] },\n", + ")){\n", + " console.log(s)\n", + " console.log(\"----\")\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "LangGraph comes with built in persistence, so we don't need to use `ChatMessageHistory`! Rather, we can pass in a checkpointer to our LangGraph agent directly.\n", "\n", - "{context}`\n", + "Distinct conversations are managed by specifying a key for a conversation thread in the config object, as shown below." 
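In other words, the config is just an object with a `configurable.thread_id` entry — any stable string works, and reusing an id resumes that thread's checkpointed history while a fresh id starts a clean conversation. A minimal sketch with hypothetical ids (the full example continues below):

```typescript
// Two independent conversations against the same agent, distinguished
// only by their thread ids.
const aliceConfig = { configurable: { thread_id: "alice-session-1" } };
const bobConfig = { configurable: { thread_id: "bob-session-1" } };
```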
+ ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "import { MemorySaver } from \"@langchain/langgraph\";\n", "\n", - "const qaPrompt = ChatPromptTemplate.fromMessages([\n", - " [\"system\", qaSystemPrompt],\n", - " new MessagesPlaceholder(\"chat_history\"),\n", - " [\"human\", \"{question}\"]\n", - "]);\n", + "const memory3 = new MemorySaver();\n", "\n", - "const contextualizedQuestion = (input: Record) => {\n", - " if (\"chat_history\" in input) {\n", - " return contextualizeQChain;\n", - " }\n", - " return input.question;\n", - "};\n", - "\n", - "const ragChain = RunnableSequence.from([\n", - " RunnablePassthrough.assign({\n", - " context: async (input: Record) => {\n", - " if (\"chat_history\" in input) {\n", - " const chain = contextualizedQuestion(input);\n", - " return chain.pipe(retriever).pipe(formatDocumentsAsString);\n", - " }\n", - " return \"\";\n", - " },\n", - " }),\n", - " qaPrompt,\n", - " llm\n", - "]);\n", + "const agentExecutor2 = createReactAgent({ llm, tools, checkpointSaver: memory3 })" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is all we need to construct a conversational RAG agent.\n", "\n", - "const chat_history = [];\n", + "Let's observe its behavior. Note that if we input a query that does not require a retrieval step, the agent does not execute one:" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-AB7y8P8AGHkxOwKpwMc3qj6r0skYr\",\n", + " \"content\": \"Hello, Bob! How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 12,\n", + " \"promptTokens\": 64,\n", + " \"totalTokens\": 76\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 64,\n", + " \"output_tokens\": 12,\n", + " \"total_tokens\": 76\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n" + ] + } + ], + "source": [ + "const threadId3 = uuidv4();\n", + "const config3 = { configurable: { thread_id: threadId3 } };\n", "\n", - "const question = \"What is task decomposition?\";\n", - "const aiMsg = await ragChain.invoke({ question, chat_history });\n", + "for await (const s of await agentExecutor2.stream({ messages: [{ role: \"user\", content: \"Hi! 
I'm bob\" }] }, config3)) {\n", + " console.log(s)\n", + " console.log(\"----\")\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Further, if we input a query that does require a retrieval step, the agent generates the input to the tool:" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-AB7y8Do2IHJ2rnUvvMU3pTggmuZud\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_3tSaOZ3xdKY4miIJdvBMR80V\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 19,\n", + " \"promptTokens\": 89,\n", + " \"totalTokens\": 108\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"blog_post_retriever\",\n", + " \"args\": {\n", + " \"query\": \"Task Decomposition\"\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_3tSaOZ3xdKY4miIJdvBMR80V\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 89,\n", + " \"output_tokens\": 19,\n", + " \"total_tokens\": 108\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " tools: {\n", + " messages: [\n", + " ToolMessage {\n", + " \"content\": \"Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\n\\nTask decomposition can be done (1) by LLM with simple prompting like \\\"Steps for XYZ.\\\\n1.\\\", \\\"What are the subgoals for achieving XYZ?\\\", (2) by using task-specific instructions; e.g. \\\"Write a story outline.\\\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. 
Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\\n\\nAgent System Overview\\n \\n Component One: Planning\\n \\n \\n Task Decomposition\\n \\n Self-Reflection\\n \\n \\n Component Two: Memory\\n \\n \\n Types of Memory\\n \\n Maximum Inner Product Search (MIPS)\\n \\n \\n Component Three: Tool Use\\n \\n Case Studies\\n \\n \\n Scientific Discovery Agent\\n \\n Generative Agents Simulation\\n \\n Proof-of-Concept Examples\\n \\n \\n Challenges\\n \\n Citation\\n \\n References\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\",\n", + " \"name\": \"blog_post_retriever\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_3tSaOZ3xdKY4miIJdvBMR80V\"\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-AB7y9tpoTvM3lsrhoxCWkkerk9fb2\",\n", + " \"content\": \"Task decomposition is a methodology used to break down complex tasks into smaller, more manageable steps. Here’s an overview of various approaches to task decomposition:\\n\\n1. **Chain of Thought (CoT)**: This technique prompts a model to \\\"think step by step,\\\" which aids in transforming big tasks into multiple smaller tasks. This method enhances the model’s performance on complex tasks by making the problem more manageable and interpretable.\\n\\n2. **Tree of Thoughts (ToT)**: An extension of Chain of Thought, this approach explores multiple reasoning possibilities at each step, effectively creating a tree structure. The search process can be carried out using Breadth-First Search (BFS) or Depth-First Search (DFS), with each state evaluated by either a classifier or a majority vote.\\n\\n3. **Simple Prompting**: Involves straightforward instructions to decompose a task, such as starting with \\\"Steps for XYZ. 1.\\\" or asking \\\"What are the subgoals for achieving XYZ?\\\". This can also include task-specific instructions like \\\"Write a story outline\\\" for writing a novel.\\n\\n4. **LLM+P**: Combines Large Language Models (LLMs) with an external classical planner. The problem is translated into a Planning Domain Definition Language (PDDL) format, an external planner generates a plan, and then the plan is translated back into natural language. 
This approach highlights a synergy between modern AI techniques and traditional planning strategies.\\n\\nThese approaches allow complex problems to be approached and solved more efficiently by focusing on manageable sub-tasks.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 311,\n", + " \"promptTokens\": 755,\n", + " \"totalTokens\": 1066\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_52a7f40b0b\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 755,\n", + " \"output_tokens\": 311,\n", + " \"total_tokens\": 1066\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n" + ] + } + ], + "source": [ + "const query2 = \"What is Task Decomposition?\"\n", "\n", - "console.log(aiMsg)\n", + "for await (const s of await agentExecutor2.stream({ messages: [{ role: \"user\", content: query2 }] }, config3)) {\n", + " console.log(s)\n", + " console.log(\"----\")\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Above, instead of inserting our query verbatim into the tool, the agent stripped unnecessary words like \"what\" and \"is\".\n", "\n", - "chat_history.push(aiMsg);\n", + "This same principle allows the agent to use the context of the conversation when necessary:" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-AB7yDE4rCOXTPZ3595GknUgVzASmt\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_cWnDZq2aloVtMB4KjZlTxHmZ\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 21,\n", + " \"promptTokens\": 1089,\n", + " \"totalTokens\": 1110\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_52a7f40b0b\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"blog_post_retriever\",\n", + " \"args\": {\n", + " \"query\": \"common ways of task decomposition\"\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_cWnDZq2aloVtMB4KjZlTxHmZ\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 1089,\n", + " \"output_tokens\": 21,\n", + " \"total_tokens\": 1110\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " tools: {\n", + " messages: [\n", + " ToolMessage {\n", + " \"content\": \"Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. 
It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\n\\nTask decomposition can be done (1) by LLM with simple prompting like \\\"Steps for XYZ.\\\\n1.\\\", \\\"What are the subgoals for achieving XYZ?\\\", (2) by using task-specific instructions; e.g. \\\"Write a story outline.\\\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\\n\\nAgent System Overview\\n \\n Component One: Planning\\n \\n \\n Task Decomposition\\n \\n Self-Reflection\\n \\n \\n Component Two: Memory\\n \\n \\n Types of Memory\\n \\n Maximum Inner Product Search (MIPS)\\n \\n \\n Component Three: Tool Use\\n \\n Case Studies\\n \\n \\n Scientific Discovery Agent\\n \\n Generative Agents Simulation\\n \\n Proof-of-Concept Examples\\n \\n \\n Challenges\\n \\n Citation\\n \\n References\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\",\n", + " \"name\": \"blog_post_retriever\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_cWnDZq2aloVtMB4KjZlTxHmZ\"\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-AB7yGASxz0Z0g2jiCxwx4gYHYJTi4\",\n", + " \"content\": \"According to the blog post, there are several common methods of task decomposition:\\n\\n1. **Simple Prompting by LLMs**: This involves straightforward instructions to decompose a task. Examples include:\\n - \\\"Steps for XYZ. 1.\\\"\\n - \\\"What are the subgoals for achieving XYZ?\\\"\\n - Task-specific instructions like \\\"Write a story outline\\\" for writing a novel.\\n\\n2. **Human Inputs**: Decomposition can be guided by human insights and instructions.\\n\\n3. **Chain of Thought (CoT)**: This technique prompts a model to think step-by-step, enabling it to break down complex tasks into smaller, more manageable tasks. CoT has become a standard method to enhance model performance on intricate tasks.\\n\\n4. 
**Tree of Thoughts (ToT)**: An extension of CoT, this approach decomposes the problem into multiple thought steps and generates several thoughts per step, forming a tree structure. The search process can be performed using Breadth-First Search (BFS) or Depth-First Search (DFS), with each state evaluated by a classifier or through a majority vote.\\n\\n5. **LLM+P (Large Language Model plus Planner)**: This method integrates LLMs with an external classical planner. It involves:\\n - Translating the problem into “Problem PDDL” (Planning Domain Definition Language).\\n - Using an external planner to generate a PDDL plan based on an existing “Domain PDDL”.\\n - Translating the PDDL plan back into natural language.\\n \\nBy utilizing these methods, tasks can be effectively decomposed into more manageable parts, allowing for more efficient problem-solving and planning.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 334,\n", + " \"promptTokens\": 1746,\n", + " \"totalTokens\": 2080\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_52a7f40b0b\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 1746,\n", + " \"output_tokens\": 334,\n", + " \"total_tokens\": 2080\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n" + ] + } + ], + "source": [ + "const query3 = \"What according to the blog post are common ways of doing it? redo the search\"\n", "\n", - "const secondQuestion = \"What are common ways of doing it?\";\n", - "await ragChain.invoke({ question: secondQuestion, chat_history });" + "for await (const s of await agentExecutor2.stream({ messages: [{ role: \"user\", content: query3 }] }, config3)) {\n", + " console.log(s)\n", + " console.log(\"----\")\n", + "}" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "See the first [LangSmith trace here](https://smith.langchain.com/public/527981c6-5018-4b68-a11a-ebcde77843e7/r) and the [second trace here](https://smith.langchain.com/public/7b97994a-ab9f-4bf3-a2e4-abb609e5610a/r)" + "Note that the agent was able to infer that \"it\" in our query refers to \"task decomposition\", and generated a reasonable search query as a result-- in this case, \"common ways of task decomposition\"." 
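If you want to verify programmatically which query the agent actually sent to the retriever, you can read it back from the checkpointed state — a hedged sketch, assuming at least one retrieval step has already run on this thread:

```typescript
// Pull the thread's message history from the checkpointer and find the
// most recent AI message that issued a tool call.
const threadState = await agentExecutor2.getState(config3);
const toolCallMessages = threadState.values.messages.filter(
  (m: any) => m._getType() === "ai" && (m.tool_calls ?? []).length > 0
);
const lastToolCall = toolCallMessages.at(-1)?.tool_calls?.[0];

console.log(lastToolCall?.name, lastToolCall?.args);
// e.g. blog_post_retriever { query: "common ways of task decomposition" }
```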
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tying it together\n", + "\n", + "For convenience, we tie together all of the necessary steps in a single code cell:" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "import { createRetrieverTool } from \"langchain/tools/retriever\";\n", + "import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n", + "import { MemorySaver } from \"@langchain/langgraph\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n", + "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\"\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const llm3 = new ChatOpenAI({ model: \"gpt-4o\" });\n", + "\n", + "const loader3 = new CheerioWebBaseLoader(\n", + " \"https://lilianweng.github.io/posts/2023-06-23-agent/\"\n", + ");\n", + "\n", + "const docs3 = await loader3.load();\n", + "\n", + "const textSplitter3 = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n", + "const splits3 = await textSplitter3.splitDocuments(docs3);\n", + "const vectorStore3 = await MemoryVectorStore.fromDocuments(splits3, new OpenAIEmbeddings());\n", + "\n", + "// Retrieve and generate using the relevant snippets of the blog.\n", + "const retriever3 = vectorStore3.asRetriever();\n", + "\n", + "const tool2 = createRetrieverTool(\n", + " retriever3,\n", + " {\n", + " name: \"blog_post_retriever\",\n", + " description: \"Searches and returns excerpts from the Autonomous Agents blog post.\",\n", + " }\n", + ")\n", + "const tools2 = [tool2]\n", + "const memory4 = new MemorySaver();\n", + "\n", + "const agentExecutor3 = createReactAgent({ llm: llm3, tools: tools2, checkpointSaver: memory4 })" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Here we've gone over how to add application logic for incorporating historical outputs, but we're still manually updating the chat history and inserting it into each input. In a real Q&A application we'll want some way of persisting chat history and some way of automatically inserting and updating it.\n", + "## Next steps\n", + "\n", + "We've covered the steps to build a basic conversational Q&A application:\n", "\n", - "For this we can use:\n", + "- We used chains to build a predictable application that generates search queries for each user input;\n", + "- We used agents to build an application that \"decides\" when and how to generate search queries.\n", "\n", - "- [BaseChatMessageHistory](https://api.js.langchain.com/classes/langchain_core.chat_history.BaseChatMessageHistory.html): Store chat history.\n", - "- [RunnableWithMessageHistory](/docs/how_to/message_history/): Wrapper for an LCEL chain and a `BaseChatMessageHistory` that handles injecting chat history into inputs and updating it after each invocation.\n", + "To explore different types of retrievers and retrieval strategies, visit the [retrievers](/docs/how_to#retrievers) section of the how-to guides.\n", "\n", - "For a detailed walkthrough of how to use these classes together to create a stateful conversational chain, head to the [How to add message history (memory)](/docs/how_to/message_history/) LCEL page." 
+ "For a detailed walkthrough of LangChain's conversation memory abstractions, visit the [How to add message history (memory)](/docs/how_to/message_history) LCEL page.\n" ] } ], "metadata": { "kernelspec": { - "display_name": "Deno", + "display_name": "TypeScript", "language": "typescript", - "name": "deno" + "name": "tslab" }, "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, "file_extension": ".ts", - "mimetype": "text/x.typescript", + "mimetype": "text/typescript", "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" + "version": "3.7.2" } }, "nbformat": 4, diff --git a/docs/core_docs/docs/tutorials/chatbot.ipynb b/docs/core_docs/docs/tutorials/chatbot.ipynb index 829549e8c1c8..91490c7fa1d0 100644 --- a/docs/core_docs/docs/tutorials/chatbot.ipynb +++ b/docs/core_docs/docs/tutorials/chatbot.ipynb @@ -2,10 +2,15 @@ "cells": [ { "cell_type": "raw", - "metadata": {}, + "metadata": { + "vscode": { + "languageId": "raw" + } + }, "source": [ "---\n", "sidebar_position: 1\n", + "keywords: [conversationchain]\n", "---" ] }, @@ -13,14 +18,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Build a Chatbot" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Overview\n", + "# Build a Chatbot\n", + "\n", "\n", ":::info Prerequisites\n", "\n", @@ -30,34 +29,57 @@ "- [Prompt Templates](/docs/concepts/#prompt-templates)\n", "- [Chat History](/docs/concepts/#chat-history)\n", "\n", + "This guide requires `langgraph >= 0.2.28`.\n", + "\n", + ":::\n", + "\n", + "\n", + "```{=mdx}\n", + "\n", + ":::note\n", + "\n", + "This tutorial previously built a chatbot using [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html). You can access this version of the tutorial in the [v0.2 docs](https://js.langchain.com/v0.2/docs/tutorials/chatbot/).\n", + "\n", + "The LangGraph implementation offers a number of advantages over `RunnableWithMessageHistory`, including the ability to persist arbitrary components of an application's state (instead of only messages).\n", + "\n", ":::\n", "\n", + "```\n", + "\n", + "## Overview\n", + "\n", "We'll go over an example of how to design and implement an LLM-powered chatbot. \n", "This chatbot will be able to have a conversation and remember previous interactions.\n", "\n", + "\n", "Note that this chatbot that we build will only use the language model to have a conversation.\n", "There are several other related concepts that you may be looking for:\n", "\n", "- [Conversational RAG](/docs/tutorials/qa_chat_history): Enable a chatbot experience over an external source of data\n", - "- [Agents](https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/): Build a chatbot that can take actions\n", + "- [Agents](https://langchain-ai.github.io/langgraphjs/tutorials/multi_agent/agent_supervisor/): Build a chatbot that can take actions\n", "\n", "This tutorial will cover the basics which will be helpful for those two more advanced topics, but feel free to skip directly to there should you choose.\n", "\n", "## Setup\n", "\n", + "### Jupyter Notebook\n", + "\n", + "This guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader is as well. 
Jupyter notebooks are perfect for learning how to work with LLM systems because oftentimes things can go wrong (unexpected output, API down, etc) and going through guides in an interactive environment is a great way to better understand them.\n", + "\n", + "This and other tutorials are perhaps most conveniently run in a Jupyter notebook. See [here](https://jupyter.org/install) for instructions on how to install.\n", + "\n", "### Installation\n", "\n", - "To install LangChain run:\n", + "For this tutorial we will need `@langchain/core` and `langgraph`:\n", "\n", "```{=mdx}\n", "import Npm2Yarn from \"@theme/Npm2Yarn\"\n", "\n", "\n", - " langchain @langchain/core\n", + " @langchain/core @langchain/langgraph uuid\n", "\n", "```\n", "\n", - "\n", "For more details, see our [Installation guide](/docs/how_to/installation).\n", "\n", "### LangSmith\n", @@ -68,35 +90,25 @@ "\n", "After you sign up at the link above, make sure to set your environment variables to start logging traces:\n", "\n", - "```shell\n", - "export LANGCHAIN_TRACING_V2=\"true\"\n", - "export LANGCHAIN_API_KEY=\"...\"\n", - "\n", - "# Reduce tracing latency if you are not in a serverless environment\n", - "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", + "```typescript\n", + "process.env.LANGCHAIN_TRACING_V2 = \"true\"\n", + "process.env.LANGCHAIN_API_KEY = \"...\"\n", "```\n", "\n", "## Quickstart\n", "\n", - "First up, let's learn how to use a language model by itself. LangChain supports many different language models that you can use interchangably - select the one you want to use below!\n", + "First up, let's learn how to use a language model by itself. LangChain supports many different language models that you can use interchangeably - select the one you want to use below!\n", "\n", "```{=mdx}\n", "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's first use the model directly. `ChatModel`s are instances of LangChain \"Runnables\", which means they expose a standard interface for interacting with them. To just simply call the model, we can pass in a list of messages to the `.invoke` method." + "\n", + "```\n" ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 27, "metadata": {}, "outputs": [], "source": [ @@ -104,51 +116,51 @@ "\n", "import { ChatOpenAI } from \"@langchain/openai\";\n", "\n", - "const model = new ChatOpenAI({\n", - " model: \"gpt-4o-mini\",\n", - " temperature: 0,\n", - "});" + "const llm = new ChatOpenAI({ model: \"gpt-4o-mini\" })" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's first use the model directly. `ChatModel`s are instances of LangChain \"Runnables\", which means they expose a standard interface for interacting with them. To just simply call the model, we can pass in a list of messages to the `.invoke` method." ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 28, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-A64of8iD4GIFNSYlOaFHxPdCeyl9E\",\n", - " \"content\": \"Hi Bob! 
How can I assist you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 10,\n", - " \"promptTokens\": 11,\n", - " \"totalTokens\": 21\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 11,\n", - " \"output_tokens\": 10,\n", - " \"total_tokens\": 21\n", - " }\n", - "}" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXeSO4JQpxO96lj7iudUptJ6nfW\",\n", + " \"content\": \"Hi Bob! How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 10,\n", + " \"promptTokens\": 10,\n", + " \"totalTokens\": 20\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 10,\n", + " \"output_tokens\": 10,\n", + " \"total_tokens\": 20\n", + " }\n", + "}\n" + ] } ], "source": [ - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "\n", - "await model.invoke([new HumanMessage({ content: \"Hi! I'm Bob\" })]);" + "await llm.invoke([{ role: \"user\", content: \"Hi im bob\" }])" ] }, { @@ -160,48 +172,46 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 29, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-A64ogC7owxmPla3ggZERNCFZpVHSp\",\n", - " \"content\": \"I'm sorry, but I don't have access to personal information about users unless it has been shared with me in the course of our conversation. If you'd like to tell me your name, feel free!\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 39,\n", - " \"promptTokens\": 11,\n", - " \"totalTokens\": 50\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 11,\n", - " \"output_tokens\": 39,\n", - " \"total_tokens\": 50\n", - " }\n", - "}" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXe1Zih4gMe3XgotWL83xeWub2h\",\n", + " \"content\": \"I'm sorry, but I don't have access to personal information about individuals unless it has been shared with me during our conversation. 
If you'd like to tell me your name, feel free to do so!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 39,\n", + " \"promptTokens\": 10,\n", + " \"totalTokens\": 49\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 10,\n", + " \"output_tokens\": 39,\n", + " \"total_tokens\": 49\n", + " }\n", + "}\n" + ] } ], "source": [ - "await model.invoke([new HumanMessage({ content: \"What's my name?\" })])" + "await llm.invoke([{ role: \"user\", content: \"Whats my name\" }])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Let's take a look at the example [LangSmith trace](https://smith.langchain.com/public/e5a0ae1b-32b9-4beb-836d-38f40bfa6762/r)\n", + "Let's take a look at the example [LangSmith trace](https://smith.langchain.com/public/3b768e44-a319-453a-bd6e-30f9df75f16a/r)\n", "\n", "We can see that it doesn't take the previous conversation turn into context, and cannot answer the question.\n", "This makes for a terrible chatbot experience!\n", @@ -211,49 +221,43 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 30, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-A64ohhg3P4BuIiw8mUCLI3zYHNOvS\",\n", - " \"content\": \"Your name is Bob! How can I help you today, Bob?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 14,\n", - " \"promptTokens\": 33,\n", - " \"totalTokens\": 47\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 33,\n", - " \"output_tokens\": 14,\n", - " \"total_tokens\": 47\n", - " }\n", - "}" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXfX4Fnp247rOxyPlBUYMQgahj2\",\n", + " \"content\": \"Your name is Bob! How can I help you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 12,\n", + " \"promptTokens\": 33,\n", + " \"totalTokens\": 45\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 33,\n", + " \"output_tokens\": 12,\n", + " \"total_tokens\": 45\n", + " }\n", + "}\n" + ] } ], "source": [ - "import { AIMessage } from \"@langchain/core/messages\"\n", - "\n", - "await model.invoke(\n", - " [\n", - " new HumanMessage({ content: \"Hi! I'm Bob\" }),\n", - " new AIMessage({ content: \"Hello Bob! How can I assist you today?\" }),\n", - " new HumanMessage({ content: \"What's my name?\" }),\n", - " ]\n", - ");" + "await llm.invoke([\n", + " { role: \"user\", content: \"Hi! I'm Bob\" },\n", + " { role: \"assistant\", content: \"Hello Bob! 
How can I assist you today?\" },\n", + " { role: \"user\", content: \"What's my name?\" }\n", + "]);" ] }, { @@ -270,153 +274,208 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Message History\n", + "## Message persistence\n", + "\n", + "[LangGraph](https://langchain-ai.github.io/langgraphjs/) implements a built-in persistence layer, making it ideal for chat applications that support multiple conversational turns.\n", + "\n", + "Wrapping our chat model in a minimal LangGraph application allows us to automatically persist the message history, simplifying the development of multi-turn applications.\n", "\n", - "We can use a Message History class to wrap our model and make it stateful.\n", - "This will keep track of inputs and outputs of the model, and store them in some datastore.\n", - "Future interactions will then load those messages and pass them into the chain as part of the input.\n", - "Let's see how to use this!" + "LangGraph comes with a simple in-memory checkpointer, which we use below." ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": 31, "metadata": {}, + "outputs": [], "source": [ - "We import the relevant classes and set up our chain which wraps the model and adds in this message history. A key part here is the function we pass into as the `getSessionHistory()`. This function is expected to take in a `sessionId` and return a Message History object. This `sessionId` is used to distinguish between separate conversations, and should be passed in as part of the config when calling the new chain.\n", + "import { START, END, MessagesAnnotation, StateGraph, MemorySaver } from \"@langchain/langgraph\";\n", "\n", - "Let's also create a simple chain by adding a prompt to help with formatting:" + "// Define the function that calls the model\n", + "const callModel = async (state: typeof MessagesAnnotation.State) => {\n", + " const response = await llm.invoke(state.messages);\n", + " return { messages: response };\n", + "};\n", + "\n", + "// Define a new graph\n", + "const workflow = new StateGraph(MessagesAnnotation)\n", + " // Define the node and edge\n", + " .addNode(\"model\", callModel)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", + "\n", + "// Add memory\n", + "const memory = new MemorySaver();\n", + "const app = workflow.compile({ checkpointer: memory });" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We now need to create a `config` that we pass into the runnable every time. This config contains information that is not part of the input directly, but is still useful. In this case, we want to include a `thread_id`. 
This should look like:" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 32, "metadata": {}, "outputs": [], "source": [ - "// We use an ephemeral, in-memory chat history for this demo.\n", - "import { InMemoryChatMessageHistory } from \"@langchain/core/chat_history\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { RunnableWithMessageHistory } from \"@langchain/core/runnables\";\n", + "import { v4 as uuidv4 } from \"uuid\";\n", "\n", - "const messageHistories: Record = {};\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages([\n", - " [\"system\", `You are a helpful assistant who remembers all details the user shares with you.`],\n", - " [\"placeholder\", \"{chat_history}\"],\n", - " [\"human\", \"{input}\"],\n", - "]);\n", - "\n", - "const chain = prompt.pipe(model);\n", - "\n", - "const withMessageHistory = new RunnableWithMessageHistory({\n", - " runnable: chain,\n", - " getMessageHistory: async (sessionId) => {\n", - " if (messageHistories[sessionId] === undefined) {\n", - " messageHistories[sessionId] = new InMemoryChatMessageHistory();\n", - " }\n", - " return messageHistories[sessionId];\n", - " },\n", - " inputMessagesKey: \"input\",\n", - " historyMessagesKey: \"chat_history\",\n", - "});" + "const config = { configurable: { thread_id: uuidv4() } };" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We now need to create a `config` that we pass into the runnable every time. This config contains information that is not part of the input directly, but is still useful. In this case, we want to include a `sessionId`. This should look like:" + "This enables us to support multiple conversation threads with a single application, a common requirement when your application has multiple users.\n", + "\n", + "We can then invoke the application:" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 33, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "\u001b[32m\"Hi Bob! How can I assist you today?\"\u001b[39m" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXfjqCno78CGXCHoAgamqXG1pnZ\",\n", + " \"content\": \"Hi Bob! How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 10,\n", + " \"promptTokens\": 12,\n", + " \"totalTokens\": 22\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 12,\n", + " \"output_tokens\": 10,\n", + " \"total_tokens\": 22\n", + " }\n", + "}\n" + ] } ], "source": [ - "const config = {\n", - " configurable: {\n", - " sessionId: \"abc2\"\n", + "const input = [\n", + " {\n", + " role: \"user\",\n", + " content: \"Hi! I'm Bob.\",\n", " }\n", - "};\n", - "\n", - "const response = await withMessageHistory.invoke({\n", - " input: \"Hi! 
 I'm Bob\",\n",
-    "}, config);\n",
-    "\n",
-    "response.content;"
+    "]\n",
+    "const output = await app.invoke({ messages: input }, config)\n",
+    "// The output contains all messages in the state.\n",
+    "// This will log the last message in the conversation.\n",
+    "console.log(output.messages[output.messages.length - 1]);"
   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
-    "data": {
-     "text/plain": [
-      "\u001b[32m\"Your name is Bob. How can I help you today?\"\u001b[39m"
-     ]
-    },
-    "execution_count": 7,
-    "metadata": {},
-    "output_type": "execute_result"
+    "name": "stdout",
+    "output_type": "stream",
+    "text": [
+     "AIMessage {\n",
+     "  \"id\": \"chatcmpl-ABUXgzHFHk4KsaNmDJyvflHq4JY2L\",\n",
+     "  \"content\": \"Your name is Bob! How can I help you today, Bob?\",\n",
+     "  \"additional_kwargs\": {},\n",
+     "  \"response_metadata\": {\n",
+     "    \"tokenUsage\": {\n",
+     "      \"completionTokens\": 14,\n",
+     "      \"promptTokens\": 34,\n",
+     "      \"totalTokens\": 48\n",
+     "    },\n",
+     "    \"finish_reason\": \"stop\",\n",
+     "    \"system_fingerprint\": \"fp_1bb46167f9\"\n",
+     "  },\n",
+     "  \"tool_calls\": [],\n",
+     "  \"invalid_tool_calls\": [],\n",
+     "  \"usage_metadata\": {\n",
+     "    \"input_tokens\": 34,\n",
+     "    \"output_tokens\": 14,\n",
+     "    \"total_tokens\": 48\n",
+     "  }\n",
+     "}\n"
+    ]
    }
   ],
   "source": [
-    "const followupResponse = await withMessageHistory.invoke({\n",
-    "  input: \"What's my name?\",\n",
-    "}, config);\n",
-    "\n",
-    "followupResponse.content"
+    "const input2 = [\n",
+    "  {\n",
+    "    role: \"user\",\n",
+    "    content: \"What's my name?\",\n",
+    "  }\n",
+    "]\n",
+    "const output2 = await app.invoke({ messages: input2 }, config)\n",
+    "console.log(output2.messages[output2.messages.length - 1]);"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "Great! Our chatbot now remembers things about us. If we change the config to reference a different `sessionId`, we can see that it starts the conversation fresh."
+    "Great! Our chatbot now remembers things about us. If we change the config to reference a different `thread_id`, we can see that it starts the conversation fresh."
   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
-    "data": {
-     "text/plain": [
-      "\u001b[32m\"I'm sorry, but I don't have your name. If you tell me, I'll remember it for our future conversations\"\u001b[39m... 1 more character"
-     ]
-    },
-    "execution_count": 8,
-    "metadata": {},
-    "output_type": "execute_result"
+    "name": "stdout",
+    "output_type": "stream",
+    "text": [
+     "AIMessage {\n",
+     "  \"id\": \"chatcmpl-ABUXhT4EVx8mGgmKXJ1s132qEluxR\",\n",
+     "  \"content\": \"I'm sorry, but I don’t have access to personal data about individuals unless it has been shared in the course of our conversation. Therefore, I don't know your name.
 How can I assist you today?\",\n",
+     "  \"additional_kwargs\": {},\n",
+     "  \"response_metadata\": {\n",
+     "    \"tokenUsage\": {\n",
+     "      \"completionTokens\": 41,\n",
+     "      \"promptTokens\": 11,\n",
+     "      \"totalTokens\": 52\n",
+     "    },\n",
+     "    \"finish_reason\": \"stop\",\n",
+     "    \"system_fingerprint\": \"fp_1bb46167f9\"\n",
+     "  },\n",
+     "  \"tool_calls\": [],\n",
+     "  \"invalid_tool_calls\": [],\n",
+     "  \"usage_metadata\": {\n",
+     "    \"input_tokens\": 11,\n",
+     "    \"output_tokens\": 41,\n",
+     "    \"total_tokens\": 52\n",
+     "  }\n",
+     "}\n"
+    ]
    }
   ],
   "source": [
-    "const config2 = {\n",
-    "  configurable: {\n",
-    "    sessionId: \"abc3\"\n",
+    "const config2 = { configurable: { thread_id: uuidv4() } }\n",
+    "const input3 = [\n",
+    "  {\n",
+    "    role: \"user\",\n",
+    "    content: \"What's my name?\",\n",
     "  }\n",
-    "};\n",
-    "\n",
-    "const response2 = await withMessageHistory.invoke({\n",
-    "  input: \"What's my name?\",\n",
-    "}, config2);\n",
-    "\n",
-    "response2.content"
+    "]\n",
+    "const output3 = await app.invoke({ messages: input3 }, config2)\n",
+    "console.log(output3.messages[output3.messages.length - 1]);"
   ]
  },
  {
@@ -428,338 +487,623 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
-    "data": {
-     "text/plain": [
-      "\u001b[32m\"Your name is Bob. What would you like to talk about?\"\u001b[39m"
-     ]
-    },
-    "execution_count": 9,
-    "metadata": {},
-    "output_type": "execute_result"
+    "name": "stdout",
+    "output_type": "stream",
+    "text": [
+     "AIMessage {\n",
+     "  \"id\": \"chatcmpl-ABUXhZmtzvV3kqKig47xxhKEnvVfH\",\n",
+     "  \"content\": \"Your name is Bob! If there's anything else you'd like to talk about or ask, feel free!\",\n",
+     "  \"additional_kwargs\": {},\n",
+     "  \"response_metadata\": {\n",
+     "    \"tokenUsage\": {\n",
+     "      \"completionTokens\": 20,\n",
+     "      \"promptTokens\": 60,\n",
+     "      \"totalTokens\": 80\n",
+     "    },\n",
+     "    \"finish_reason\": \"stop\",\n",
+     "    \"system_fingerprint\": \"fp_1bb46167f9\"\n",
+     "  },\n",
+     "  \"tool_calls\": [],\n",
+     "  \"invalid_tool_calls\": [],\n",
+     "  \"usage_metadata\": {\n",
+     "    \"input_tokens\": 60,\n",
+     "    \"output_tokens\": 20,\n",
+     "    \"total_tokens\": 80\n",
+     "  }\n",
+     "}\n"
+    ]
    }
   ],
   "source": [
-    "const config3 = {\n",
-    "  configurable: {\n",
-    "    sessionId: \"abc2\"\n",
-    "  }\n",
-    "};\n",
+    "const output4 = await app.invoke({ messages: input2 }, config)\n",
+    "console.log(output4.messages[output4.messages.length - 1]);"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
+    "This is how we can support a chatbot having conversations with many users!\n",
+    "\n",
+    "Right now, all we've done is add a simple persistence layer around the model. We can start to make this more complicated and personalized by adding in a prompt template.\n",
     "\n",
-    "const response3 = await withMessageHistory.invoke({\n",
-    "  input: \"What's my name?\",\n",
-    "}, config3);\n",
+    "## Prompt templates\n",
     "\n",
-    "response3.content"
+    "Prompt Templates help to turn raw user information into a format that the LLM can work with. In this case, the raw user input is just a message, which we are passing to the LLM. Let's now make that a bit more complicated. First, let's add in a system message with some custom instructions (but still taking messages as input). Next, we'll add in more input besides just the messages.\n",
+    "\n",
+    "To add in a system message, we will create a `ChatPromptTemplate`. We will utilize `MessagesPlaceholder` to pass all the messages in."
] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": 37, "metadata": {}, + "outputs": [], "source": [ - "This is how we can support a chatbot having conversations with many users!" + "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You talk like a pirate. Answer all questions to the best of your ability.\"],\n", + " new MessagesPlaceholder(\"messages\"),\n", + "]);" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Managing Conversation History\n", - "\n", - "One important concept to understand when building chatbots is how to manage conversation history. If left unmanaged, the list of messages will grow unbounded and potentially overflow the context window of the LLM. Therefore, it is important to add a step that limits the size of the messages you are passing in.\n", - "\n", - "**Importantly, you will want to do this BEFORE the prompt template but AFTER you load previous messages from Message History.**\n", - "\n", - "We can do this by adding a simple step in front of the prompt that modifies the `chat_history` key appropriately, and then wrap that new chain in the Message History class. First, let's define a function that will modify the messages passed in. Let's make it so that it selects the 10 most recent messages. We can then create a new chain by adding that at the start." + "We can now update our application to incorporate this template:" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 38, "metadata": {}, "outputs": [], "source": [ - "import type { BaseMessage } from \"@langchain/core/messages\";\n", - "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", - "\n", - "type ChainInput = {\n", - " chat_history: BaseMessage[];\n", - " input: string;\n", - "}\n", + "import { START, END, MessagesAnnotation, StateGraph, MemorySaver } from \"@langchain/langgraph\";\n", + "\n", + "// Define the function that calls the model\n", + "const callModel2 = async (state: typeof MessagesAnnotation.State) => {\n", + " // highlight-start\n", + " const chain = prompt.pipe(llm);\n", + " const response = await chain.invoke(state);\n", + " // highlight-end\n", + " // Update message history with response:\n", + " return { messages: [response] };\n", + "};\n", "\n", - "const filterMessages = (input: ChainInput) => input.chat_history.slice(-10);\n", + "// Define a new graph\n", + "const workflow2 = new StateGraph(MessagesAnnotation)\n", + " // Define the (single) node in the graph\n", + " .addNode(\"model\", callModel2)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", "\n", - "const chain2 = RunnableSequence.from([\n", - " RunnablePassthrough.assign({\n", - " chat_history: filterMessages\n", - " }),\n", - " prompt,\n", - " model,\n", - "]);" + "// Add memory\n", + "const app2 = workflow2.compile({ checkpointer: new MemorySaver() });" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Let's now try it out! If we create a list of messages more than 10 messages long, we can see what it no longer remembers information in the early messages." 
+ "We invoke the application in the same way:" ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 39, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXio2Vy1YNRDiFdKKEyN3Yw1B9I\",\n", + " \"content\": \"Ahoy, Jim! What brings ye to these treacherous waters today? Speak up, matey!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 22,\n", + " \"promptTokens\": 32,\n", + " \"totalTokens\": 54\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 32,\n", + " \"output_tokens\": 22,\n", + " \"total_tokens\": 54\n", + " }\n", + "}\n" + ] + } + ], "source": [ - "const messages = [\n", - " new HumanMessage({ content: \"hi! I'm bob\" }),\n", - " new AIMessage({ content: \"hi!\" }),\n", - " new HumanMessage({ content: \"I like vanilla ice cream\" }),\n", - " new AIMessage({ content: \"nice\" }),\n", - " new HumanMessage({ content: \"whats 2 + 2\" }),\n", - " new AIMessage({ content: \"4\" }),\n", - " new HumanMessage({ content: \"thanks\" }),\n", - " new AIMessage({ content: \"No problem!\" }),\n", - " new HumanMessage({ content: \"having fun?\" }),\n", - " new AIMessage({ content: \"yes!\" }),\n", - " new HumanMessage({ content: \"That's great!\" }),\n", - " new AIMessage({ content: \"yes it is!\" }),\n", - "];" + "const config3 = { configurable: { thread_id: uuidv4() } }\n", + "const input4 = [\n", + " {\n", + " role: \"user\",\n", + " content: \"Hi! I'm Jim.\",\n", + " }\n", + "]\n", + "const output5 = await app2.invoke({ messages: input4 }, config3)\n", + "console.log(output5.messages[output5.messages.length - 1]);" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 40, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "\u001b[32m\"You haven't shared your name with me yet. What is it?\"\u001b[39m" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXjZNHiT5g7eTf52auWGXDUUcDs\",\n", + " \"content\": \"Ye be callin' yerself Jim, if me memory serves me right! Arrr, what else can I do fer ye, matey?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 31,\n", + " \"promptTokens\": 67,\n", + " \"totalTokens\": 98\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_3a215618e8\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 67,\n", + " \"output_tokens\": 31,\n", + " \"total_tokens\": 98\n", + " }\n", + "}\n" + ] } ], "source": [ - "const response4 = await chain2.invoke(\n", + "const input5 = [\n", " {\n", - " chat_history: messages,\n", - " input: \"what's my name?\"\n", + " role: \"user\",\n", + " content: \"What is my name?\"\n", " }\n", - ")\n", - "response4.content" + "]\n", + "const output6 = await app2.invoke({ messages: input5 }, config3)\n", + "console.log(output6.messages[output6.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Awesome! Let's now make our prompt a little bit more complicated. 
Let's assume that the prompt template now looks something like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [], + "source": [ + "const prompt2 = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a helpful assistant. Answer all questions to the best of your ability in {language}.\"],\n", + " new MessagesPlaceholder(\"messages\"),\n", + "]);" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "But if we ask about information that is within the last ten messages, it still remembers it" + "Note that we have added a new `language` input to the prompt. Our application now has two parameters-- the input `messages` and `language`. We should update our application's state to reflect this:" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [], + "source": [ + "import { START, END, StateGraph, MemorySaver, MessagesAnnotation, Annotation } from \"@langchain/langgraph\";\n", + "\n", + "// Define the State\n", + "const GraphAnnotation = Annotation.Root({\n", + " ...MessagesAnnotation.spec,\n", + " language: Annotation(),\n", + "});\n", + "\n", + "// Define the function that calls the model\n", + "const callModel3 = async (state: typeof GraphAnnotation.State) => {\n", + " const chain = prompt2.pipe(llm);\n", + " const response = await chain.invoke(state);\n", + " return { messages: [response] };\n", + "};\n", + "\n", + "const workflow3 = new StateGraph(GraphAnnotation)\n", + " .addNode(\"model\", callModel3)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", + "\n", + "const app3 = workflow3.compile({ checkpointer: new MemorySaver() });" ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 43, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "\u001b[32m\"Your favorite ice cream is vanilla!\"\u001b[39m" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXkq2ZV9xmOBSM2iJbYSn8Epvqa\",\n", + " \"content\": \"¡Hola, Bob! ¿En qué puedo ayudarte hoy?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 12,\n", + " \"promptTokens\": 32,\n", + " \"totalTokens\": 44\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 32,\n", + " \"output_tokens\": 12,\n", + " \"total_tokens\": 44\n", + " }\n", + "}\n" + ] } ], "source": [ - "const response5 = await chain2.invoke(\n", - " {\n", - " chat_history: messages,\n", - " input: \"what's my fav ice cream\"\n", - " }\n", - ")\n", - "response5.content" + "const config4 = { configurable: { thread_id: uuidv4() } }\n", + "const input6 = {\n", + " messages: [\n", + " {\n", + " role: \"user\",\n", + " content: \"Hi im bob\"\n", + " }\n", + " ],\n", + " language: \"Spanish\"\n", + "}\n", + "const output7 = await app3.invoke(input6, config4)\n", + "console.log(output7.messages[output7.messages.length - 1]);" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Let's now wrap this chain in a `RunnableWithMessageHistory` constructor. 
For demo purposes, we will also slightly modify our `getMessageHistory()` method to always start new sessions with the previously declared list of 10 messages to simulate several conversation turns:" + "Note that the entire state is persisted, so we can omit parameters like `language` if no changes are desired:" ] }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 44, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "\u001b[32m\"You haven't shared your name with me yet. What is it?\"\u001b[39m" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXk9Ccr1dhmA9lZ1VmZ998PFyJF\",\n", + " \"content\": \"Tu nombre es Bob. ¿Hay algo más en lo que te pueda ayudar?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 16,\n", + " \"promptTokens\": 57,\n", + " \"totalTokens\": 73\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 57,\n", + " \"output_tokens\": 16,\n", + " \"total_tokens\": 73\n", + " }\n", + "}\n" + ] } ], "source": [ - "const messageHistories2: Record = {};\n", - "\n", - "const withMessageHistory2 = new RunnableWithMessageHistory({\n", - " runnable: chain2,\n", - " getMessageHistory: async (sessionId) => {\n", - " if (messageHistories2[sessionId] === undefined) {\n", - " const messageHistory = new InMemoryChatMessageHistory();\n", - " await messageHistory.addMessages(messages);\n", - " messageHistories2[sessionId] = messageHistory;\n", + "const input7 = {\n", + " messages: [\n", + " {\n", + " role: \"user\",\n", + " content: \"What is my name?\"\n", " }\n", - " return messageHistories2[sessionId];\n", - " },\n", - " inputMessagesKey: \"input\",\n", - " historyMessagesKey: \"chat_history\",\n", - "})\n", - "\n", - "const config4 = {\n", - " configurable: {\n", - " sessionId: \"abc4\"\n", - " }\n", - "};\n", - "\n", - "const response7 = await withMessageHistory2.invoke(\n", - " {\n", - " input: \"whats my name?\",\n", - " chat_history: [],\n", - " },\n", - " config4,\n", - ")\n", - "\n", - "response7.content" + " ],\n", + "}\n", + "const output8 = await app3.invoke(input7, config4)\n", + "console.log(output8.messages[output8.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To help you understand what's happening internally, check out [this LangSmith trace](https://smith.langchain.com/public/d61630b7-6a52-4dc9-974c-8452008c498a/r)." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "There's now two new messages in the chat history. This means that even more information that used to be accessible in our conversation history is no longer available!" + "## Managing Conversation History\n", + "\n", + "One important concept to understand when building chatbots is how to manage conversation history. If left unmanaged, the list of messages will grow unbounded and potentially overflow the context window of the LLM. 
Therefore, it is important to add a step that limits the size of the messages you are passing in.\n", + "\n", + "**Importantly, you will want to do this BEFORE the prompt template but AFTER you load previous messages from Message History.**\n", + "\n", + "We can do this by adding a simple step in front of the prompt that modifies the `messages` key appropriately, and then wrap that new chain in the Message History class. \n", + "\n", + "LangChain comes with a few built-in helpers for [managing a list of messages](/docs/how_to/#messages). In this case we'll use the [trimMessages](/docs/how_to/trim_messages/) helper to reduce how many messages we're sending to the model. The trimmer allows us to specify how many tokens we want to keep, along with other parameters like if we want to always keep the system message and whether to allow partial messages:" ] }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 54, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "\u001b[32m\"You haven't mentioned your favorite ice cream yet. What is it?\"\u001b[39m" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " SystemMessage {\n", + " \"content\": \"you're a good assistant\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " HumanMessage {\n", + " \"content\": \"I like vanilla ice cream\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"content\": \"nice\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " },\n", + " HumanMessage {\n", + " \"content\": \"whats 2 + 2\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"content\": \"4\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " },\n", + " HumanMessage {\n", + " \"content\": \"thanks\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"content\": \"no problem!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " },\n", + " HumanMessage {\n", + " \"content\": \"having fun?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"content\": \"yes!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " }\n", + "]\n" + ] } ], "source": [ - "const response8 = await withMessageHistory2.invoke({\n", - " input: \"whats my favorite ice cream?\",\n", - " chat_history: [],\n", - "}, config4);\n", + "import { SystemMessage, HumanMessage, AIMessage, trimMessages } from \"@langchain/core/messages\"\n", + "\n", + "const trimmer = trimMessages({\n", + " maxTokens: 10,\n", + " strategy: \"last\",\n", + " tokenCounter: (msgs) => msgs.length,\n", + " includeSystem: true,\n", + " allowPartial: false,\n", + " startOn: \"human\",\n", + "})\n", "\n", - "response8.content" + "const messages = [\n", + " new SystemMessage(\"you're a good assistant\"),\n", + " new HumanMessage(\"hi! 
I'm bob\"),\n", + " new AIMessage(\"hi!\"),\n", + " new HumanMessage(\"I like vanilla ice cream\"),\n", + " new AIMessage(\"nice\"),\n", + " new HumanMessage(\"whats 2 + 2\"),\n", + " new AIMessage(\"4\"),\n", + " new HumanMessage(\"thanks\"),\n", + " new AIMessage(\"no problem!\"),\n", + " new HumanMessage(\"having fun?\"),\n", + " new AIMessage(\"yes!\"),\n", + "]\n", + "\n", + "await trimmer.invoke(messages)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "If you take a look at LangSmith, you can see exactly what is happening under the hood in the [LangSmith trace](https://smith.langchain.com/public/ebc2e1e7-0703-43f7-a476-8cb8cbd7f61a/r). Navigate to the chat model call to see exactly which messages are getting filtered out." + "To use it in our chain, we just need to run the trimmer before we pass the `messages` input to our prompt. " ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": 55, "metadata": {}, + "outputs": [], "source": [ - "## Streaming\n", + "const callModel4 = async (state: typeof GraphAnnotation.State) => {\n", + " const chain = prompt2.pipe(llm);\n", + " // highlight-start\n", + " const trimmedMessage = await trimmer.invoke(state.messages);\n", + " const response = await chain.invoke({ messages: trimmedMessage, language: state.language });\n", + " // highlight-end\n", + " return { messages: [response] };\n", + "};\n", "\n", - "Now we've got a functional chatbot. However, one *really* important UX consideration for chatbot application is streaming. LLMs can sometimes take a while to respond, and so in order to improve the user experience one thing that most application do is stream back each token as it is generated. This allows the user to see progress.\n", "\n", - "It's actually super easy to do this!\n", + "const workflow4 = new StateGraph(GraphAnnotation)\n", + " .addNode(\"model\", callModel4)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", "\n", - "All chains expose a `.stream()` method, and ones that use message history are no different. We can simply use that method to get back a streaming response." 
+ "const app4 = workflow4.compile({ checkpointer: new MemorySaver() });" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now if we try asking the model our name, it won't know it since we trimmed that part of the chat history:" ] }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 56, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "| \n", - "| Hi\n", - "| Todd\n", - "| !\n", - "| Here\n", - "| ’s\n", - "| a\n", - "| joke\n", - "| for\n", - "| you\n", - "| :\n", - "| \n", - "\n", - "\n", - "| Why\n", - "| did\n", - "| the\n", - "| scare\n", - "| crow\n", - "| win\n", - "| an\n", - "| award\n", - "| ?\n", - "| \n", - "\n", - "\n", - "| Because\n", - "| he\n", - "| was\n", - "| outstanding\n", - "| in\n", - "| his\n", - "| field\n", - "| !\n", - "| \n" + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUdCOvzRAvgoxd2sf93oGKQfA9vh\",\n", + " \"content\": \"I don’t know your name, but I’d be happy to learn it if you’d like to share!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 22,\n", + " \"promptTokens\": 97,\n", + " \"totalTokens\": 119\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 97,\n", + " \"output_tokens\": 22,\n", + " \"total_tokens\": 119\n", + " }\n", + "}\n" ] } ], "source": [ - "const config5 = {\n", - " configurable: {\n", - " sessionId: \"abc6\"\n", - " }\n", - "};\n", + "const config5 = { configurable: { thread_id: uuidv4() }}\n", + "const input8 = {\n", + " // highlight-next-line\n", + " messages: [...messages, new HumanMessage(\"What is my name?\")],\n", + " language: \"English\"\n", + "}\n", "\n", - "const stream = await withMessageHistory2.stream({\n", - " input: \"hi! I'm todd. 
tell me a joke\",\n", - " chat_history: [],\n", - "}, config5);\n", + "const output9 = await app4.invoke(\n", + " input8,\n", + " config5,\n", + ")\n", + "console.log(output9.messages[output9.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "But if we ask about information that is within the last few messages, it remembers:" + ] + }, + { + "cell_type": "code", + "execution_count": 57, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUdChq5JOMhcFA1dB7PvCHLyliwM\",\n", + " \"content\": \"You asked for the solution to the math problem \\\"what's 2 + 2,\\\" and I answered that it equals 4.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 27,\n", + " \"promptTokens\": 99,\n", + " \"totalTokens\": 126\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 99,\n", + " \"output_tokens\": 27,\n", + " \"total_tokens\": 126\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const config6 = { configurable: { thread_id: uuidv4() }}\n", + "const input9 = {\n", + " // highlight-next-line\n", + " messages: [...messages, new HumanMessage(\"What math problem did I ask?\")],\n", + " language: \"English\"\n", + "}\n", "\n", - "for await (const chunk of stream) {\n", - " console.log(\"|\", chunk.content);\n", - "}" + "const output10 = await app4.invoke(\n", + " input9,\n", + " config6,\n", + ")\n", + "console.log(output10.messages[output10.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you take a look at LangSmith, you can see exactly what is happening under the hood in the [LangSmith trace](https://smith.langchain.com/public/bf1b1a10-0fe0-42f6-9f0f-b70d9f7520dc/r)." 
] }, { @@ -771,12 +1115,14 @@ "Now that you understand the basics of how to create a chatbot in LangChain, some more advanced tutorials you may be interested in are:\n", "\n", "- [Conversational RAG](/docs/tutorials/qa_chat_history): Enable a chatbot experience over an external source of data\n", - "- [Agents](https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/): Build a chatbot that can take actions\n", + "- [Agents](https://langchain-ai.github.io/langgraphjs/tutorials/multi_agent/agent_supervisor/): Build a chatbot that can take actions\n", "\n", "If you want to dive deeper on specifics, some things worth checking out are:\n", "\n", "- [Streaming](/docs/how_to/streaming): streaming is *crucial* for chat applications\n", - "- [How to add message history](/docs/how_to/message_history): for a deeper dive into all things related to message history" + "- [How to add message history](/docs/how_to/message_history): for a deeper dive into all things related to message history\n", + "- [How to manage large message history](/docs/how_to/trim_messages/): more techniques for managing a large chat history\n", + "- [LangGraph main docs](https://langchain-ai.github.io/langgraph/): for more detail on building with LangGraph" ] } ], @@ -787,12 +1133,15 @@ "name": "deno" }, "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, "file_extension": ".ts", - "mimetype": "text/x.typescript", + "mimetype": "text/typescript", "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" + "version": "3.7.2" } }, "nbformat": 4, diff --git a/docs/core_docs/docs/tutorials/qa_chat_history.ipynb b/docs/core_docs/docs/tutorials/qa_chat_history.ipynb index b6bd8146c71a..b1694c4413ab 100644 --- a/docs/core_docs/docs/tutorials/qa_chat_history.ipynb +++ b/docs/core_docs/docs/tutorials/qa_chat_history.ipynb @@ -1,390 +1,1438 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Conversational RAG\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Chat history](/docs/concepts/#chat-history)\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [Embeddings](/docs/concepts/#embedding-models)\n", - "- [Vector stores](/docs/concepts/#vector-stores)\n", - "- [Retrieval-augmented generation](/docs/tutorials/rag/)\n", - "- [Tools](/docs/concepts/#tools)\n", - "- [Agents](/docs/concepts/#agents)\n", - "\n", - ":::\n", - "\n", - "In many Q&A applications we want to allow the user to have a back-and-forth conversation, meaning the application needs some sort of \"memory\" of past questions and answers, and some logic for incorporating those into its current thinking.\n", - "\n", - "In this guide we focus on **adding logic for incorporating historical messages.** Further details on chat history management is [covered here](/docs/how_to/message_history).\n", - "\n", - "We will cover two approaches:\n", - "\n", - "1. Chains, in which we always execute a retrieval step;\n", - "2. Agents, in which we give an LLM discretion over whether and how to execute a retrieval step (or multiple steps).\n", - "\n", - "For the external knowledge source, we will use the same [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng from the [RAG tutorial](/docs/tutorials/rag)." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Setup\n", - "### Dependencies\n", - "\n", - "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/concepts/#chat-models) or [LLM](/docs/concepts#llms), [Embeddings](/docs/concepts#embedding-models), and [VectorStore](/docs/concepts#vectorstores) or [Retriever](/docs/concepts#retrievers).\n", - "\n", - "We’ll use the following packages:\n", - "\n", - "```bash\n", - "npm install --save langchain @langchain/openai cheerio\n", - "```\n", - "\n", - "We need to set environment variable `OPENAI_API_KEY`:\n", - "\n", - "```bash\n", - "export OPENAI_API_KEY=YOUR_KEY\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### LangSmith\n", - "\n", - "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://smith.langchain.com/).\n", - "\n", - "Note that LangSmith is not needed, but it is helpful. If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\n", - "\n", - "\n", - "```bash\n", - "export LANGCHAIN_TRACING_V2=true\n", - "export LANGCHAIN_API_KEY=YOUR_KEY\n", - "\n", - "# Reduce tracing latency if you are not in a serverless environment\n", - "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Initial setup" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "import \"cheerio\";\n", - "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n", - "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\"\n", - "import { OpenAIEmbeddings, ChatOpenAI } from \"@langchain/openai\";\n", - "import { pull } from \"langchain/hub\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { RunnableSequence, RunnablePassthrough } from \"@langchain/core/runnables\";\n", - "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "\n", - "import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n", - "\n", - "const loader = new CheerioWebBaseLoader(\n", - " \"https://lilianweng.github.io/posts/2023-06-23-agent/\"\n", - ");\n", - "\n", - "const docs = await loader.load();\n", - "\n", - "const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n", - "const splits = await textSplitter.splitDocuments(docs);\n", - "const vectorStore = await MemoryVectorStore.fromDocuments(splits, new OpenAIEmbeddings());\n", - "\n", - "// Retrieve and generate using the relevant snippets of the blog.\n", - "const retriever = vectorStore.asRetriever();\n", - "const prompt = await pull(\"rlm/rag-prompt\");\n", - "const llm = new ChatOpenAI({ model: \"gpt-3.5-turbo\", temperature: 0 });\n", - "const ragChain = await createStuffDocumentsChain({\n", - " llm,\n", - " prompt,\n", - " outputParser: new StringOutputParser(),\n", - "});" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's 
see what this prompt actually looks like:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\n", - "Question: {question} \n", - "Context: {context} \n", - "Answer:\n" - ] - } - ], - "source": [ - "console.log(prompt.promptMessages.map((msg) => msg.prompt.template).join(\"\\n\"));" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\u001b[32m\"Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. I\"\u001b[39m... 208 more characters" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await ragChain.invoke({\n", - " context: await retriever.invoke(\"What is Task Decomposition?\"),\n", - " question: \"What is Task Decomposition?\"\n", - "});" - ] - }, + "cells": [ + { + "cell_type": "raw", + "id": "023635f2-71cf-43f2-a2e2-a7b4ced30a74", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 2\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "86fc5bb2-017f-434e-8cd6-53ab214a5604", + "metadata": {}, + "source": [ + "# Conversational RAG\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chat history](/docs/concepts/#chat-history)\n", + "- [Chat models](/docs/concepts/#chat-models)\n", + "- [Embeddings](/docs/concepts/#embedding-models)\n", + "- [Vector stores](/docs/concepts/#vector-stores)\n", + "- [Retrieval-augmented generation](/docs/tutorials/rag/)\n", + "- [Tools](/docs/concepts/#tools)\n", + "- [Agents](/docs/concepts/#agents)\n", + "\n", + ":::\n", + "\n", + "In many Q&A applications we want to allow the user to have a back-and-forth conversation, meaning the application needs some sort of \"memory\" of past questions and answers, and some logic for incorporating those into its current thinking.\n", + "\n", + "In this guide we focus on **adding logic for incorporating historical messages.** Further details on chat history management is [covered here](/docs/how_to/message_history).\n", + "\n", + "We will cover two approaches:\n", + "\n", + "1. Chains, in which we always execute a retrieval step;\n", + "2. Agents, in which we give an LLM discretion over whether and how to execute a retrieval step (or multiple steps).\n", + "\n", + "For the external knowledge source, we will use the same [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng from the [RAG tutorial](/docs/tutorials/rag)." 
+ ] + }, + { + "cell_type": "markdown", + "id": "487d8d79-5ee9-4aa4-9fdf-cd5f4303e099", + "metadata": {}, + "source": [ + "## Setup\n", + "### Dependencies\n", + "\n", + "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/concepts/#chat-models) or [LLM](/docs/concepts#llms), [Embeddings](/docs/concepts#embedding-models), and [VectorStore](/docs/concepts#vectorstores) or [Retriever](/docs/concepts#retrievers).\n", + "\n", + "We’ll use the following packages:\n", + "\n", + "```bash\n", + "npm install --save langchain @langchain/openai langchain cheerio\n", + "```\n", + "\n", + "We need to set environment variable `OPENAI_API_KEY`:\n", + "\n", + "```bash\n", + "export OPENAI_API_KEY=YOUR_KEY\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "1665e740-ce01-4f09-b9ed-516db0bd326f", + "metadata": {}, + "source": [ + "### LangSmith\n", + "\n", + "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://docs.smith.langchain.com).\n", + "\n", + "Note that LangSmith is not needed, but it is helpful. If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\n", + "\n", + "\n", + "```bash\n", + "export LANGCHAIN_TRACING_V2=true\n", + "export LANGCHAIN_API_KEY=YOUR_KEY\n", + "\n", + "# Reduce tracing latency if you are not in a serverless environment\n", + "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "fa6ba684-26cf-4860-904e-a4d51380c134", + "metadata": {}, + "source": [ + "## Chains {#chains}\n" + ] + }, + { + "cell_type": "markdown", + "id": "7d2cf4ef", + "metadata": {}, + "source": [ + "\n", + "Let's first revisit the Q&A app we built over the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng in the [RAG tutorial](/docs/tutorials/rag)." + ] + }, + { + "cell_type": "markdown", + "id": "646840fb-5212-48ea-8bc7-ec7be5ec727e", + "metadata": {}, + "source": [ + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "cb58f273-2111-4a9b-8932-9b64c95030c8", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({ model: \"gpt-4o\" });" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "820244ae-74b4-4593-b392-822979dd91b8", + "metadata": {}, + "outputs": [], + "source": [ + "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n", + "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { createRetrievalChain } from \"langchain/chains/retrieval\";\n", + "import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n", + "\n", + "// 1. 
Load, chunk and index the contents of the blog to create a retriever.\n", + "const loader = new CheerioWebBaseLoader(\n", + " \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n", + " {\n", + " selector: \".post-content, .post-title, .post-header\"\n", + " }\n", + ");\n", + "const docs = await loader.load();\n", + "\n", + "const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n", + "const splits = await textSplitter.splitDocuments(docs);\n", + "const vectorstore = await MemoryVectorStore.fromDocuments(splits, new OpenAIEmbeddings());\n", + "const retriever = vectorstore.asRetriever();\n", + "\n", + "// 2. Incorporate the retriever into a question-answering chain.\n", + "const systemPrompt = \n", + " \"You are an assistant for question-answering tasks. \" +\n", + " \"Use the following pieces of retrieved context to answer \" +\n", + " \"the question. If you don't know the answer, say that you \" +\n", + " \"don't know. Use three sentences maximum and keep the \" +\n", + " \"answer concise.\" +\n", + " \"\\n\\n\" +\n", + " \"{context}\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", systemPrompt],\n", + " [\"human\", \"{input}\"],\n", + "]);\n", + "\n", + "const questionAnswerChain = await createStuffDocumentsChain({\n", + " llm,\n", + " prompt,\n", + "});\n", + "\n", + "const ragChain = await createRetrievalChain({\n", + " retriever,\n", + " combineDocsChain: questionAnswerChain,\n", + "});" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "bf55faaf-0d17-4b74-925d-c478b555f7b2", + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Contextualizing the question\n", - "\n", - "First we'll need to define a sub-chain that takes historical messages and the latest user question, and reformulates the question if it makes reference to any information in the historical information.\n", - "\n", - "We'll use a prompt that includes a `MessagesPlaceholder` variable under the name \"chat_history\". This allows us to pass in a list of Messages to the prompt using the \"chat_history\" input key, and these messages will be inserted after the system message and before the human message containing the latest question." - ] - }, + "name": "stdout", + "output_type": "stream", + "text": [ + "Task decomposition involves breaking down large and complex tasks into smaller, more manageable subgoals or steps. This approach helps agents or models efficiently handle intricate tasks by simplifying them into easier components. Task decomposition can be achieved through techniques like Chain of Thought, Tree of Thoughts, or by using task-specific instructions and human input.\n" + ] + } + ], + "source": [ + "const response = await ragChain.invoke({ input: \"What is Task Decomposition?\" });\n", + "console.log(response.answer);" + ] + }, + { + "cell_type": "markdown", + "id": "187404c7-db47-49c5-be29-9ecb96dc9afa", + "metadata": {}, + "source": [ + "Note that we have used the built-in chain constructors `createStuffDocumentsChain` and `createRetrievalChain`, so that the basic ingredients to our solution are:\n", + "\n", + "1. retriever;\n", + "2. prompt;\n", + "3. LLM.\n", + "\n", + "This will simplify the process of incorporating chat history.\n", + "\n", + "### Adding chat history\n", + "\n", + "The chain we have built uses the input query directly to retrieve relevant context. 
But in a conversational setting, the user query might require conversational context to be understood. For example, consider this exchange:\n", + "\n", + "> Human: \"What is Task Decomposition?\"\n", + ">\n", + "> AI: \"Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable for an agent or model.\"\n", + ">\n", + "> Human: \"What are common ways of doing it?\"\n", + "\n", + "In order to answer the second question, our system needs to understand that \"it\" refers to \"Task Decomposition.\"\n", + "\n", + "We'll need to update two things about our existing app:\n", + "\n", + "1. **Prompt**: Update our prompt to support historical messages as an input.\n", + "2. **Contextualizing questions**: Add a sub-chain that takes the latest user question and reformulates it in the context of the chat history. This can be thought of simply as building a new \"history aware\" retriever. Whereas before we had:\n", + " - `query` -> `retriever` \n", + " Now we will have:\n", + " - `(query, conversation history)` -> `LLM` -> `rephrased query` -> `retriever`" + ] + }, + { + "cell_type": "markdown", + "id": "776ae958-cbdc-4471-8669-c6087436f0b5", + "metadata": {}, + "source": [ + "#### Contextualizing the question\n", + "\n", + "First we'll need to define a sub-chain that takes historical messages and the latest user question, and reformulates the question if it makes reference to any information in the historical information.\n", + "\n", + "We'll use a prompt that includes a `MessagesPlaceholder` variable under the name \"chat_history\". This allows us to pass in a list of Messages to the prompt using the \"chat_history\" input key, and these messages will be inserted after the system message and before the human message containing the latest question.\n", + "\n", + "Note that we leverage a helper function [createHistoryAwareRetriever](https://api.js.langchain.com/functions/langchain.chains_history_aware_retriever.createHistoryAwareRetriever.html) for this step, which manages the case where `chat_history` is empty, and otherwise applies `prompt.pipe(llm).pipe(new StringOutputParser()).pipe(retriever)` in sequence.\n", + "\n", + "`createHistoryAwareRetriever` constructs a chain that accepts keys `input` and `chat_history` as input, and has the same output schema as a retriever." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "2b685428-8b82-4af1-be4f-7232c5d55b73", + "metadata": {}, + "outputs": [], + "source": [ + "import { createHistoryAwareRetriever } from \"langchain/chains/history_aware_retriever\";\n", + "import { MessagesPlaceholder } from \"@langchain/core/prompts\";\n", + "\n", + "const contextualizeQSystemPrompt = \n", + " \"Given a chat history and the latest user question \" +\n", + " \"which might reference context in the chat history, \" +\n", + " \"formulate a standalone question which can be understood \" +\n", + " \"without the chat history. 
Do NOT answer the question, \" +\n", + "  \"just reformulate it if needed and otherwise return it as is.\";\n", + "\n", + "const contextualizeQPrompt = ChatPromptTemplate.fromMessages([\n", + "  [\"system\", contextualizeQSystemPrompt],\n", + "  new MessagesPlaceholder(\"chat_history\"),\n", + "  [\"human\", \"{input}\"],\n", + "]);\n", + "\n", + "const historyAwareRetriever = await createHistoryAwareRetriever({\n", + "  llm,\n", + "  retriever,\n", + "  rephrasePrompt: contextualizeQPrompt,\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "42a47168-4a1f-4e39-bd2d-d5b03609a243", + "metadata": {}, + "source": [ + "This chain prepends a rephrasing of the input query to our retriever, so that the retrieval incorporates the context of the conversation.\n", + "\n", + "Now we can build our full QA chain. This is as simple as updating the retriever to be our new `historyAwareRetriever`.\n", + "\n", + "Again, we will use [createStuffDocumentsChain](https://api.js.langchain.com/functions/langchain.chains_combine_documents.createStuffDocumentsChain.html) to generate a `questionAnswerChain2`, with input keys `context`, `chat_history`, and `input` -- it accepts the retrieved context alongside the conversation history and query to generate an answer. A more detailed explanation can be found [here](/docs/tutorials/rag/#built-in-chains).\n", + "\n", + "We build our final `ragChain2` with [createRetrievalChain](https://api.js.langchain.com/functions/langchain.chains_retrieval.createRetrievalChain.html). This chain applies the `historyAwareRetriever` and `questionAnswerChain2` in sequence, retaining intermediate outputs such as the retrieved context for convenience. It has input keys `input` and `chat_history`, and includes `input`, `chat_history`, `context`, and `answer` in its output." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "66f275f3-ddef-4678-b90d-ee64576878f9", + "metadata": {}, + "outputs": [], + "source": [ + "const qaPrompt = ChatPromptTemplate.fromMessages([\n", + "  [\"system\", systemPrompt],\n", + "  new MessagesPlaceholder(\"chat_history\"),\n", + "  [\"human\", \"{input}\"],\n", + "]);\n", + "\n", + "const questionAnswerChain2 = await createStuffDocumentsChain({\n", + "  llm,\n", + "  prompt: qaPrompt,\n", + "});\n", + "\n", + "const ragChain2 = await createRetrievalChain({\n", + "  retriever: historyAwareRetriever,\n", + "  combineDocsChain: questionAnswerChain2,\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "1ba1ae56-7ecb-4563-b792-50a1a5042df3", + "metadata": {}, + "source": [ + "Let's try this. Below we ask a question and a follow-up question that requires contextualization to return a sensible response. Because our chain includes a `\"chat_history\"` input, the caller needs to manage the chat history. We can achieve this by appending input and output messages to a list:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "0005810b-1b95-4666-a795-08d80e478b83", + "metadata": {}, + "outputs": [ { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n", - "\n", - "const contextualizeQSystemPrompt = `Given a chat history and the latest user question\n", - "which might reference context in the chat history, formulate a standalone question\n", - "which can be understood without the chat history. 
Do NOT answer the question,\n", - "just reformulate it if needed and otherwise return it as is.`;\n", - "\n", - "const contextualizeQPrompt = ChatPromptTemplate.fromMessages([\n", - " [\"system\", contextualizeQSystemPrompt],\n", - " new MessagesPlaceholder(\"chat_history\"),\n", - " [\"human\", \"{question}\"]\n", - "]);\n", - "const contextualizeQChain = contextualizeQPrompt.pipe(llm).pipe(new StringOutputParser());" - ] - }, + "name": "stdout", + "output_type": "stream", + "text": [ + "Common ways of doing Task Decomposition include:\n", + "1. Using simple prompting with an LLM, such as asking it to outline steps or subgoals for a task.\n", + "2. Employing task-specific instructions, like \"Write a story outline\" for writing a novel.\n", + "3. Incorporating human inputs for guidance.\n", + "Additionally, advanced approaches like Chain of Thought (CoT) and Tree of Thoughts (ToT) can further refine the process, and using an external classical planner with PDDL (as in LLM+P) is another option.\n" + ] + } + ], + "source": [ + "import { BaseMessage, HumanMessage, AIMessage } from \"@langchain/core/messages\";\n", + "\n", + "let chatHistory: BaseMessage[] = [];\n", + "\n", + "const question = \"What is Task Decomposition?\";\n", + "const aiMsg1 = await ragChain2.invoke({ input: question, chat_history: chatHistory });\n", + "chatHistory = chatHistory.concat([\n", + " new HumanMessage(question),\n", + " new AIMessage(aiMsg1.answer),\n", + "]);\n", + "\n", + "const secondQuestion = \"What are common ways of doing it?\";\n", + "const aiMsg2 = await ragChain2.invoke({ input: secondQuestion, chat_history: chatHistory });\n", + "\n", + "console.log(aiMsg2.answer);" + ] + }, + { + "cell_type": "markdown", + "id": "53a662c2-f38b-45f9-95c4-66de15637614", + "metadata": {}, + "source": [ + "#### Stateful management of chat history\n", + "\n", + "Here we've gone over how to add application logic for incorporating historical outputs, but we're still manually updating the chat history and inserting it into each input. In a real Q&A application we'll want some way of persisting chat history and some way of automatically inserting and updating it.\n", + "\n", + "For this we can use:\n", + "\n", + "- [BaseChatMessageHistory](https://api.js.langchain.com/classes/_langchain_core.chat_history.BaseChatMessageHistory.html): Store chat history.\n", + "- [RunnableWithMessageHistory](/docs/how_to/message_history): Wrapper for an LCEL chain and a `BaseChatMessageHistory` that handles injecting chat history into inputs and updating it after each invocation.\n", + "\n", + "For a detailed walkthrough of how to use these classes together to create a stateful conversational chain, head to the [How to add message history (memory)](/docs/how_to/message_history) LCEL page.\n", + "\n", + "Instances of `RunnableWithMessageHistory` manage the chat history for you. They accept a config with a key (`\"sessionId\"` by default) that specifies what conversation history to fetch and prepend to the input, and append the output to the same conversation history. 
Below is an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "9c3fb176-8d6a-4dc7-8408-6a22c5f7cc72", + "metadata": {}, + "outputs": [], + "source": [ + "import { RunnableWithMessageHistory } from \"@langchain/core/runnables\";\n", + "import { ChatMessageHistory } from \"langchain/stores/message/in_memory\";\n", + "\n", + "const demoEphemeralChatMessageHistoryForChain = new ChatMessageHistory();\n", + "\n", + "const conversationalRagChain = new RunnableWithMessageHistory({\n", + " runnable: ragChain2,\n", + " getMessageHistory: (_sessionId) => demoEphemeralChatMessageHistoryForChain,\n", + " inputMessagesKey: \"input\",\n", + " historyMessagesKey: \"chat_history\",\n", + " outputMessagesKey: \"answer\",\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "1046c92f-21b3-4214-907d-92878d8cba23", + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Using this chain we can ask follow-up questions that reference past messages and have them reformulated into standalone questions:" - ] - }, + "name": "stdout", + "output_type": "stream", + "text": [ + "Task Decomposition involves breaking down complicated tasks into smaller, more manageable subgoals. Techniques such as the Chain of Thought (CoT) and Tree of Thoughts extend this by decomposing problems into multiple thought steps and exploring multiple reasoning possibilities at each step. LLMs can perform task decomposition using simple prompts, task-specific instructions, or human inputs, and some approaches like LLM+P involve using external classical planners.\n" + ] + } + ], + "source": [ + "const result1 = await conversationalRagChain.invoke(\n", + " { input: \"What is Task Decomposition?\" },\n", + " { configurable: { sessionId: \"abc123\" } }\n", + ");\n", + "console.log(result1.answer);" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "0e89c75f-7ad7-4331-a2fe-57579eb8f840", + "metadata": {}, + "outputs": [ { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\u001b[32m'What is the definition of \"large\" in the context of a language model?'\u001b[39m" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n", - "\n", - "await contextualizeQChain.invoke({\n", - " chat_history: [\n", - " new HumanMessage(\"What does LLM stand for?\"),\n", - " new AIMessage(\"Large language model\") \n", - " ],\n", - " question: \"What is meant by large\",\n", - "})" - ] - }, + "name": "stdout", + "output_type": "stream", + "text": [ + "Common ways of doing task decomposition include:\n", + "\n", + "1. Using simple prompting with an LLM, such as \"Steps for XYZ.\\n1.\" or \"What are the subgoals for achieving XYZ?\"\n", + "2. Utilizing task-specific instructions, like \"Write a story outline.\" for writing a novel.\n", + "3. Incorporating human inputs to guide and refine the decomposition process. 
\n", + "\n", + "Additionally, the LLM+P approach utilizes an external classical planner, involving PDDL to describe and plan complex tasks.\n" + ] + } + ], + "source": [ + "const result2 = await conversationalRagChain.invoke(\n", + " { input: \"What are common ways of doing it?\" },\n", + " { configurable: { sessionId: \"abc123\" } }\n", + ");\n", + "console.log(result2.answer);" + ] + }, + { + "cell_type": "markdown", + "id": "0ab1ded4-76d9-453f-9b9b-db9a4560c737", + "metadata": {}, + "source": [ + "### Tying it together" + ] + }, + { + "cell_type": "markdown", + "id": "8a08a5ea-df5b-4547-93c6-2a3940dd5c3e", + "metadata": {}, + "source": [ + "![](../../static/img/conversational_retrieval_chain.png)\n", + "\n", + "For convenience, we tie together all of the necessary steps in a single code cell:" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "71c32048-1a41-465f-a9e2-c4affc332fd9", + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Chain with chat history\n", - "\n", - "And now we can build our full QA chain. \n", - "\n", - "Notice we add some routing functionality to only run the \"condense question chain\" when our chat history isn't empty. Here we're taking advantage of the fact that if a function in an LCEL chain returns another chain, that chain will itself be invoked." - ] - }, + "name": "stdout", + "output_type": "stream", + "text": [ + "{ input: 'What is Task Decomposition?' }\n", + "----\n", + "{ chat_history: [] }\n", + "----\n", + "{\n", + " context: [\n", + " Document {\n", + " pageContent: 'Fig. 1. Overview of a LLM-powered autonomous agent system.\\n' +\n", + " 'Component One: Planning#\\n' +\n", + " 'A complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\n' +\n", + " 'Task Decomposition#\\n' +\n", + " 'Chain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n' +\n", + " 'Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.',\n", + " metadata: [Object],\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n' +\n", + " 'Another quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. 
In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\n' +\n", + " 'Self-Reflection#',\n", + " metadata: [Object],\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Planning\\n' +\n", + " '\\n' +\n", + " 'Subgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\n' +\n", + " 'Reflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'Memory\\n' +\n", + " '\\n' +\n", + " 'Short-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\n' +\n", + " 'Long-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'Tool use\\n' +\n", + " '\\n' +\n", + " 'The agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.',\n", + " metadata: [Object],\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Resources:\\n' +\n", + " '1. Internet access for searches and information gathering.\\n' +\n", + " '2. Long Term memory management.\\n' +\n", + " '3. GPT-3.5 powered Agents for delegation of simple tasks.\\n' +\n", + " '4. File output.\\n' +\n", + " '\\n' +\n", + " 'Performance Evaluation:\\n' +\n", + " '1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n' +\n", + " '2. Constructively self-criticize your big-picture behavior constantly.\\n' +\n", + " '3. Reflect on past decisions and strategies to refine your approach.\\n' +\n", + " '4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.',\n", + " metadata: [Object],\n", + " id: undefined\n", + " }\n", + " ]\n", + "}\n", + "----\n", + "{ answer: '' }\n", + "----\n", + "{ answer: 'Task' }\n", + "----\n", + "{ answer: ' decomposition' }\n", + "----\n", + "{ answer: ' involves' }\n", + "----\n", + "{ answer: ' breaking' }\n", + "----\n", + "{ answer: ' down' }\n", + "----\n", + "{ answer: ' a' }\n", + "----\n", + "{ answer: ' complex' }\n", + "----\n", + "{ answer: ' task' }\n", + "----\n", + "{ answer: ' into' }\n", + "----\n", + "{ answer: ' smaller' }\n", + "----\n", + "{ answer: ' and' }\n", + "----\n", + "{ answer: ' more' }\n", + "----\n", + "{ answer: ' manageable' }\n", + "----\n", + "{ answer: ' sub' }\n", + "----\n", + "{ answer: 'goals' }\n", + "----\n", + "{ answer: ' or' }\n", + "----\n", + "{ answer: ' steps' }\n", + "----\n", + "{ answer: '.' 
}\n", + "----\n", + "{ answer: ' This' }\n", + "----\n", + "{ answer: ' process' }\n", + "----\n", + "{ answer: ' allows' }\n", + "----\n", + "{ answer: ' an' }\n", + "----\n", + "{ answer: ' agent' }\n", + "----\n", + "{ answer: ' or' }\n", + "----\n", + "{ answer: ' model' }\n", + "----\n", + "{ answer: ' to' }\n", + "----\n", + "{ answer: ' efficiently' }\n", + "----\n", + "{ answer: ' handle' }\n", + "----\n", + "{ answer: ' intricate' }\n", + "----\n", + "{ answer: ' tasks' }\n", + "----\n", + "{ answer: ' by' }\n", + "----\n", + "{ answer: ' dividing' }\n", + "----\n", + "{ answer: ' them' }\n", + "----\n", + "{ answer: ' into' }\n", + "----\n", + "{ answer: ' simpler' }\n", + "----\n", + "{ answer: ' components' }\n", + "----\n", + "{ answer: '.' }\n", + "----\n", + "{ answer: ' Task' }\n", + "----\n", + "{ answer: ' decomposition' }\n", + "----\n", + "{ answer: ' can' }\n", + "----\n", + "{ answer: ' be' }\n", + "----\n", + "{ answer: ' achieved' }\n", + "----\n", + "{ answer: ' through' }\n", + "----\n", + "{ answer: ' techniques' }\n", + "----\n", + "{ answer: ' like' }\n", + "----\n", + "{ answer: ' Chain' }\n", + "----\n", + "{ answer: ' of' }\n", + "----\n", + "{ answer: ' Thought' }\n", + "----\n", + "{ answer: ',' }\n", + "----\n", + "{ answer: ' Tree' }\n", + "----\n", + "{ answer: ' of' }\n", + "----\n", + "{ answer: ' Thoughts' }\n", + "----\n", + "{ answer: ',' }\n", + "----\n", + "{ answer: ' or' }\n", + "----\n", + "{ answer: ' by' }\n", + "----\n", + "{ answer: ' using' }\n", + "----\n", + "{ answer: ' task' }\n", + "----\n", + "{ answer: '-specific' }\n", + "----\n", + "{ answer: ' instructions' }\n", + "----\n", + "{ answer: '.' }\n", + "----\n", + "{ answer: '' }\n", + "----\n", + "{ answer: '' }\n", + "----\n" + ] + } + ], + "source": [ + "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n", + "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "import { OpenAIEmbeddings, ChatOpenAI } from \"@langchain/openai\";\n", + "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n", + "import { createHistoryAwareRetriever } from \"langchain/chains/history_aware_retriever\";\n", + "import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n", + "import { createRetrievalChain } from \"langchain/chains/retrieval\";\n", + "import { RunnableWithMessageHistory } from \"@langchain/core/runnables\";\n", + "import { ChatMessageHistory } from \"langchain/stores/message/in_memory\";\n", + "import { BaseChatMessageHistory } from \"@langchain/core/chat_history\";\n", + "\n", + "const llm2 = new ChatOpenAI({ model: \"gpt-3.5-turbo\", temperature: 0 });\n", + "\n", + "// Construct retriever\n", + "const loader2 = new CheerioWebBaseLoader(\n", + " \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n", + " {\n", + " selector: \".post-content, .post-title, .post-header\"\n", + " }\n", + ");\n", + "\n", + "const docs2 = await loader2.load();\n", + "\n", + "const textSplitter2 = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n", + "const splits2 = await textSplitter2.splitDocuments(docs2);\n", + "const vectorstore2 = await MemoryVectorStore.fromDocuments(splits2, new OpenAIEmbeddings());\n", + "const retriever2 = vectorstore2.asRetriever();\n", + "\n", + "// Contextualize question\n", + "const contextualizeQSystemPrompt2 = \n", + " \"Given a chat history 
and the latest user question \" +\n", + "  \"which might reference context in the chat history, \" +\n", + "  \"formulate a standalone question which can be understood \" +\n", + "  \"without the chat history. Do NOT answer the question, \" +\n", + "  \"just reformulate it if needed and otherwise return it as is.\";\n", + "\n", + "const contextualizeQPrompt2 = ChatPromptTemplate.fromMessages([\n", + "  [\"system\", contextualizeQSystemPrompt2],\n", + "  new MessagesPlaceholder(\"chat_history\"),\n", + "  [\"human\", \"{input}\"],\n", + "]);\n", + "\n", + "const historyAwareRetriever2 = await createHistoryAwareRetriever({\n", + "  llm: llm2,\n", + "  retriever: retriever2,\n", + "  rephrasePrompt: contextualizeQPrompt2\n", + "});\n", + "\n", + "// Answer question\n", + "const systemPrompt2 = \n", + "  \"You are an assistant for question-answering tasks. \" +\n", + "  \"Use the following pieces of retrieved context to answer \" +\n", + "  \"the question. If you don't know the answer, say that you \" +\n", + "  \"don't know. Use three sentences maximum and keep the \" +\n", + "  \"answer concise.\" +\n", + "  \"\\n\\n\" +\n", + "  \"{context}\";\n", + "\n", + "const qaPrompt2 = ChatPromptTemplate.fromMessages([\n", + "  [\"system\", systemPrompt2],\n", + "  new MessagesPlaceholder(\"chat_history\"),\n", + "  [\"human\", \"{input}\"],\n", + "]);\n", + "\n", + "const questionAnswerChain3 = await createStuffDocumentsChain({\n", + "  llm,\n", + "  prompt: qaPrompt2,\n", + "});\n", + "\n", + "const ragChain3 = await createRetrievalChain({\n", + "  retriever: historyAwareRetriever2,\n", + "  combineDocsChain: questionAnswerChain3,\n", + "});\n", + "\n", + "// Statefully manage chat history\n", + "const store2: Record<string, BaseChatMessageHistory> = {};\n", + "\n", + "function getSessionHistory2(sessionId: string): BaseChatMessageHistory {\n", + "  if (!(sessionId in store2)) {\n", + "    store2[sessionId] = new ChatMessageHistory();\n", + "  }\n", + "  return store2[sessionId];\n", + "}\n", + "\n", + "const conversationalRagChain2 = new RunnableWithMessageHistory({\n", + "  runnable: ragChain3,\n", + "  getMessageHistory: getSessionHistory2,\n", + "  inputMessagesKey: \"input\",\n", + "  historyMessagesKey: \"chat_history\",\n", + "  outputMessagesKey: \"answer\",\n", + "});\n", + "\n", + "// Example usage\n", + "const query2 = \"What is Task Decomposition?\";\n", + "\n", + "for await (const s of await conversationalRagChain2.stream(\n", + "  { input: query2 },\n", + "  { configurable: { sessionId: \"unique_session_id\" } }\n", + ")) {\n", + "  console.log(s);\n", + "  console.log(\"----\");\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "861da8ed-d890-4fdc-a3bf-30433db61e0d", + "metadata": {}, + "source": [ + "## Agents {#agents}\n", + "\n", + "Agents leverage the reasoning capabilities of LLMs to make decisions during execution. Using agents allows you to offload some discretion over the retrieval process. Although their behavior is less predictable than chains, they offer some advantages in this context:\n", + "\n", + "- Agents generate the input to the retriever directly, without necessarily needing us to explicitly build in contextualization, as we did above;\n", + "- Agents can execute multiple retrieval steps in service of a query, or refrain from executing a retrieval step altogether (e.g., in response to a generic greeting from a user).\n", + "\n", + "### Retrieval tool\n", + "\n", + "Agents can access \"tools\" and manage their execution. 
In this case, we will convert our retriever into a LangChain tool to be wielded by the agent:" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "809cc747-2135-40a2-8e73-e4556343ee64", + "metadata": {}, + "outputs": [], + "source": [ + "import { createRetrieverTool } from \"langchain/tools/retriever\";\n", + "\n", + "const tool = createRetrieverTool(\n", + " retriever,\n", + " {\n", + " name: \"blog_post_retriever\",\n", + " description: \"Searches and returns excerpts from the Autonomous Agents blog post.\",\n", + " }\n", + ")\n", + "const tools = [tool]" + ] + }, + { + "cell_type": "markdown", + "id": "07dcb968-ed9a-458a-85e1-528cd28c6965", + "metadata": {}, + "source": [ + "Tools are LangChain [Runnables](/docs/concepts#langchain-expression-language-lcel), and implement the usual interface:" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "931c4fe3-c603-4efb-9b37-5f7cbbb1cbbd", + "metadata": {}, + "outputs": [ { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\"\n", - "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", - "import { formatDocumentsAsString } from \"langchain/util/document\";\n", - "\n", - "const qaSystemPrompt = `You are an assistant for question-answering tasks.\n", - "Use the following pieces of retrieved context to answer the question.\n", - "If you don't know the answer, just say that you don't know.\n", - "Use three sentences maximum and keep the answer concise.\n", - "\n", - "{context}`\n", - "\n", - "const qaPrompt = ChatPromptTemplate.fromMessages([\n", - " [\"system\", qaSystemPrompt],\n", - " new MessagesPlaceholder(\"chat_history\"),\n", - " [\"human\", \"{question}\"]\n", - "]);\n", - "\n", - "const contextualizedQuestion = (input: Record) => {\n", - " if (\"chat_history\" in input) {\n", - " return contextualizeQChain;\n", - " }\n", - " return input.question;\n", - "};\n", - "\n", - "const ragChain = RunnableSequence.from([\n", - " RunnablePassthrough.assign({\n", - " context: (input: Record) => {\n", - " if (\"chat_history\" in input) {\n", - " const chain = contextualizedQuestion(input);\n", - " return chain.pipe(retriever).pipe(formatDocumentsAsString);\n", - " }\n", - " return \"\";\n", - " },\n", - " }),\n", - " qaPrompt,\n", - " llm\n", - "])" - ] - }, + "name": "stdout", + "output_type": "stream", + "text": [ + "Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\n", + "Another quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\n", + "Self-Reflection#\n", + "\n", + "Fig. 1. 
Overview of a LLM-powered autonomous agent system.\n", + "Component One: Planning#\n", + "A complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\n", + "Task Decomposition#\n", + "Chain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\n", + "Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\n", + "\n", + "(3) Task execution: Expert models execute on the specific tasks and log results.\n", + "Instruction:\n", + "\n", + "With the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\n", + "\n", + "Resources:\n", + "1. Internet access for searches and information gathering.\n", + "2. Long Term memory management.\n", + "3. GPT-3.5 powered Agents for delegation of simple tasks.\n", + "4. File output.\n", + "\n", + "Performance Evaluation:\n", + "1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\n", + "2. Constructively self-criticize your big-picture behavior constantly.\n", + "3. Reflect on past decisions and strategies to refine your approach.\n", + "4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\n" + ] + } + ], + "source": [ + "console.log(await tool.invoke({ query: \"task decomposition\" }))" + ] + }, + { + "cell_type": "markdown", + "id": "f77e0217-28be-4b8b-b4c4-9cc4ed5ec201", + "metadata": {}, + "source": [ + "### Agent constructor\n", + "\n", + "Now that we have defined the tools and the LLM, we can create the agent. We will be using [LangGraph](/docs/concepts/#langgraph) to construct the agent. \n", + "Currently we are using a high level interface to construct the agent, but the nice thing about LangGraph is that this high-level interface is backed by a low-level, highly controllable API in case you want to modify the agent logic." + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "1726d151-4653-4c72-a187-a14840add526", + "metadata": {}, + "outputs": [], + "source": [ + "import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n", + "\n", + "const agentExecutor = createReactAgent({ llm, tools });" + ] + }, + { + "cell_type": "markdown", + "id": "6d5152ca-1c3b-4f58-bb28-f31c0be7ba66", + "metadata": {}, + "source": [ + "We can now try it out. 
Note that so far it is not stateful (we still need to add in memory)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "170403a2-c914-41db-85d8-a2c381da112d", + "metadata": {}, + "outputs": [ { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"Task decomposition is a technique used to break down complex tasks into smaller and more manageable \"... 278 more characters,\n", - " additional_kwargs: { function_call: undefined, tool_calls: undefined }\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"Task decomposition is a technique used to break down complex tasks into smaller and more manageable \"... 278 more characters,\n", - " name: undefined,\n", - " additional_kwargs: { function_call: undefined, tool_calls: undefined }\n", - "}\n" - ] - }, - { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Common ways of task decomposition include using prompting techniques like Chain of Thought (CoT) or \"\u001b[39m... 332 more characters,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m }\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Common ways of task decomposition include using prompting techniques like Chain of Thought (CoT) or \"\u001b[39m... 332 more characters,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m }\n", - "}" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "let chat_history = [];\n", - "\n", - "const question = \"What is task decomposition?\";\n", - "const aiMsg = await ragChain.invoke({ question, chat_history });\n", - "console.log(aiMsg)\n", - "chat_history = chat_history.concat(aiMsg);\n", - "\n", - "const secondQuestion = \"What are common ways of doing it?\";\n", - "await ragChain.invoke({ question: secondQuestion, chat_history });" - ] - }, + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABABtUmgD1ZlOHZd0nD9TR8yb3mMe\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_dWxEY41mg9VSLamVYHltsUxL\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 19,\n", + " \"promptTokens\": 66,\n", + " \"totalTokens\": 85\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_3537616b13\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"blog_post_retriever\",\n", + " \"args\": {\n", + " \"query\": \"Task Decomposition\"\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_dWxEY41mg9VSLamVYHltsUxL\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 66,\n", + " \"output_tokens\": 19,\n", + " \"total_tokens\": 85\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " tools: {\n", + 
" messages: [\n", + " ToolMessage {\n", + " \"content\": \"Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\n\\nTask decomposition can be done (1) by LLM with simple prompting like \\\"Steps for XYZ.\\\\n1.\\\", \\\"What are the subgoals for achieving XYZ?\\\", (2) by using task-specific instructions; e.g. \\\"Write a story outline.\\\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. 
If inference results contain a file path, must tell the user the complete file path.\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\",\n", + " \"name\": \"blog_post_retriever\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_dWxEY41mg9VSLamVYHltsUxL\"\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABABuSj5FHmHFdeR2Pv7Cxcmq5aQz\",\n", + " \"content\": \"Task Decomposition is a technique that allows an agent to break down a complex task into smaller, more manageable subtasks or steps. The primary goal is to simplify the task to ensure efficient execution and better understanding. \\n\\n### Methods in Task Decomposition:\\n1. **Chain of Thought (CoT)**:\\n - **Description**: This technique involves instructing the model to “think step by step” to decompose hard tasks into smaller ones. It transforms large tasks into multiple manageable tasks, enhancing the model's performance and providing insight into its thinking process. \\n - **Example**: When given a complex problem, the model outlines sequential steps to reach a solution.\\n\\n2. **Tree of Thoughts**:\\n - **Description**: This extends CoT by exploring multiple reasoning possibilities at each step. The problem is decomposed into multiple thought steps, with several thoughts generated per step, forming a sort of decision tree.\\n - **Example**: For a given task, the model might consider various alternative actions at each stage, evaluating each before proceeding.\\n\\n3. **LLM with Prompts**:\\n - **Description**: Basic task decomposition can be done via simple prompts like \\\"Steps for XYZ\\\" or \\\"What are the subgoals for achieving XYZ?\\\" This can also be guided by task-specific instructions or human inputs when necessary.\\n - **Example**: Asking the model to list the subgoals for writing a novel might produce an outline broken down into chapters, character development, and plot points.\\n\\n4. **LLM+P**:\\n - **Description**: This approach involves outsourcing long-term planning to an external classical planner using Planning Domain Definition Language (PDDL). 
The task is translated into a PDDL problem by the model, planned using classical planning tools, and then translated back into natural language.\\n - **Example**: In robotics, translating a task into PDDL and then using a domain-specific planner to generate a sequence of actions.\\n\\n### Applications:\\n- **Planning**: Helps an agent plan tasks by breaking them into clear, manageable steps.\\n- **Self-Reflection**: Allows agents to reflect and refine their actions, learning from past mistakes to improve future performance.\\n- **Memory**: Utilizes short-term memory for immediate context and long-term memory for retaining and recalling information over extended periods.\\n- **Tool Use**: Enables the agent to call external APIs for additional information or capabilities not inherent in the model.\\n\\nIn essence, task decomposition leverages various methodologies to simplify complex tasks, ensuring better performance, improved reasoning, and effective task execution.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 522,\n", + " \"promptTokens\": 821,\n", + " \"totalTokens\": 1343\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 821,\n", + " \"output_tokens\": 522,\n", + " \"total_tokens\": 1343\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n" + ] + } + ], + "source": [ + "const query = \"What is Task Decomposition?\";\n", + "\n", + "for await (const s of await agentExecutor.stream(\n", + " { messages: [new HumanMessage(query)] }\n", + ")) {\n", + " console.log(s);\n", + " console.log(\"----\");\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "1df703b1-aad6-48fb-b6fa-703e32ea88b9", + "metadata": {}, + "source": [ + "LangGraph comes with built in persistence, so we don't need to use ChatMessageHistory! Rather, we can pass in a checkpointer to our LangGraph agent directly" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "04a3a664-3c3f-4cd1-9995-26662a52da7c", + "metadata": {}, + "outputs": [], + "source": [ + "import { MemorySaver } from \"@langchain/langgraph\";\n", + "\n", + "const memory = new MemorySaver();\n", + "\n", + "const agentExecutorWithMemory = createReactAgent({ llm, tools, checkpointSaver: memory });" + ] + }, + { + "cell_type": "markdown", + "id": "02026f78-338e-4d18-9f05-131e1dd59197", + "metadata": {}, + "source": [ + "This is all we need to construct a conversational RAG agent.\n", + "\n", + "Let's observe its behavior. Note that if we input a query that does not require a retrieval step, the agent does not execute one:" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "id": "d6d70833-b958-4cd7-9e27-29c1c08bb1b8", + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "See the first [LastSmith trace here](https://smith.langchain.com/public/527981c6-5018-4b68-a11a-ebcde77843e7/r) and the [second trace here](https://smith.langchain.com/public/7b97994a-ab9f-4bf3-a2e4-abb609e5610a/r)" - ] - }, + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABACGc1vDPUSHYN7YVkuUMwpKR20P\",\n", + " \"content\": \"Hello, Bob! 
How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 12,\n", + " \"promptTokens\": 64,\n", + " \"totalTokens\": 76\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 64,\n", + " \"output_tokens\": 12,\n", + " \"total_tokens\": 76\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n" + ] + } + ], + "source": [ + "const config = { configurable: { thread_id: \"abc123\" } };\n", + "\n", + "for await (const s of await agentExecutorWithMemory.stream(\n", + " { messages: [new HumanMessage(\"Hi! I'm bob\")] },\n", + " config\n", + ")) {\n", + " console.log(s);\n", + " console.log(\"----\");\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "a7928865-3dd6-4d36-abc6-2a30de770d09", + "metadata": {}, + "source": [ + "Further, if we input a query that does require a retrieval step, the agent generates the input to the tool:" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "id": "e2c570ae-dd91-402c-8693-ae746de63b16", + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here we've gone over how to add application logic for incorporating historical outputs, but we're still manually updating the chat history and inserting it into each input. In a real Q&A application we'll want some way of persisting chat history and some way of automatically inserting and updating it.\n", - "\n", - "For this we can use:\n", - "\n", - "- [BaseChatMessageHistory](https://api.js.langchain.com/classes/langchain_core.chat_history.BaseChatMessageHistory.html): Store chat history.\n", - "- [RunnableWithMessageHistory](/docs/how_to/message_history/): Wrapper for an LCEL chain and a `BaseChatMessageHistory` that handles injecting chat history into inputs and updating it after each invocation.\n", - "\n", - "For a detailed walkthrough of how to use these classes together to create a stateful conversational chain, head to the [How to add message history (memory)](/docs/how_to/message_history/) LCEL page." - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABACI6WN7hkfJjFhIUBGt3TswtPOv\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_Lys2G4TbOMJ6RBuVvKnFSK4V\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 19,\n", + " \"promptTokens\": 89,\n", + " \"totalTokens\": 108\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_f82f5b050c\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"blog_post_retriever\",\n", + " \"args\": {\n", + " \"query\": \"Task Decomposition\"\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_Lys2G4TbOMJ6RBuVvKnFSK4V\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 89,\n", + " \"output_tokens\": 19,\n", + " \"total_tokens\": 108\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " tools: {\n", + " messages: [\n", + " ToolMessage {\n", + " \"content\": \"Fig. 1. 
Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\n\\nTask decomposition can be done (1) by LLM with simple prompting like \\\"Steps for XYZ.\\\\n1.\\\", \\\"What are the subgoals for achieving XYZ?\\\", (2) by using task-specific instructions; e.g. \\\"Write a story outline.\\\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. 
If inference results contain a file path, must tell the user the complete file path.\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\",\n", + " \"name\": \"blog_post_retriever\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_Lys2G4TbOMJ6RBuVvKnFSK4V\"\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABACJu56eYSAyyMNaV9UEUwHS8vRu\",\n", + " \"content\": \"Task Decomposition is a method used to break down complicated tasks into smaller, more manageable steps. This approach leverages the \\\"Chain of Thought\\\" (CoT) technique, which prompts models to \\\"think step by step\\\" to enhance performance on complex tasks. Here’s a summary of the key concepts related to Task Decomposition:\\n\\n1. **Chain of Thought (CoT):**\\n - A prompting technique that encourages models to decompose hard tasks into simpler steps, transforming big tasks into multiple manageable sub-tasks.\\n - CoT helps to provide insights into the model’s thinking process.\\n\\n2. **Tree of Thoughts:**\\n - An extension of CoT, this approach explores multiple reasoning paths at each step.\\n - It creates a tree structure by generating multiple thoughts per step, and uses search methods like breadth-first search (BFS) or depth-first search (DFS) to explore these thoughts.\\n - Each state is evaluated by a classifier or majority vote.\\n\\n3. **Methods for Task Decomposition:**\\n - Simple prompting such as instructing with phrases like \\\"Steps for XYZ: 1., 2., 3.\\\" or \\\"What are the subgoals for achieving XYZ?\\\".\\n - Using task-specific instructions like \\\"Write a story outline\\\" for specific tasks such as writing a novel.\\n - Incorporating human inputs for better granularity.\\n\\n4. **LLM+P (Long-horizon Planning):**\\n - A method that involves using an external classical planner for long-horizon planning.\\n - The process involves translating the problem into a Planning Domain Definition Language (PDDL) problem, using a classical planner to generate a PDDL plan, and then translating it back into natural language.\\n\\nTask Decomposition is essential in planning complex tasks, allowing for efficient handling by breaking them into sub-tasks and sub-goals. 
This process is integral to the functioning of autonomous agent systems and enhances their capability to execute intricate tasks effectively.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 396,\n", + " \"promptTokens\": 844,\n", + " \"totalTokens\": 1240\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_9f2bfdaa89\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 844,\n", + " \"output_tokens\": 396,\n", + " \"total_tokens\": 1240\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n" + ] } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" - }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" + ], + "source": [ + "for await (const s of await agentExecutorWithMemory.stream(\n", + " { messages: [new HumanMessage(query)] },\n", + " config\n", + ")) {\n", + " console.log(s);\n", + " console.log(\"----\");\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "26eaae33-3c4e-49fc-9fc6-db8967e25579", + "metadata": {}, + "source": [ + "Above, instead of inserting our query verbatim into the tool, the agent stripped unnecessary words like \"what\" and \"is\".\n", + "\n", + "This same principle allows the agent to use the context of the conversation when necessary:" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "id": "570d8c68-136e-4ba5-969a-03ba195f6118", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABACPZzSugzrREQRO4mVQfI3cQOeL\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_5nSZb396Tcg73Pok6Bx1XV8b\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 22,\n", + " \"promptTokens\": 1263,\n", + " \"totalTokens\": 1285\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_9f2bfdaa89\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"blog_post_retriever\",\n", + " \"args\": {\n", + " \"query\": \"common ways of doing task decomposition\"\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_5nSZb396Tcg73Pok6Bx1XV8b\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 1263,\n", + " \"output_tokens\": 22,\n", + " \"total_tokens\": 1285\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " tools: {\n", + " messages: [\n", + " ToolMessage {\n", + " \"content\": \"Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. 
CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\n\\nTask decomposition can be done (1) by LLM with simple prompting like \\\"Steps for XYZ.\\\\n1.\\\", \\\"What are the subgoals for achieving XYZ?\\\", (2) by using task-specific instructions; e.g. \\\"Write a story outline.\\\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. 
Aim to complete tasks in the least number of steps.\",\n", + " \"name\": \"blog_post_retriever\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_5nSZb396Tcg73Pok6Bx1XV8b\"\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABACQt9pT5dKCTaGQpVawcmCCWdET\",\n", + " \"content\": \"According to the blog post, common ways of performing Task Decomposition include:\\n\\n1. **Using Large Language Models (LLMs) with Simple Prompting:**\\n - Providing clear and structured prompts such as \\\"Steps for XYZ: 1., 2., 3.\\\" or asking \\\"What are the subgoals for achieving XYZ?\\\"\\n - This allows the model to break down the tasks step-by-step.\\n\\n2. **Task-Specific Instructions:**\\n - Employing specific instructions tailored to the task at hand, for example, \\\"Write a story outline\\\" for writing a novel.\\n - These instructions guide the model in decomposing the task appropriately.\\n\\n3. **Involving Human Inputs:**\\n - Integrating insights and directives from humans to aid in the decomposition process.\\n - This can ensure that the breakdown is comprehensive and accurately reflects the nuances of the task.\\n\\n4. **LLM+P Approach for Long-Horizon Planning:**\\n - Utilizing an external classical planner by translating the problem into Planning Domain Definition Language (PDDL).\\n - The process involves:\\n 1. Translating the problem into “Problem PDDL”.\\n 2. Requesting a classical planner to generate a PDDL plan based on an existing “Domain PDDL”.\\n 3. Translating the PDDL plan back into natural language.\\n\\nThese methods enable effective management and execution of complex tasks by transforming them into simpler, more manageable components.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 292,\n", + " \"promptTokens\": 2010,\n", + " \"totalTokens\": 2302\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_9f2bfdaa89\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 2010,\n", + " \"output_tokens\": 292,\n", + " \"total_tokens\": 2302\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n" + ] } + ], + "source": [ + "const query3 = \"What according to the blog post are common ways of doing it? redo the search\";\n", + "\n", + "for await (const s of await agentExecutorWithMemory.stream(\n", + " { messages: [new HumanMessage(query3)] },\n", + " config\n", + ")) {\n", + " console.log(s);\n", + " console.log(\"----\");\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "f2724616-c106-4e15-a61a-3077c535f692", + "metadata": {}, + "source": [ + "Note that the agent was able to infer that \"it\" in our query refers to \"task decomposition\", and generated a reasonable search query as a result-- in this case, \"common ways of task decomposition\"." 
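Because the checkpointer keys saved state by `thread_id`, switching to a different thread id gives the agent a clean slate with no memory of the earlier exchange. Here is a minimal sketch (the thread id below is just an illustrative value), reusing the `agentExecutorWithMemory` defined above:

```typescript
// New thread id -> fresh conversation state; the agent will not know who "Bob" is here.
const newThreadConfig = { configurable: { thread_id: "xyz789" } };

for await (const s of await agentExecutorWithMemory.stream(
  { messages: [new HumanMessage("What was my name?")] },
  newThreadConfig
)) {
  console.log(s);
  console.log("----");
}
```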
+ ] + }, + { + "cell_type": "markdown", + "id": "1cf87847-23bb-4672-b41c-12ad9cf81ed4", + "metadata": {}, + "source": [ + "### Tying it together\n", + "\n", + "For convenience, we tie together all of the necessary steps in a single code cell:" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "id": "b1d2b4d4-e604-497d-873d-d345b808578e", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "import { MemorySaver } from \"@langchain/langgraph\";\n", + "import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n", + "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n", + "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "import { createRetrieverTool } from \"langchain/tools/retriever\";\n", + "\n", + "const memory3 = new MemorySaver();\n", + "const llm3 = new ChatOpenAI({ model: \"gpt-4o\", temperature: 0 });\n", + "\n", + "// Construct retriever\n", + "const loader3 = new CheerioWebBaseLoader(\n", + " \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n", + " {\n", + " selector: \".post-content, .post-title, .post-header\"\n", + " }\n", + ");\n", + "\n", + "const docs3 = await loader3.load();\n", + "\n", + "const textSplitter3 = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n", + "const splits3 = await textSplitter3.splitDocuments(docs3);\n", + "const vectorstore3 = await MemoryVectorStore.fromDocuments(splits3, new OpenAIEmbeddings());\n", + "const retriever3 = vectorstore3.asRetriever();\n", + "\n", + "// Build retriever tool\n", + "const tool3 = createRetrieverTool(\n", + " retriever3,\n", + " {\n", + " name: \"blog_post_retriever\",\n", + " description: \"Searches and returns excerpts from the Autonomous Agents blog post.\",\n", + " }\n", + ");\n", + "const tools3 = [tool3];\n", + "\n", + "const agentExecutor3 = createReactAgent({ llm: llm3, tools: tools3, checkpointSaver: memory3 });" + ] + }, + { + "cell_type": "markdown", + "id": "cd6bf4f4-74f4-419d-9e26-f0ed83cf05fa", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "We've covered the steps to build a basic conversational Q&A application:\n", + "\n", + "- We used chains to build a predictable application that generates search queries for each user input;\n", + "- We used agents to build an application that \"decides\" when and how to generate search queries.\n", + "\n", + "To explore different types of retrievers and retrieval strategies, visit the [retrievers](/docs/how_to/#retrievers) section of the how-to guides.\n", + "\n", + "For a detailed walkthrough of LangChain's conversation memory abstractions, visit the [How to add message history (memory)](/docs/how_to/message_history) LCEL page." 
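As a quick sanity check of the consolidated cell above (assuming `OpenAIEmbeddings` has been imported from `@langchain/openai` as in the earlier cells), you can stream a question through `agentExecutor3` with a thread id, mirroring the earlier examples:

```typescript
// Illustrative only: reuses the objects built in the consolidated cell above.
const config3 = { configurable: { thread_id: "example-thread-1" } };

for await (const step of await agentExecutor3.stream(
  { messages: [new HumanMessage("What is Task Decomposition?")] },
  config3
)) {
  console.log(step);
  console.log("----");
}
```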
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" }, - "nbformat": 4, - "nbformat_minor": 2 + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 } diff --git a/docs/core_docs/docs/versions/migrating_memory/chat_history.ipynb b/docs/core_docs/docs/versions/migrating_memory/chat_history.ipynb new file mode 100644 index 000000000000..927aec36ad0c --- /dev/null +++ b/docs/core_docs/docs/versions/migrating_memory/chat_history.ipynb @@ -0,0 +1,268 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "c298a5c9-b9af-481d-9eba-cbd65f987a8a", + "metadata": {}, + "source": [ + "# How to use BaseChatMessageHistory with LangGraph\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chat History](/docs/concepts/#chat-history)\n", + "- [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html)\n", + "- [LangGraph](https://langchain-ai.github.io/langgraphjs/concepts/high_level/)\n", + "- [Memory](https://langchain-ai.github.io/langgraphjs/concepts/agentic_concepts/#memory)\n", + "\n", + ":::\n", + "\n", + "We recommend that new LangChain applications take advantage of the [built-in LangGraph peristence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to implement memory.\n", + "\n", + "In some situations, users may need to keep using an existing persistence solution for chat message history.\n", + "\n", + "Here, we will show how to use [LangChain chat message histories](/docs/integrations/memory/) (implementations of [BaseChatMessageHistory](https://api.js.langchain.com/classes/_langchain_core.chat_history.BaseChatMessageHistory.html)) with LangGraph." + ] + }, + { + "cell_type": "markdown", + "id": "548bc988-167b-43f1-860a-d247e28b2b42", + "metadata": {}, + "source": [ + "## Set up\n", + "\n", + "```typescript\n", + "process.env.ANTHROPIC_API_KEY = 'YOUR_API_KEY'\n", + "```\n", + "\n", + "```{=mdx}\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\"\n", + "\n", + "\n", + " @langchain/core @langchain/langgraph @langchain/anthropic\n", + "\n", + "```" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "c5e08659-b68c-48f2-8b33-e79b0c6999e1", + "metadata": {}, + "source": [ + "## ChatMessageHistory\n", + "\n", + "A message history needs to be parameterized by a conversation ID or maybe by the 2-tuple of (user ID, conversation ID).\n", + "\n", + "Many of the [LangChain chat message histories](/docs/integrations/memory/) will have either a `sessionId` or some `namespace` to allow keeping track of different conversations. Please refer to the specific implementations to check how it is parameterized.\n", + "\n", + "The built-in `InMemoryChatMessageHistory` does not contains such a parameterization, so we'll create a dictionary to keep track of the message histories." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "28049308-2543-48e6-90d0-37a88951a637", + "metadata": {}, + "outputs": [], + "source": [ + "import { InMemoryChatMessageHistory } from \"@langchain/core/chat_history\";\n", + "\n", + "const chatsBySessionId: Record = {}\n", + "\n", + "const getChatHistory = (sessionId: string) => {\n", + " let chatHistory: InMemoryChatMessageHistory | undefined = chatsBySessionId[sessionId]\n", + " if (!chatHistory) {\n", + " chatHistory = new InMemoryChatMessageHistory()\n", + " chatsBySessionId[sessionId] = chatHistory\n", + " }\n", + " return chatHistory\n", + "}" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "94c53ce3-4212-41e6-8ad3-f0ab5df6130f", + "metadata": {}, + "source": [ + "## Use with LangGraph\n", + "\n", + "Next, we'll set up a basic chat bot using LangGraph. If you're not familiar with LangGraph, you should look at the following [Quick Start Tutorial](https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/).\n", + "\n", + "We'll create a [LangGraph node](https://langchain-ai.github.io/langgraphjs/concepts/low_level/#nodes) for the chat model, and manually manage the conversation history, taking into account the conversation ID passed as part of the RunnableConfig.\n", + "\n", + "The conversation ID can be passed as either part of the RunnableConfig (as we'll do here), or as part of the [graph state](https://langchain-ai.github.io/langgraphjs/concepts/low_level/#state)." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d818e23f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "hi! I'm bob\n", + "Hello Bob! It's nice to meet you. How can I assist you today?\n", + "what was my name?\n", + "You said your name is Bob.\n" + ] + } + ], + "source": [ + "import { v4 as uuidv4 } from \"uuid\";\n", + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "import { StateGraph, MessagesAnnotation, END, START } from \"@langchain/langgraph\";\n", + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "import { RunnableConfig } from \"@langchain/core/runnables\";\n", + "\n", + "// Define a chat model\n", + "const model = new ChatAnthropic({ modelName: \"claude-3-haiku-20240307\" });\n", + "\n", + "// Define the function that calls the model\n", + "const callModel = async (\n", + " state: typeof MessagesAnnotation.State,\n", + " config: RunnableConfig\n", + "): Promise> => {\n", + " if (!config.configurable?.sessionId) {\n", + " throw new Error(\n", + " \"Make sure that the config includes the following information: {'configurable': {'sessionId': 'some_value'}}\"\n", + " );\n", + " }\n", + "\n", + " const chatHistory = getChatHistory(config.configurable.sessionId as string);\n", + "\n", + " let messages = [...(await chatHistory.getMessages()), ...state.messages];\n", + "\n", + " if (state.messages.length === 1) {\n", + " // First message, ensure it's in the chat history\n", + " await chatHistory.addMessage(state.messages[0]);\n", + " }\n", + "\n", + " const aiMessage = await model.invoke(messages);\n", + "\n", + " // Update the chat history\n", + " await chatHistory.addMessage(aiMessage);\n", + "\n", + " return { messages: [aiMessage] };\n", + "};\n", + "\n", + "// Define a new graph\n", + "const workflow = new StateGraph(MessagesAnnotation)\n", + " .addNode(\"model\", callModel)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", + "\n", + "const app = workflow.compile();\n", + "\n", + "// Create 
a unique session ID to identify the conversation\n", + "const sessionId = uuidv4();\n", + "const config = { configurable: { sessionId }, streamMode: \"values\" as const };\n", + "\n", + "const inputMessage = new HumanMessage(\"hi! I'm bob\");\n", + "\n", + "for await (const event of await app.stream({ messages: [inputMessage] }, config)) {\n", + " const lastMessage = event.messages[event.messages.length - 1];\n", + " console.log(lastMessage.content);\n", + "}\n", + "\n", + "// Here, let's confirm that the AI remembers our name!\n", + "const followUpMessage = new HumanMessage(\"what was my name?\");\n", + "\n", + "for await (const event of await app.stream({ messages: [followUpMessage] }, config)) {\n", + " const lastMessage = event.messages[event.messages.length - 1];\n", + " console.log(lastMessage.content);\n", + "}" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "da0536dd-9a0b-49e3-b0b6-e8c7abf3b1f9", + "metadata": {}, + "source": [ + "## Using With RunnableWithMessageHistory\n", + "\n", + "This how-to guide used the `messages` and `addMessages` interface of `BaseChatMessageHistory` directly. \n", + "\n", + "Alternatively, you can use [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html), as [LCEL](/docs/concepts/#langchain-expression-language-lcel/) can be used inside any [LangGraph node](https://langchain-ai.github.io/langgraphjs/concepts/low_level/#nodes).\n", + "\n", + "To do that replace the following code:\n", + "\n", + "```typescript\n", + "const callModel = async (\n", + " state: typeof MessagesAnnotation.State,\n", + " config: RunnableConfig\n", + "): Promise> => {\n", + " // highlight-start\n", + " if (!config.configurable?.sessionId) {\n", + " throw new Error(\n", + " \"Make sure that the config includes the following information: {'configurable': {'sessionId': 'some_value'}}\"\n", + " );\n", + " }\n", + "\n", + " const chatHistory = getChatHistory(config.configurable.sessionId as string);\n", + "\n", + " let messages = [...(await chatHistory.getMessages()), ...state.messages];\n", + "\n", + " if (state.messages.length === 1) {\n", + " // First message, ensure it's in the chat history\n", + " await chatHistory.addMessage(state.messages[0]);\n", + " }\n", + "\n", + " const aiMessage = await model.invoke(messages);\n", + "\n", + " // Update the chat history\n", + " await chatHistory.addMessage(aiMessage);\n", + " // highlight-end\n", + " return { messages: [aiMessage] };\n", + "};\n", + "```\n", + "\n", + "With the corresponding instance of `RunnableWithMessageHistory` defined in your current application.\n", + "\n", + "```typescript\n", + "const runnable = new RunnableWithMessageHistory({\n", + " // ... 
configuration from existing code\n", + "});\n", + "\n", + "const callModel = async (\n", + " state: typeof MessagesAnnotation.State,\n", + " config: RunnableConfig\n", + "): Promise> => {\n", + " // RunnableWithMessageHistory takes care of reading the message history\n", + " // and updating it with the new human message and AI response.\n", + " const aiMessage = await runnable.invoke(state.messages, config);\n", + " return {\n", + " messages: [aiMessage]\n", + " };\n", + "};\n", + "```" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/versions/migrating_memory/conversation_buffer_window_memory.ipynb b/docs/core_docs/docs/versions/migrating_memory/conversation_buffer_window_memory.ipynb new file mode 100644 index 000000000000..719cf9d1022e --- /dev/null +++ b/docs/core_docs/docs/versions/migrating_memory/conversation_buffer_window_memory.ipynb @@ -0,0 +1,643 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ce8457ed-c0b1-4a74-abbd-9d3d2211270f", + "metadata": {}, + "source": [ + "# Migrating off ConversationTokenBufferMemory\n", + "\n", + "Follow this guide if you're trying to migrate off one of the old memory classes listed below:\n", + "\n", + "\n", + "| Memory Type | Description |\n", + "|----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n", + "| `ConversationTokenBufferMemory` | Keeps only the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. |\n", + "\n", + "`ConversationTokenBufferMemory` applies additional processing on top of the raw conversation history to trim the conversation history to a size that fits inside the context window of a chat model. \n", + "\n", + "This processing functionality can be accomplished using LangChain's built-in [trimMessages](https://api.js.langchain.com/functions/_langchain_core.messages.trimMessages.html) function." 
+ ] + }, + { + "cell_type": "markdown", + "id": "79935247-acc7-4a05-a387-5d72c9c8c8cb", + "metadata": {}, + "source": [ + "```{=mdx}\n", + ":::important\n", + "\n", + "We’ll begin by exploring a straightforward method that involves applying processing logic to the entire conversation history.\n", + "\n", + "While this approach is easy to implement, it has a downside: as the conversation grows, so does the latency, since the logic is re-applied to all previous exchanges in the conversation at each turn.\n", + "\n", + "More advanced strategies focus on incrementally updating the conversation history to avoid redundant processing.\n", + "\n", + "For instance, the LangGraph [how-to guide on summarization](https://langchain-ai.github.io/langgraphjs/how-tos/add-summary-conversation-history/) demonstrates\n", + "how to maintain a running summary of the conversation while discarding older messages, ensuring they aren't re-processed during later turns.\n", + ":::\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "d07f9459-9fb6-4942-99c9-64558aedd7d4", + "metadata": {}, + "source": [ + "## Set up\n", + "\n", + "### Dependencies\n", + "\n", + "```{=mdx}\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\"\n", + "\n", + "\n", + " @langchain/openai @langchain/core zod\n", + "\n", + "```\n", + "\n", + "### Environment variables\n", + "\n", + "```typescript\n", + "process.env.OPENAI_API_KEY = \"YOUR_OPENAI_API_KEY\";\n", + "```\n", + "\n", + "```{=mdx}\n", + "
\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "7ce2d951", + "metadata": {}, + "source": [ + "## Reimplementing ConversationTokenBufferMemory logic\n", + "\n", + "Here, we'll use `trimMessages` to keeps the system message and the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "e1550bee", + "metadata": {}, + "outputs": [], + "source": [ + "import {\n", + " AIMessage,\n", + " HumanMessage,\n", + " SystemMessage,\n", + "} from \"@langchain/core/messages\";\n", + "\n", + "const messages = [\n", + " new SystemMessage(\"you're a good assistant, you always respond with a joke.\"),\n", + " new HumanMessage(\"i wonder why it's called langchain\"),\n", + " new AIMessage(\n", + " 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n", + " ),\n", + " new HumanMessage(\"and who is harrison chasing anyways\"),\n", + " new AIMessage(\n", + " \"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"\n", + " ),\n", + " new HumanMessage(\"why is 42 always the answer?\"),\n", + " new AIMessage(\n", + " \"Because it's the only number that's constantly right, even when it doesn't add up!\"\n", + " ),\n", + " new HumanMessage(\"What did the cow say?\"),\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "6442f74b-2c36-48fd-a3d1-c7c5d18c050f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "SystemMessage {\n", + " \"content\": \"you're a good assistant, you always respond with a joke.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + "}\n", + "HumanMessage {\n", + " \"content\": \"and who is harrison chasing anyways\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + "}\n", + "AIMessage {\n", + " \"content\": \"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + "}\n", + "HumanMessage {\n", + " \"content\": \"why is 42 always the answer?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + "}\n", + "AIMessage {\n", + " \"content\": \"Because it's the only number that's constantly right, even when it doesn't add up!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + "}\n", + "HumanMessage {\n", + " \"content\": \"What did the cow say?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + "}\n" + ] + } + ], + "source": [ + "import { trimMessages } from \"@langchain/core/messages\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const selectedMessages = await trimMessages(\n", + " messages,\n", + " {\n", + " // Please see API reference for trimMessages for other ways to specify a token counter.\n", + " tokenCounter: new ChatOpenAI({ model: \"gpt-4o\" }),\n", + " maxTokens: 80, // <-- token limit\n", + " // The startOn is specified\n", + " // to make sure we do not generate a sequence where\n", + " // a ToolMessage that contains the result of a tool invocation\n", + " // appears before the AIMessage that requested a tool invocation\n", + " // as this will cause some chat 
models to raise an error.\n", + " startOn: \"human\",\n", + " strategy: \"last\",\n", + " includeSystem: true, // <-- Keep the system message\n", + " }\n", + ")\n", + "\n", + "for (const msg of selectedMessages) {\n", + " console.log(msg);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "0f05d272-2d22-44b7-9fa6-e617a48584b4", + "metadata": {}, + "source": [ + "```{=mdx}\n", + "
\n", + "```\n", + "\n", + "## Modern usage with LangGraph\n", + "\n", + "The example below shows how to use LangGraph to add simple conversation pre-processing logic.\n", + "\n", + "```{=mdx}\n", + ":::note\n", + "\n", + "If you want to avoid running the computation on the entire conversation history each time, you can follow\n", + "the [how-to guide on summarization](https://langchain-ai.github.io/langgraphjs/how-tos/add-summary-conversation-history/) that demonstrates\n", + "how to discard older messages, ensuring they aren't re-processed during later turns.\n", + "\n", + ":::\n", + "```\n", + "\n", + "```{=mdx}\n", + "
\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "05d360e0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "hi! I'm bob\n", + "Hello, Bob! How can I assist you today?\n", + "what was my name?\n", + "You mentioned that your name is Bob. How can I help you today?\n" + ] + } + ], + "source": [ + "import { v4 as uuidv4 } from 'uuid';\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "import { StateGraph, MessagesAnnotation, END, START, MemorySaver } from \"@langchain/langgraph\";\n", + "import { trimMessages } from \"@langchain/core/messages\";\n", + "\n", + "// Define a chat model\n", + "const model = new ChatOpenAI({ model: \"gpt-4o\" });\n", + "\n", + "// Define the function that calls the model\n", + "const callModel = async (state: typeof MessagesAnnotation.State): Promise> => {\n", + " // highlight-start\n", + " const selectedMessages = await trimMessages(\n", + " state.messages,\n", + " {\n", + " tokenCounter: (messages) => messages.length, // Simple message count instead of token count\n", + " maxTokens: 5, // Allow up to 5 messages\n", + " strategy: \"last\",\n", + " startOn: \"human\",\n", + " includeSystem: true,\n", + " allowPartial: false,\n", + " }\n", + " );\n", + " // highlight-end\n", + "\n", + " const response = await model.invoke(selectedMessages);\n", + "\n", + " // With LangGraph, we're able to return a single message, and LangGraph will concatenate\n", + " // it to the existing list\n", + " return { messages: [response] };\n", + "};\n", + "\n", + "\n", + "// Define a new graph\n", + "const workflow = new StateGraph(MessagesAnnotation)\n", + "// Define the two nodes we will cycle between\n", + " .addNode(\"model\", callModel)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END)\n", + "\n", + "const app = workflow.compile({\n", + " // Adding memory is straightforward in LangGraph!\n", + " // Just pass a checkpointer to the compile method.\n", + " checkpointer: new MemorySaver()\n", + "});\n", + "\n", + "// The thread id is a unique key that identifies this particular conversation\n", + "// ---\n", + "// NOTE: this must be `thread_id` and not `threadId` as the LangGraph internals expect `thread_id`\n", + "// ---\n", + "const thread_id = uuidv4();\n", + "const config = { configurable: { thread_id }, streamMode: \"values\" as const };\n", + "\n", + "const inputMessage = {\n", + " role: \"user\",\n", + " content: \"hi! I'm bob\",\n", + "}\n", + "for await (const event of await app.stream({ messages: [inputMessage] }, config)) {\n", + " const lastMessage = event.messages[event.messages.length - 1];\n", + " console.log(lastMessage.content);\n", + "}\n", + "\n", + "// Here, let's confirm that the AI remembers our name!\n", + "const followUpMessage = {\n", + " role: \"user\",\n", + " content: \"what was my name?\",\n", + "}\n", + "\n", + "// ---\n", + "// NOTE: You must pass the same thread id to continue the conversation\n", + "// we do that here by passing the same `config` object to the `.stream` call.\n", + "// ---\n", + "for await (const event of await app.stream({ messages: [followUpMessage] }, config)) {\n", + " const lastMessage = event.messages[event.messages.length - 1];\n", + " console.log(lastMessage.content);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "84229e2e-a578-4b21-840a-814223406402", + "metadata": {}, + "source": [ + "```{=mdx}\n", + "
\n", + "```\n", + "\n", + "## Usage with a pre-built langgraph agent\n", + "\n", + "This example shows usage of an Agent Executor with a pre-built agent constructed using the [createReactAgent](https://langchain-ai.github.io/langgraphjs/reference/functions/langgraph_prebuilt.createReactAgent.html) function.\n", + "\n", + "If you are using one of the [old LangChain pre-built agents](https://js.langchain.com/v0.1/docs/modules/agents/agent_types/), you should be able\n", + "to replace that code with the new [LangGraph pre-built agent](https://langchain-ai.github.io/langgraphjs/how-tos/create-react-agent/) which leverages\n", + "native tool calling capabilities of chat models and will likely work better out of the box.\n", + "\n", + "```{=mdx}\n", + "
\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "9e54ccdc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "hi! I'm bob. What is my age?\n", + "\n", + "42 years old\n", + "Hi Bob! You are 42 years old.\n", + "do you remember my name?\n", + "Yes, your name is Bob! If there's anything else you'd like to know or discuss, feel free to ask.\n" + ] + } + ], + "source": [ + "import { z } from \"zod\";\n", + "import { v4 as uuidv4 } from 'uuid';\n", + "import { BaseMessage, trimMessages } from \"@langchain/core/messages\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "import { MemorySaver } from \"@langchain/langgraph\";\n", + "import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n", + "\n", + "const getUserAge = tool(\n", + " (name: string): string => {\n", + " // This is a placeholder for the actual implementation\n", + " if (name.toLowerCase().includes(\"bob\")) {\n", + " return \"42 years old\";\n", + " }\n", + " return \"41 years old\";\n", + " },\n", + " {\n", + " name: \"get_user_age\",\n", + " description: \"Use this tool to find the user's age.\",\n", + " schema: z.string().describe(\"the name of the user\"),\n", + " }\n", + ");\n", + "\n", + "const memory = new MemorySaver();\n", + "const model2 = new ChatOpenAI({ model: \"gpt-4o\" });\n", + "\n", + "// highlight-start\n", + "const stateModifier = async (messages: BaseMessage[]): Promise => {\n", + " // We're using the message processor defined above.\n", + " return trimMessages(\n", + " messages,\n", + " {\n", + " tokenCounter: (msgs) => msgs.length, // <-- .length will simply count the number of messages rather than tokens\n", + " maxTokens: 5, // <-- allow up to 5 messages.\n", + " strategy: \"last\",\n", + " // The startOn is specified\n", + " // to make sure we do not generate a sequence where\n", + " // a ToolMessage that contains the result of a tool invocation\n", + " // appears before the AIMessage that requested a tool invocation\n", + " // as this will cause some chat models to raise an error.\n", + " startOn: \"human\",\n", + " includeSystem: true, // <-- Keep the system message\n", + " allowPartial: false,\n", + " }\n", + " );\n", + "};\n", + "// highlight-end\n", + "\n", + "const app2 = createReactAgent({\n", + " llm: model2,\n", + " tools: [getUserAge],\n", + " checkpointSaver: memory,\n", + " // highlight-next-line\n", + " messageModifier: stateModifier,\n", + "});\n", + "\n", + "// The thread id is a unique key that identifies\n", + "// this particular conversation.\n", + "// We'll just generate a random uuid here.\n", + "const threadId2 = uuidv4();\n", + "const config2 = { configurable: { thread_id: threadId2 }, streamMode: \"values\" as const };\n", + "\n", + "// Tell the AI that our name is Bob, and ask it to use a tool to confirm\n", + "// that it's capable of working like an agent.\n", + "const inputMessage2 = {\n", + " role: \"user\",\n", + " content: \"hi! I'm bob. 
What is my age?\",\n", + "}\n", + "\n", + "for await (const event of await app2.stream({ messages: [inputMessage2] }, config2)) {\n", + " const lastMessage = event.messages[event.messages.length - 1];\n", + " console.log(lastMessage.content);\n", + "}\n", + "\n", + "// Confirm that the chat bot has access to previous conversation\n", + "// and can respond to the user saying that the user's name is Bob.\n", + "const followUpMessage2 = {\n", + " role: \"user\",\n", + " content: \"do you remember my name?\",\n", + "};\n", + "\n", + "for await (const event of await app2.stream({ messages: [followUpMessage2] }, config2)) {\n", + " const lastMessage = event.messages[event.messages.length - 1];\n", + " console.log(lastMessage.content);\n", + "}" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "f4d16e09-1d90-4153-8576-6d3996cb5a6c", + "metadata": {}, + "source": [ + "```{=mdx}\n", + "
\n", + "```\n", + "\n", + "## LCEL: Add a preprocessing step\n", + "\n", + "The simplest way to add complex conversation management is by introducing a pre-processing step in front of the chat model and pass the full conversation history to the pre-processing step.\n", + "\n", + "This approach is conceptually simple and will work in many situations; for example, if using a [RunnableWithMessageHistory](/docs/how_to/message_history/) instead of wrapping the chat model, wrap the chat model with the pre-processor.\n", + "\n", + "The obvious downside of this approach is that latency starts to increase as the conversation history grows because of two reasons:\n", + "\n", + "1. As the conversation gets longer, more data may need to be fetched from whatever store your'e using to store the conversation history (if not storing it in memory).\n", + "2. The pre-processing logic will end up doing a lot of redundant computation, repeating computation from previous steps of the conversation.\n", + "\n", + "```{=mdx}\n", + ":::caution\n", + "\n", + "If you want to use a chat model's tool calling capabilities, remember to bind the tools to the model before adding the history pre-processing step to it!\n", + "\n", + ":::\n", + "```\n", + "\n", + "```{=mdx}\n", + "
\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "a1c8adf2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-AB6uzWscxviYlbADFeDlnwIH82Fzt\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_TghBL9dzqXFMCt0zj0VYMjfp\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 16,\n", + " \"promptTokens\": 95,\n", + " \"totalTokens\": 111\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_a5d11b2ef2\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"what_did_the_cow_say\",\n", + " \"args\": {},\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_TghBL9dzqXFMCt0zj0VYMjfp\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 95,\n", + " \"output_tokens\": 16,\n", + " \"total_tokens\": 111\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "import { AIMessage, HumanMessage, SystemMessage, BaseMessage, trimMessages } from \"@langchain/core/messages\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "import { z } from \"zod\";\n", + "\n", + "const model3 = new ChatOpenAI({ model: \"gpt-4o\" });\n", + "\n", + "const whatDidTheCowSay = tool(\n", + " (): string => {\n", + " return \"foo\";\n", + " },\n", + " {\n", + " name: \"what_did_the_cow_say\",\n", + " description: \"Check to see what the cow said.\",\n", + " schema: z.object({}),\n", + " }\n", + ");\n", + "\n", + "// highlight-start\n", + "const messageProcessor = trimMessages(\n", + " {\n", + " tokenCounter: (msgs) => msgs.length, // <-- .length will simply count the number of messages rather than tokens\n", + " maxTokens: 5, // <-- allow up to 5 messages.\n", + " strategy: \"last\",\n", + " // The startOn is specified\n", + " // to make sure we do not generate a sequence where\n", + " // a ToolMessage that contains the result of a tool invocation\n", + " // appears before the AIMessage that requested a tool invocation\n", + " // as this will cause some chat models to raise an error.\n", + " startOn: \"human\",\n", + " includeSystem: true, // <-- Keep the system message\n", + " allowPartial: false,\n", + " }\n", + ");\n", + "// highlight-end\n", + "\n", + "// Note that we bind tools to the model first!\n", + "const modelWithTools = model3.bindTools([whatDidTheCowSay]);\n", + "\n", + "// highlight-next-line\n", + "const modelWithPreprocessor = messageProcessor.pipe(modelWithTools);\n", + "\n", + "const fullHistory = [\n", + " new SystemMessage(\"you're a good assistant, you always respond with a joke.\"),\n", + " new HumanMessage(\"i wonder why it's called langchain\"),\n", + " new AIMessage('Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'),\n", + " new HumanMessage(\"and who is harrison chasing anyways\"),\n", + " new AIMessage(\"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"),\n", + " new HumanMessage(\"why is 42 always the answer?\"),\n", + " new AIMessage(\"Because it's the only number that's constantly right, even when it doesn't add up!\"),\n", + " new HumanMessage(\"What did the cow say?\"),\n", + "];\n", + "\n", + "// We pass 
it explicitly to the modelWithPreprocessor for illustrative purposes.\n", + "// If you're using `RunnableWithMessageHistory` the history will be automatically\n", + "// read from the source that you configure.\n", + "const result = await modelWithPreprocessor.invoke(fullHistory);\n", + "console.log(result);" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "5da7225a-5e94-4f53-bb0d-86b6b528d150", + "metadata": {}, + "source": [ + "```{=mdx}\n", + "
\n", + "```\n", + "\n", + "If you need to implement more efficient logic and want to use `RunnableWithMessageHistory` for now the way to achieve this\n", + "is to subclass from [BaseChatMessageHistory](https://api.js.langchain.com/classes/_langchain_core.chat_history.BaseChatMessageHistory.html) and\n", + "define appropriate logic for `addMessages` (that doesn't simply append the history, but instead re-writes it).\n", + "\n", + "Unless you have a good reason to implement this solution, you should instead use LangGraph." + ] + }, + { + "cell_type": "markdown", + "id": "b2717810", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "Explore persistence with LangGraph:\n", + "\n", + "* [LangGraph quickstart tutorial](https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/)\n", + "* [How to add persistence (\"memory\") to your graph](https://langchain-ai.github.io/langgraphjs/how-tos/persistence/)\n", + "* [How to manage conversation history](https://langchain-ai.github.io/langgraphjs/how-tos/manage-conversation-history/)\n", + "* [How to add summary of the conversation history](https://langchain-ai.github.io/langgraphjs/how-tos/add-summary-conversation-history/)\n", + "\n", + "Add persistence with simple LCEL (favor LangGraph for more complex use cases):\n", + "\n", + "* [How to add message history](/docs/how_to/message_history/)\n", + "\n", + "Working with message history:\n", + "\n", + "* [How to trim messages](/docs/how_to/trim_messages)\n", + "* [How to filter messages](/docs/how_to/filter_messages/)\n", + "* [How to merge message runs](/docs/how_to/merge_message_runs/)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/versions/migrating_memory/conversation_summary_memory.ipynb b/docs/core_docs/docs/versions/migrating_memory/conversation_summary_memory.ipynb new file mode 100644 index 000000000000..50b2fbea4f96 --- /dev/null +++ b/docs/core_docs/docs/versions/migrating_memory/conversation_summary_memory.ipynb @@ -0,0 +1,45 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ce8457ed-c0b1-4a74-abbd-9d3d2211270f", + "metadata": {}, + "source": [ + "# Migrating off ConversationSummaryMemory or ConversationSummaryBufferMemory\n", + "\n", + "Follow this guide if you're trying to migrate off one of the old memory classes listed below:\n", + "\n", + "\n", + "| Memory Type | Description |\n", + "|---------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------|\n", + "| `ConversationSummaryMemory` | Continually summarizes the conversation history. The summary is updated after each conversation turn. The abstraction returns the summary of the conversation history. |\n", + "| `ConversationSummaryBufferMemory` | Provides a running summary of the conversation together with the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. 
|\n", + "\n", + "Please follow the following [how-to guide on summarization](https://langchain-ai.github.io/langgraphjs/how-tos/add-summary-conversation-history/) in LangGraph. \n", + "\n", + "This guide shows how to maintain a running summary of the conversation while discarding older messages, ensuring they aren't re-processed during later turns." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/versions/migrating_memory/index.mdx b/docs/core_docs/docs/versions/migrating_memory/index.mdx new file mode 100644 index 000000000000..837e7f03c544 --- /dev/null +++ b/docs/core_docs/docs/versions/migrating_memory/index.mdx @@ -0,0 +1,139 @@ +--- +sidebar_position: 1 +--- + +# How to migrate to LangGraph memory + +As of the v0.3 release of LangChain, we recommend that LangChain users take advantage of LangGraph persistence to incorporate `memory` into their LangChain application. + +- Users that rely on `RunnableWithMessageHistory` or `BaseChatMessageHistory` do **not** need to make any changes, but are encouraged to consider using LangGraph for more complex use cases. +- Users that rely on deprecated memory abstractions from LangChain 0.0.x should follow this guide to upgrade to the new LangGraph persistence feature in LangChain 0.3.x. + +## Why use LangGraph for memory? + +The main advantages of persistence in LangGraph are: + +- Built-in support for multiple users and conversations, which is a typical requirement for real-world conversational AI applications. +- Ability to save and resume complex conversations at any point. This helps with: + - Error recovery + - Allowing human intervention in AI workflows + - Exploring different conversation paths ("time travel") +- Full compatibility with both traditional [language models](/docs/concepts/#llms) and modern [chat models](/docs/concepts/#chat-models). Early memory implementations in LangChain weren't designed for newer chat model APIs, causing issues with features like tool-calling. LangGraph memory can persist any custom state. +- Highly customizable, allowing you to fully control how memory works and use different storage backends. + +## Evolution of memory in LangChain + +The concept of memory has evolved significantly in LangChain since its initial release. + +### LangChain 0.0.x memory + +Broadly speaking, LangChain 0.0.x memory was used to handle three main use cases: + +| Use Case | Example | +| ------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | +| Managing conversation history | Keep only the last `n` turns of the conversation between the user and the AI. | +| Extraction of structured information | Extract structured information from the conversation history, such as a list of facts learned about the user. | +| Composite memory implementations | Combine multiple memory sources, e.g., a list of known facts about the user along with facts learned during a given conversation. 
| + +While the LangChain 0.0.x memory abstractions were useful, they were limited in their capabilities and not well suited for real-world conversational AI applications. These memory abstractions lacked built-in support for multi-user, multi-conversation scenarios, which are essential for practical conversational AI systems. + +Most of these implementations have been officially deprecated in LangChain 0.3.x in favor of LangGraph persistence. + +### RunnableWithMessageHistory and BaseChatMessageHistory + +:::note +Please see [How to use BaseChatMessageHistory with LangGraph](./chat_history), if you would like to use `BaseChatMessageHistory` (with or without `RunnableWithMessageHistory`) in LangGraph. +::: + +As of LangChain v0.1, we started recommending that users rely primarily on [BaseChatMessageHistory](https://api.js.langchain.com/classes/_langchain_core.chat_history.BaseChatMessageHistory.html). `BaseChatMessageHistory` serves +as a simple persistence for storing and retrieving messages in a conversation. + +At that time, the only option for orchestrating LangChain chains was via [LCEL](/docs/how_to/#langchain-expression-language-lcel). To incorporate memory with `LCEL`, users had to use the [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html) interface. While sufficient for basic chat applications, many users found the API unintuitive and challenging to use. + +As of LangChain v0.3, we recommend that **new** code takes advantage of LangGraph for both orchestration and persistence: + +- Orchestration: In LangGraph, users define [graphs](https://langchain-ai.github.io/langgraphjs/concepts/low_level/) that specify the flow of the application. This allows users to keep using `LCEL` within individual nodes when `LCEL` is needed, while making it easy to define complex orchestration logic that is more readable and maintainable. +- Persistence: Users can rely on LangGraph's persistence to store and retrieve data. LangGraph persistence is extremely flexible and can support a much wider range of use cases than the `RunnableWithMessageHistory` interface. + +:::important +If you have been using `RunnableWithMessageHistory` or `BaseChatMessageHistory`, you do not need to make any changes. We do not plan on deprecating either functionality in the near future. This functionality is sufficient for simple chat applications and any code that uses `RunnableWithMessageHistory` will continue to work as expected. +::: + +## Migrations + +:::info Prerequisites + +These guides assume some familiarity with the following concepts: + +- [LangGraph](https://langchain-ai.github.io/langgraphjs/) +- [v0.0.x Memory](https://js.langchain.com/v0.1/docs/modules/memory/) +- [How to add persistence ("memory") to your graph](https://langchain-ai.github.io/langgraphjs/how-tos/persistence/) + ::: + +### 1. Managing conversation history + +The goal of managing conversation history is to store and retrieve the history in a way that is optimal for a chat model to use. + +Often this involves trimming and / or summarizing the conversation history to keep the most relevant parts of the conversation while having the conversation fit inside the context window of the chat model. 
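For example, trimming can be done with the built-in `trimMessages` utility. A minimal sketch is shown below; the sample history and parameter values are illustrative, not prescriptive:

```typescript
import {
  trimMessages,
  HumanMessage,
  AIMessage,
  SystemMessage,
} from "@langchain/core/messages";
import { ChatOpenAI } from "@langchain/openai";

// A stand-in conversation history; in a real application this would come from your persistence layer.
const conversationHistory = [
  new SystemMessage("You are a helpful assistant."),
  new HumanMessage("hi! I'm bob"),
  new AIMessage("Hello Bob! How can I help you today?"),
  new HumanMessage("what was my name?"),
];

// Keep the system message plus the most recent messages that fit under the token budget.
const trimmedHistory = await trimMessages(conversationHistory, {
  tokenCounter: new ChatOpenAI({ model: "gpt-4o" }), // counts tokens for the given model
  maxTokens: 80, // illustrative budget
  strategy: "last", // keep the most recent messages
  startOn: "human", // avoid starting on a dangling AI or tool message
  includeSystem: true, // always keep the system message
});
```

The linked guides below show how to apply this pattern inside a LangGraph node or a pre-built agent.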
+
+Memory classes that fall into this category include:
+
+| Memory Type                       | How to Migrate                                                | Description                                                                                                                                                                                                           |
+| --------------------------------- | :------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `ConversationTokenBufferMemory`   | [Link to Migration Guide](conversation_buffer_window_memory)  | Keeps only the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit.                                                    |
+| `ConversationSummaryMemory`       | [Link to Migration Guide](conversation_summary_memory)        | Continually summarizes the conversation history. The summary is updated after each conversation turn. The abstraction returns the summary of the conversation history.                                               |
+| `ConversationSummaryBufferMemory` | [Link to Migration Guide](conversation_summary_memory)        | Provides a running summary of the conversation together with the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. |
+
+### 2. Extraction of structured information from the conversation history
+
+Memory classes that fall into this category include:
+
+| Memory Type       | Description                                                                                                                                                                                                      |
+| ----------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `BaseEntityStore` | An abstract interface that resembles a key-value store. It was used for storing structured information learned during the conversation. The information had to be represented as an object of key-value pairs. |
+
+And a specific backend implementation of this abstraction:
+
+| Memory Type           | Description                                                                                                |
+| --------------------- | ---------------------------------------------------------------------------------------------------------- |
+| `InMemoryEntityStore` | An implementation of `BaseEntityStore` that stores the information in the literal computer memory (RAM).  |
+
+These abstractions have not received much development since their initial release. The reason
+is that, to be useful, they typically require significant specialization for a particular application, so they
+are not as widely used as the conversation history management abstractions.
+
+For this reason, there are no migration guides for these abstractions. If you're struggling to migrate an application
+that relies on these abstractions, please open an issue on the LangChain GitHub repository, explain your use case, and we'll try to provide more guidance on how to migrate these abstractions.
+
+The general strategy for extracting structured information from the conversation history is to use a chat model with tool-calling capabilities.
+The extracted information can then be saved into an appropriate data structure (e.g., an object), and information from it can be retrieved and added into the prompt as needed.
+
+### 3. 
Implementations that provide composite logic on top of one or more memory implementations + +Memory classes that fall into this category include: + +| Memory Type | Description | +| ---------------- | ------------------------------------------------------------------------------------------------------------------------------ | +| `CombinedMemory` | This abstraction accepted a list of `BaseMemory` and fetched relevant memory information from each of them based on the input. | + +These implementations did not seem to be used widely or provide significant value. Users should be able +to re-implement these without too much difficulty in custom code. + +## Related Resources + +Explore persistence with LangGraph: + +- [LangGraph quickstart tutorial](https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/) +- [How to add persistence ("memory") to your graph](https://langchain-ai.github.io/langgraphjs/how-tos/persistence/) +- [How to manage conversation history](https://langchain-ai.github.io/langgraphjs/how-tos/manage-conversation-history/) +- [How to add summary of the conversation history](https://langchain-ai.github.io/langgraphjs/how-tos/add-summary-conversation-history/) + +Add persistence with simple LCEL (favor langgraph for more complex use cases): + +- [How to add message history](/docs/how_to/message_history/) + +Working with message history: + +- [How to trim messages](/docs/how_to/trim_messages) +- [How to filter messages](/docs/how_to/filter_messages/) +- [How to merge message runs](/docs/how_to/merge_message_runs/) diff --git a/docs/core_docs/docs/versions/v0_2/migrating_astream_events.mdx b/docs/core_docs/docs/versions/v0_2/migrating_astream_events.mdx index 023c6ad234bd..8fde2bbfe3de 100644 --- a/docs/core_docs/docs/versions/v0_2/migrating_astream_events.mdx +++ b/docs/core_docs/docs/versions/v0_2/migrating_astream_events.mdx @@ -1,6 +1,6 @@ --- sidebar_position: 2 -sidebar_label: streamEvents v2 +sidebar_label: Migrating to streamEvents v2 --- # Migrating to streamEvents v2 diff --git a/docs/core_docs/sidebars.js b/docs/core_docs/sidebars.js index e4297b057712..50336c478fd8 100644 --- a/docs/core_docs/sidebars.js +++ b/docs/core_docs/sidebars.js @@ -73,9 +73,35 @@ module.exports = { collapsible: false, items: [ { - type: "autogenerated", - dirName: "versions", + type: "doc", + id: "versions/v0_3/index", + label: "v0.3", + }, + { + type: "category", + label: "v0.2", + items: [ + { + type: "autogenerated", + dirName: "versions/v0_2", + }, + ], + }, + { + type: "category", + label: "Migrating from v0.0 memory", + link: { type: "doc", id: "versions/migrating_memory/index" }, + collapsible: false, + collapsed: false, + items: [ + { + type: "autogenerated", + dirName: "versions/migrating_memory", + className: "hidden", + }, + ], }, + "versions/release_policy", ], }, "security", diff --git a/docs/core_docs/static/img/conversational_retrieval_chain.png b/docs/core_docs/static/img/conversational_retrieval_chain.png new file mode 100644 index 0000000000000000000000000000000000000000..1130df556af2746ed387a9ae6a22fcf64747e20b GIT binary patch literal 92811 zcmafb1yo$gwl*3xc#z<3!GpU5Xh?8(cT3|g!QDyl;O-6$1b2eF1$U=${+_v$x%2*a z*L|$htBX_q)v3LA?fsn&Q;?H-jr zOL1`pfVeoB0?^*f(#8}DiZ0GMR#X;75u^X5z%WrUFz`bN$O-YCM58v|R8ePlPSA|4 zx82*a!^7q`s{&V;c`+Ey#}tYoXx@HgbfE#!#0NNtiWjLfGEAw4UrD=tR|%g*V&r$1 zub}8O8l;)W7_AQp!@)>)w-X3>!A@$w6hFzv!mWY$54iJkn=)Ni7gs^3ZHe-It2d{; zM@rXMmFwyTQ=+%!VfRMyAYewhn((ff8`zgCuQD zoeas`Y;Eiu`P>94{w%=K}s+=y4yJ!x-r=~ 
zQvR!v-|a}4IvN8l9h@xf?a2OU*U-q`*-4Os;*XC0{`=QBP2DX2-IJZ;U(o2iYagrzM69!MWT-0wLA{w)8$a{k@pKWb|HyCxea59dFt z{v+%Eud3>33KX}uh4kqp^zWtltMWfH|Eeg!^2gNw0mZ*?{_`#bXdz?)mcJKG2)RrB z8Wjpk1PUM_s^SKHl!4%eqYCarCl8`Z#3&KjW1yozq);uJua1C!FhBJ@`elxXd3yx^ zPNNF%QQ@jv=2SI~nfkjcjHO7|OS`Khcf#n2Swl)2*qw`m-*I;g)bTCjC=;CNSVFAy zK||UP?*F*Rh7hSFG=1lZvB;qRkLxEKoza=(|5+?l1_p;M=c1-cnMwrazh5|PZs@U4 zNdNtkp!V;PzOkCj1F)M8;j9EL)S9Op4h6uY41p?-KJAWWFqUHde70L@fp_wHM|$L8 zD<+7mUkNIFUe9EmE0@m_nryV!msf4h_^(iqAtR~q=gIKg!~cLW?Fq&7Ab2NUZx`t+ z-H8CxxD1RyB@vhpq0~tF?~P!=i7^|AwD~z~0O}kvnC!`J*{xR*sdnZ?Qu_F*e!BcR zcXN9m*m4Uz{-j?9_G|VzBRh9m+u#W&2qsDS3o8s=I4&RW7OK42q`qA*0$pNbd!tN| z)zs>m%{i(3ji4;)sQdojM>ig*-|OXnOU=4B+8eLP2q5XWAGjr^l9GhsLJIuY<94%P z-kX8B5a|8(y#+@1!+e``87@Bbas(ZvRJ79)MlWikMpN8hb4rIpJJMyG!NQ_u1ATK$ z>rWEzrI;!4T(&juPu&RFw_-Zcqd^*UJ|gPkqcVu-oYl9BZTlb$IEWL>J9l4Oa->Z=atYtiMz{;|laduo?FaG=1K@8qQj{5j7jzDM5q|jP7EjrIqs3 zZ$cyd6^zNJ13nl0{RsIDG-TN)zdi+*;PmdiWYStozE99~%qp#`$UyFe*FIJ;CdR6g zhx+w}f%=aBh;&oQ5&VdE?cIK!#-Vd_#`L=xC?vQn5xdOIhY0IoR<$hP)G2T^+p?x| zJ^RqEy)ppK%6e$|u5?xNBp%SZDkCIOj^P6leRo zWWk8Fx*3z_#WiP84yW(XO_=K%w|Mbi(}7TO(vJ%ZafYO7-6xI}uY_`s0K>GM8L@9$!exv4bZ6 z(a`6brGfy?dtYYs|ocq48%xJbQF(T|wc&(^BCw?VYS|ym-)^QccIv z@Y#=yCEe&X%CoVe%*CIk{A=-ko-hxtEGgxi6|ukC1eKH&igZ|sUipjn_5uwYT^2-V z(1!ephlf@@HLab-%xjD%K}uCd@i=8l0et{HhSr6ojt%ZX{Y&BEwM+LmFXV%tUdMfL zKRgbycl3PE9sgT4N$~rZw?>$(4y{bq`II=>sA{{qZlPOmr|hShjAv3TG<7SCP;lS2S~<@aY=IRFOO3_@JZ_c#-uMc`^*OuCZ^+8z`Sxq4LrU-mw?6+x#e&} z*F-%qBNd#8_-A7oWI8OaEijq*yQ4ztyT_REK_ zG6hQw-L;DFS#LNRhq?)5Z-)D-cLASIXa-xJ<|ceH3+qxpa6gyB<%<<3r_xv+ zkHy}DLgfkCj~ihHr}nwQv~$XTV*6_Mi1**vif5$>a}H`!Y(FN!iZ4CvuoQ~=9a z#e=MiRBhOf)@Pdxhp4TmYuR2qDykySL!RQe87Zsx`~a)>0(vv_n}>`jbJ#d`8PpHS z-W8MGRI=qa>HwhGH@u0|m-jPZgN*h@*`4#4RuTq&bF^eZ<+X&={_B)ig&M$?;=@+Y z3F{i)T_8a9NzUH8nu@9*iGLMOZyRlk@b^~s_d_I78KiH|s@9yf9ZNA@nvK(wGA*fO zM*G74R>&RdHWH;uqj#Z(5(HLF_z^Xo9@~|Bc1CopNj0Q|8yI8L0b3 zv(|+R*bK2O#+EmF{)Vllx$glo?;a{PJ|$t}&KvuT4|en^Ze#sj9yeR6E*o?5y`PnO>{0wDym@<8G(q8^W1+ z4tV34XZhtV5LkF0uUYyC-URrb#K!n&F)oC=8yPuUuGAzi`xp}PYB7D${Mn(OSlBB@ zP4V9PE6pz0=BSO9wLSAdNTx(lm!@O)D=Y4ZDX*g`wSGJcpXbLpvwfPUvfK|eAe|H4xV^Q zuU-4p8%TBC&Bz8w)p;uAYp+K;baYsH1+bfxw5sD%z4_>_{nBOO)Qex(7|c7{GNOCm zew2CDhI2aXXeTEv6F;5%0C>Wd;z6u4Be4P=3%_Zl0~7t(p<^?O=L%e$;?$<&p+#8JSJvq|bU$OZi7@^%7j( zKAATZ79LM5GI_j1`H~AuQTkU&Iq7f4J`>G1%DH^>Q9!fPt*AYv&NG!U%PP#1+*~z9 z(43w1{Rs#Y(0->)REktwJh?YHp7gyVZB?Pp_`A*B4v;$eh+e(%WFK9+ulhyZPMgfW z;pKD4Q3lwymnIQP@wALK-{m3gO33~RP%@>};NuyBr7@Y;w?SKMeHVjMuFU|rek)UZ zh^K1P>J*`iTk%>%Vd-%N{+?*oyEufDC2E|sko#NDHGMgsOhN>$M(N=9FYq-d*WEKU zP0PEsrx)m^yDuaxv@mL=db)4*@9L*5rga+)Q`DQWFJAxFiz2)*G3~7q4M%jb{T0?M zO8N%g05Grnor7hQa6gutxc~A8J$2I zLxkCL6W_G(TWroy7t(7hm(S^k2&sANTLc`Q6oL1jqX7!MC7>n!ue7bx^-ne$_Jc}d zfrUVDX@tpG8B@F(Bf!V=!9=q>Fb+XqlZsI1LPWr+pAG156wdCrz}$RFAoZ1H&V@x; z>=ix&CBecjU!MYw{{z<*dfu6Xi*sT@A(!*7ZysHa*S3pZrx>1N@h}J@b zc}L`B&xCn3?7R_=Og2rRU^@FF~a->*GKw4Thsmym&JwP;Avfi=JSP7)2Ts*bm zvcT)?@XhagJEnQMQ{(iBgCl%Quome1`oB_BS^T>ZXQ_u9CeXt`DKZ5H@bGluws znpd%AHc&L$9Mc#afT`6f0vf3eK@`Kaz5A8Q{ya~fW;)!*nD-o%O9i^q^oY2tb?xn! 
zgQhH2b$ z^R4E>VLLOyyh9{3gF{_5-aj6Xw;JSBMJ#+Nu9DX_?Z5Rs^kiHL+Gzs;(lc{)8@_Ag~r1cWny>a)3jZ58aj;bSVH&arvD8O1-zyRr?tXbbz9S&5v6J3=--e{9%sCT@i=8-XPPL zihkIDdx*s#gn2d8VSh?@h_gmWl2+^(W1xLV$!?N6^Z`b)JE>9fg(M`bC69f}yWiCl z5!pN|nR@hX-r(alziHpq=*9NDZ{7OE`nK&v;2ONqkn;fG31ZlC%ADcy%(KY4sp zcT&((2G7HLr&>((j5#yRi9p%IV6pA_V?w!;yj6u;Mj{xA!BkBN%^?5ttR70{A4t{z zt`3XZ2x8kXr3r%uEPI#-Sz^nvE7beI)h!{O1u<$lQaQM62RB|~$ZDCJta*&ab|+Nb{S7?PY-5kPd&ZU6Wmh+m;LeHem8hNg0 zAdG zMbu_}OL|g14Y`#^PV5tpC-U{Llnc;W4c{w*a7uY=9A^`@#avVw-^4nM^=N6{PqPNo z(#N1;ua-EZ$&`T~5J8n{DJayUL7>HpyP3tbe2;Q1CGd11^>x*1&9yIgxSXWmjTxZT zF=_HGQL~(b!HG@?8%cm@ZK-)F)4|+VhHH5tDo+)}+?*(mY4jYEg{y_T`$gKKpK>9( zPYGyiK~yK&9qpR>yRe>fbQ^S4q!v6ZW5p4_rcJWNBPUNiU(d|(*x#kZYj6zjK2zoK z7|D_HDd3tfe$J`saM9Qra|0kDE#=9jh>o4x5es*c8x#r63ELM<^Zl}73N-Eb9BHOp z}WnYv282LS0z-#mb8ugEgVA&R% zr#rsE2gd-MQ{Yxqb#JMf;|ki#UxHjjmGZ2S{bvV{S#z1jFT;RHn2R4jGXn(P6g0iH zuYOWDU6<4>S^N@O?XDgtaD5>LDqoqn-a1C$J7oRX-JwIhM)!k~pI`m7_2dZmq7@4x z7K5N~8$BsRbaMn}f9iYY>gm3G$pwwb;3x}<;Ir6x`otzf+uJ zlXcI>XR}Pgq74RtG)`MDCL4gi*siyhU(!n8BU2B)f-{epN0Sf`?!9AVDxwd=JZXG= zt8G@+F{DY9yZQ-q-|lg}VyHHF@}t~Z8YG;#F3xX+zu2w7+p+!uqsJ+~G6mSpz10k} zkf+16J#2t6p9-@o!{A!KyEy6}-rCi7On+FtcPPF(Tomzq9#P&XI`QymXs0DKpGkbE z;8O}|_t;2y!|psJ+3b7kb$1t)Fcd`oU~b(02o9;QahLku=4CFWJ%{v5i&bXf%~dt0 zo1sPoAkk)}&lxL}8U}+s7QGvX>Rz&l{d75Q>!QSE+`j1*gj@kA_O8UP_M;{ z{^@jH9tq!g+B0NA5Y)>%#IYrBy7y`NX}b^&rsc){d2h-byZ(Ciud8u!5Tql+?^ z+G=zYd%z^|oMO4A=r0&*D9zYq`EHfn_6%T~btz_U-#UyNp}|&d4wPULV^s)`Y9nhkiHoA$2050B z&TW(a*4Q#|?P{;Kd`l4UZsRQy-c9^aemb>R4@-OPQzSVVU`AYvOkAd^6+Kz1a}|#!PEWl3e~qlF)-K z=xEIO;{ogDLB0Sa9^ zi)$hcmsEfwqVP8caJOssgM~bz%B}UNo~;gPX_wxUEWz3av|^qZ4XAvPcXKQ(;Lz^a z$My{~3jADyXY0pi@u0C5Mcq=y#1wd$iVK%!?*Sd}3S&Ob*jAPK@^POvGXBP$!>7Ie zPvax;{xWTTe*Q4>p?%KECZLO15m9_bN`t-E2k1>}-ip@y>I^ntkK373D%W<8hGQT2 z`LV5Xbt7MX94_&vmFcA}Kag)%8^J}6F%%;Ap(UTbG|A0*-)_U-d(UkW4$k9wSWVE> zn$?WxH+?z{X&h`7E_ZH-c$2Ve3c~VIIMSijWkd9R#AVX~u?GCa;}V6zq+L(_>DhzW zU)C4u8cF<@o7`U-i;nY=M1^AlE~7d*W_5yuVQ=mQ67xJadrI=g?)`R zm`MnJseIq+UXs_K%6uWS#nWV6$mM0;F{MYnm05A9`Jz&qS+tgnbL?!lDs&B<1l1PS zsbTY)F730IyRiToPo6|9uCG19O0T0;jc|$1#KlgrVy=-Fa z*uQLx=}C=LuW+s--JL!hg^RJ;B(YYLn8M-$WO|x}AE*Q}V|qhz;7YIc661p|ig?G) z<@HlpCj5pm*X^|>aABNb|1wL1a}3)*kZ??^9&aI`E4fx+-O&;!dqtwzKY-ct{pGG3>TWXE?D+I>MV=$1#lj}t9PczeRjJ65O=8thbL6awD2kC2}cGcYU7En%Wj6n}PPGVAOf*rM{Z;<~Kb%*QZY5PEs`e`*Vmv1(KRz>f%axC2aH^Rl? 
z`MHN704?W~*pC+98w-d{DD$XM5bewa7}|@BFT9ZYdXdR40#+ec#?(U0H!PrZnv7RP zt3+2mZEGK8aHBNP*ArcE%|7NQUVEt`HjEEm1=dlOC`g2qDk#&*)-v7zPS4KA+wP)I zo$b+u^sJJDe`=k{S0s?8en~;aa!RXlSJzx4LyJ^%7oSv8(Th>SG*@S}PLJ z^-*kQTo?3j!61^1)OlC9b`$cb;t?eyLkr+k`!%g^?P&U}Bb7xqOQzq)$&iN?m>hXu zd{KktsnGt!q|Jh^o`WFv{s-(<)})Y_iu<-4rw!R2XpQ} z(yl#H4ip+9OAsbTF`y&Ghc1ESc5X$IeZugsx)9-bW1?JOq~+|9+t_|bxgcK9s+|Hq z1j(sV+7gMu5c)5!B~+YFBSyp8cluGqTH)e!1Nxg7oZ*+;yx4x+?FMb{J8UcR8cxfj z)(42IEJvv0Q=XK0duFU>=T0LWiE9JTXnY5GY4bE5hO01COE(RY0iU!BOvOv!n004;5vpT#s^49zoYp$L^z8x!(UnU;xnE*r!V zh=u!*YiUb~Udgm++=Z3-g5iz{pew?zi7IyY5aN?__o_>_zjt{p+|Sf|q}|nSJWV;x zzr!Dldx149mQf)=NsRx3kZ({Jo40aRrc+9B=I~+GyAtFa4yMC|)@C&Ren2&AQ+9}3kyW1Zf%o!?eX|E4uXn{7 zj+euVWKRF!3Zz?O_fjg>b=e|F;!0E~QGC?6B(oYodgV#=PQIkPB97sXH+mc%j$$jy z(qq&94=1%!>F@^`RuA={r_@~GkRok*78Rp^9c#$^8KlXor#Ble*!m@}6k_H|q0a$rU!oqI1{29_VM zV@5Cn!PUvH@?m;$-?a26lB#|sy|S0G+$)vLm)xFt;fcI=mR*xq>hdqyd6$;x{|x|3~B_qns^Gt8CP%^p>Df)b|$l*jy(b0dW>{cchMd#`#Cbv+PaJoxGr>deO&C z{kdUc`LRhUZc9B`PmA@@?jioJP>!^uDubaznMaEvtgKuo=Hn#+PBa|+$&LKjoIbcy zq)-BG{VBN!0@gm^rwfX^^<5hP0|@=*2x4bvFB?N@M*qUnG|L2Xe!6;_PLd}-~U;YZ1+7|#e_kLJY7sx>pk$XudBY}`OdvY zDroOQ7!esWi8&Ae(AD?5A82tX;AMV1XGNG{jzGj7-?H(9dBFlnG>R;{g5mUsX4id$U zz+}ZxLjGRx@BNE4-_d<;io0p=@NBIlIAhq^`=CW!nVivq8kb`e!p2>#CYXUoM*icR z)3JXGfy(~eK7O&F)+AFvES_UgcGIyv{tIn+N>oJI(0PAV!%o)$=_K-_PE73_c4EDgUykUwkgy zVj?WX_jWGBdnpw;JqQrH?4{9}QKMD;-DH@NGx~8&NtyQ^cPJ3e4GYxj(W||m-vNE-d&%=q3>Az zF{<)yWzNlVe}(^5!$ca#uARq1>6N*pPrs$5BSjhxg!tp;M_h=B_&4^{!=k023u%8h zU)F#5=>*a}RW^bLs&rO8-LWhH0hShYfrMgXA@7nHH(Go~Gp=>jS4kQr`=azq+gMYF zi18XG41=G`BadBm=C$n9B=@5hJhuVV>|Vk-DVIMb^N~t?6`OflNWPH^7Y9xiA=twj zkJu}X8!pQgonVkv2v;Vds;$CAs8Sp%+uiHF<&sMnW%$kuq{=p+d9jXho!Y1wiF|rU z><>MU6@f%)IkvRx)8G51-2gkeTH%dG7n$H|w1OH_vw@oL)r@)M+~vtMZ|F>^`iF7c zO4|hYcoy<{68+KTr^F^W!|%EqU1GoF>&J-pw|zEF|+@f3Kj-OD)F;W@q1TF!q!Q zmsA~R*&J-QAO@-M`p#VVfLMSQ`j6Aekiy{zFj1+a+4{5iUImr&_yR~ylB_nff6h7U z18OPWs|y5Hyx__KO<`O_)VJ@YU!f5dwT8zk;jkHPlvKz3i}&!ILLqnmoR9oHP@9b9 zOFfx>*GVypEFjak&+A}gw*-TyQaY+E!KuC{GTB4(Y#e6cY@Gg&kWC3SH6!ICwCOwx zXXK&gc}wbZf=^G7Qk6-Km9jgjmuI1))XEeBqceqOhkf3Kfeo))F-}EhWjB2oa&cgQ z<{>A)E`xK7uZn8uc+5kCq2g5)9MSRUSaYBN&+EYLNSknfn3e&0;?lIpoCU z&8Wh@zc}iEI|Du*s%jJWWKJ~r)~I{!nu1)NSlP`>9vN#r$a1F{tFz3Im}f#v9yRu` z%$wW3U(aFM9kNm%3mp@=*uTnvs0fFPh&i)&F`fjL$h3L5kxarM2OMPaew<9rwBk)d z_H`b#ShyLzj`qoJEYa(uN0-SkCVFQ`$3j1!AMf9hmkeKHJ!#+^oqCY&sd9oE!X26~ z3PNUyIdEpL6RF?q@26^huI9@Mu@*$+MsQ(D%CU`rHB!|L>W73}k2G2``%|71iyRa= z-|{R!eSRTZ;DxIcoLY6|Lg~BqK$|#aH1?O_jA>H2`A9&|iRS2bp4T9#xLZu8!SxBK z9TUGOwQ^*$R7Un0>@UceLBbd{ekaCta!mQaU73`s%balUO=J{aoV9x4wGelfcf+vA zx?^HlaPrz)J`GueAttfw7$vDF2bgIyb~7;Sfj9^F7gs5p0n^Q9J+9^hb-f;|eJK~a zal=z{VPovlxIO)o-Lp8MmvstF)2kHiFDZyuODw2Br>8 zDHL__AFt|VgWnvxe*E>*emVKG0;0|EHV`}@fmp> z{3XXksR3FB76jCrvkQnU>$S8Z;?A6!v@9b1-uA&Ky;<`fY*BXjRk*N-OHYlw;p~ma zPK|DjqWgzwHI6fMI^!o2PDf$&Fs@b=p{SK1gLYjc4E`BLZguLVF(!`ew1#IT#uVLY zNNhrps#I27M3l8@>v0x%+H31Hz(_Sray^X%yu_}pn`bw=Qo8eSV-XG5N2ugkexILW z5V=g@`*Ah9!)~th_|_Jx`#EE%6?P-eY6cKzaz{4mE#P9WW6_$eGaWTRBpC~#^R##l4^X=6i3m(pr3^9mXISx$tYXj!3!*EHDUS-I4 zaNmu>hNc2i=n;di`?_`C!@6m-IaO|YmMp|5PJjFSHFR*zj~Qaf|^D5fz9!pcg4aGj=9 zF}U89uY|=J_X%91R)c77YAjCH_)Whd_rn=; znDzKRUm>yE^_jVE)^77P1{^k%C8M_A(d~a1KrY*!#Y8cQy9o93hnyi^0TbAtUG0>Y zv_9N0l2b!G#9tMJ&1l8Yws-It$NhSMnYd;hjnqJ&GPPfW&banC_Sc|e!gCAteRrn< zEPs`DmhBCw$~Ug?Ipa>f$4w2+Z8v!OA7rOVIa*)jso%HVo&(L>k*$}`-ZrVZ<96~J zhhU)=6e}3ex|W=Nbh9fzY>8%7oh$Y1apSsDqfnsTu7_QXP&-KuVi3R;z)~Pla;J_@ z%6~q>CL&KOvyD>Rs9nVe^|TD`I_vlb)&=yVRvfZGB6)4wt)_!Rx;4&Ux=Zn#h1|Y} zTwDN@4O`K8#2uP5QJ?lpPh52nsw6_|ov7QVD>pF$bq>3y_Qq$LC$*_d)C+&Y&rJs2 
z0?X}JTlHvbSJv{!DS~IJ(h?FjPu<;#kR$~CAzVFvaT&#)(EH(Dw14rXAa}*B`kL5x z>@tfZn_H9ACB&2Vn!X^PZaEyg%ss(2zgWPw@f&vOJ{i~!lehn+AeRH$?HZ&~ZT`jM+fv7@PZZ(5d7YqBtXbGL0;WIS= zt@V+cR045#KgTNH;qcwU#Wqx-XuH^MA~<{og?hT|=&Deykul%r)Ns2c+*(VgiOJ$l z?k7*hbpeZn42sBM4c3gt zzF%56o!sb+-N1#|0}l>!Ki%zuXzN0%Oy)`j=%g!-i&0fK2HPJD%U54lm-=Is)!_8+ z3~}T|UrZJk-_FP>6TKOKJ2>Mk=;riZA+mpTo=X3*NzJbP%Ar{!jP3;aq}W*|v?wi; z&RND$t*I36a0%i7#gMUDZu(p;8RvFnbC%n;)jyQ5kM=kILyt;WiWkA z$^t{(yvv*C(snBD13JzaA_ZKAVLRh8so!+uY$USN&R%fIql%T;(}Haqg~8BSpL4UrHo>W5F*5nf`u5L{L1h8+N~$Ol0a;1A-P6L2 zr9ZXq;w#|dXBo<5zpW zUU2-&^M4l9xkJjF9AG*{4ca7_tXe~8CmL$`Cuy2Z(x3#HFY8}@kU=MDXKO&hwv$xs z(Y~<~|E7Za!Tm}XIYs++(VgaP`aHBOWBl09i;NZnmy_xzFZ8a~eB1@Cz<#ZL%()^~ zqzw!Iy+y2}1tLqY*l!#$I^cjQph~AmqogC=B*WZaV0K57e~(@v8cK{h!$s$*YB!y3 z4yhfYanEErX)(L&0vGn*Q7ODZR46M&R{v&9^%wOV!GHPgbU!JtF9aLuH|Oa0j*AsZ z#5j}f*_$obPISszA@e`O{K{@Bb1T=!5%;@HV1|g8QMiyfa7E2fm|I@@_c;INp|5B9 z=~Dzh6f`Bl{YvmV7pMtVju?*vs{A!(g?gmveV58VZ1(z?)=H}{>wAA$rGNA2$-)tD z6Xyw$qBZ}Cqcj-_QeI<=!r5;Z)<3xPp(HRq?nYPpQ?wqEotd#Bp-37Do*}m?HktpG zOWrBg$%5Gp6j1;FNAUMh{!Wll!9m#my>`Qznwl+Ea&plt;umrj?A;WpzpUuY>V}D?x@1$5-8!aZLaB`u+{;KNA?nkkc~D_A!kzK%Ej8LNN5{ZY{sX^%zHo4s)j8eE4%tUOaOuyN1$V(rmJ6ymkPs7V8G9%#MXts)e;0mq)2?yEuR(qjhb2 z6%AL=NQvNPE4Bk~Ou-~4;)jA};C3fl4xLlm2>gu$jB#<82}wT~y*gdYn~?tuOqV>A zq`rOMDvA1&O{sD>$^P3!7lXu`*(`+Mk@}e|gSuL)1-as~jLMEnyKE_Q<7#3=j=jc4 zBXdZkE?{D8W_-kXDVq3smJLEvIh3g^uJ@svqc;kSXI`R##tH?%-0@Y4slO@L}Ymv-&2`ltRLE^M*!XT?0Rck%Onvc2~C((UhO3u@y2K z7XdcNbeQlOZWzw+g)}@46Ea-ZiTzU=U8*{2K7WtSFe=O#>cQaOWdj?S?$%DJWjp`z ztjbY-jS_NL7t!UUrR^UhFB6CPEZXUD8biPlypd=0Sn_N5Ya@%1bug)!b>JViXZ>m) z%e{=^7;>d^RqccydfQFNabSpiRfT8MQt{Dq^-`8Wz4F2?#qtxpgWgRp;xM>x4wzHptf{04rNqn{BpO&S$ zLE|)RkcXkd_zZ)=e3t*Zl1X2O_Eyx$EIF{m(FG2bf7gtEm40D+^HbA>PjC%r2gP#% z_hmLJu; zZU%1HtUp=sLqMRTo0U4lMxnzv5mUGk!652zcl7UFQ>8Urm}0M4TzPpI_-9j|AY##h ztA!<0lY18FCpP6xMK&f~+bG#NS3*D0spVGElLLEyHL-ZtQ@q!*Bb)YMf2>1CB$v>M z=V1B&5JC!ZQ0>E#m!~q9x=WnXmJ|2iJXkSY8By3YK2KLy!v||%iy`E|iy?i-&$YLS z3;=y-#?`Nx3Ohy6^N44z0E$L5w6q2`gaGCTM5D_Nv{PuPdw#UA|48s+k z!~e{5IOR6?WO0M*Z?*c*#5f!V61iG0THt(`)C9~;xf`AL!%b9(Vb2VH*)P&pW^kPb zKGBadN26)f6(<~MeF!kh1s@INP)XfB)wy0!c5wPspY>M++X!20vgxTUkFb_m-O-6$sJ6oZ>I7&W8p4 zKMUovVi(?*XX2fih|x751RqT!9h5)7`0uBLVh$j=zxzWw1jdUbKqsVE^B%YC2GSAqqNi2K#R;==l?1yVJYiNuA6XdOPAQQ(PKQ`Bl4k?Z?R{@J_S7c*3vE-aok z;4sr$@cNH0UxHT{)V{{rm|3puo+8m3-O5JyLlm|I6LdJ9qNn9N+dpCaGwshH0)>rQ zzL808vcAnEY+6)Jwl&&i+M2K28X4Pj1a5r6U|KhYMn^GraIqbBgELSQG@H)7eM{f( z=G4?M3Z4LTrW_ugEPAvSMrQj{!$m2C5=eG-nlt0~R;J@a-fR59!@Wf2|CP)S;YX)G zth4@?rytQsP)Ah%Sx?>qIvLEe!=wXK#4;lw(%m`QppW(`{V-0;AB04LcM^yaiu>i& z>q*_q-mk>P)n{!JpP<00+={TEkWr3#@Q4*hisspaJi~5iGNN_+S;%m1z$@Nda*Z0b4?l>a+ zPt)}K0u_krW+0KHA;?N*05?^2FziOAcXhg^FZIB>iig467fC=F>cL^R+yo3Z(_b{x z#X1Xu>XO>XlYjF8^E*;%EX^=`7jTmwfG~$Mfw@KNE}4O@CyHqv;Wgd5P@mO z^o#Y!^VtkxOrh>t_=;r--5tmv<-6x>Sb>}pM`bV?WOzM-7^E(ly_pw^NheD3hoA() zoOQBNV^wpEHaGt^8vh4&h>;=?f@*>ePdrSp^&ZKN_UohPjfo%B>wtky&B!Xla_r^W zCHp9fU~jcD0Hi@^7-=XoWO!8ZfZz1lf0`R)(Dsmd`18dfl5aDE;l-ph9kJaGg~4$m zJV^N8yAbhqmfc8?0@n&`^^-@!Hc*&msiCnD86k+%yuiapR^{vt{tqkhUltn~Gol1V z95)^vM&k1#Z)fk9qq+Th<7`TxY&1yQ68|3-Q)6hb14YG*~Jv~;B# zUh{on5)VO3`VfH|bn)J%6&a%nnLskI8w;@iMUe!bU45Mzo~=6t>yo1H_Iz`@c{ekm z+PHs6+a5$MMf5pP$7$ljWUDJAfcX83z{LP%`x&C;ie|0ZV4daE)|9;P>wWim9;6;v zXsUbmps=>rNX-91nEAmSVTd3WuM)E#tGu=%-s)uS(;qS6HuMPaz)I=UbKidB{&YRJ zz3ezXvr3XH85OVF>e4*@OS=`0#Pf*F{k#|NSS_a2qscr9FPMPn#BzD*2Lok)#Z7VX zS6sH5+S=_|ZTqCgH6QJh_6Ixn=eynf>}-*pk#D&mVu-M$4CQR}1lwh02bNs;!y-6M z@w?s^ybmf7wn>4@`38mME!t-BejV1sP~tMkXzf3(pa=|M#~$Ns`d4T!yz1_9K${^P 
zT0KHA!Je|bAT@2eLx4}K$F-IF&NuTs~3a>XLNUWzrX5uzAs(=+)oT#b>FpgzACF7!?H0BlY@y=$8SCiz)-^A_#ImP`>60| z;#UIXpa^+kg^#v?lR^Al^lU1Q`_F7qL&7e@5MmmA-=})qPt2`H?7Qy1w;Kq=izG{G zWf~jHV{bg+fR2vGs*zJ#YUeOHL~U%br_`~);h^&Cjnd@fEBZW@oJv|}XX^9oTZ377 zd3n6R1Tp^<@xxg(M0CNc*y$2g#;=7JrRAe(+`}C&PxcFSmI-B=wUX-u0W}bP36!z1 zvMT8;4Gj7Z>gQj}P=e(T5r-Dgl(YRduj^qq?Wt&byX^RVpS_~6652{tOKJ4I&Vxq2 zvWd$HUaK^pwm)2eAEG^OfNo#aD^vv|UZgF5(}L$rUoHObBDpte3-V zK5m4Np5&c9oOYntG%~7JOtX9x1DnY99(o~-ECp-&z7g%{H!9u z9&+|-a_BZSGdl=TS5i`Ho)UGSV_=Byiy}-}dS3NB9sK!Se7M&ambNEIR#tY(ncR-- zB$FP4=FCCj0!W1u&=rBsCSW`!{Nknj2}MGhD=?r+-#pX3z>!q_KWyJWH$e%Se`IFN zP%=x!lwm%G62q*RtTu?1!h5Dfy|SX!;K;XXgIsCE4;pr><*;H3yMnx~)8TZC5*DUk z_~nrjaeS@LQn|AG$Chazo>(2x{0&3tX?-=6D+L}R96}@^*Fss@yGkaC5cZtS?B~rG z;gbftEFtf$tpxdG<7i&VXrcQ(Aoi|h#jDBRzXf3U@&qZj1(8})?otNWobZNi(SkP` zms|xY7%m^_=s9BPl?&juqWs(N5oxz&!w50Em+3I*mXj=}3M@sTNhR>d%fsnreebsM zyz${)R$83vHDLVVy+foBhhCo6UI5*}uZJ_R0TbqQV_*yB8sQy_Wk*6vl}JzY>ag~FF}sI)Z-2^ zO};hH7PB#A9V7ziOZnM*cf^IkdDj1Am_Q-*ko#MlcR^|4&naTO8q|swxRACjJzDGN zXgCKwUJP&BWMfc1UuM1VbSrMm<1%RtsH*GcAN#T1rUmSAftMX)9#4H=IMTKTX|r$~ zcp#?!HJ2}~YT7Hdjkl{*1)V}DpK~Om-kK%oby@T`v`eT87R)x47`6)Y1IUWv<#RD6VtrhoUa>%E7iC63o6C^!ZG@nwx ze)?Pu$HMdKt5F;Kh^Kx^#-gbOW;Y%=rJY3N_2HbgUMA|Vdh5B_htJ|Zm!sSs#Cn7d z2wDv`-`d&E4SNvgaRLhh*j|ns*Jk@XixM%!F`fq#BusF*R{dlqon)umQFZ?oL@Ulb zRJL8nFonL;{eN_QcOYDEx2`f6F=CW3h+fl}LG<2*D3K`9MhOy~V06PMVYEn;L?@9% zuMu^0iB6*TE_(0W?K|f?=bm$a_dj*^`|iE=D$jbJ^;SP^5RnH}MKMc_jS4e6YFjj% zZAv`2&8AN(Z<`zx`;@y`ikT{{)<)yP_IXfTDoEJFvU&6+*?bsuvEX=j8z(jUyi3NY zjEN1m9^H7m`Tyg$jumi!z0k2wUJ~|f=hFW;P5v<^D!v|9XhCi?uWu<#AwaF@phw|V zQClnNceAWK4XR@RM=(;?vKb&pOJArrf^1-WkL9wQ!`YIAqKkKm=h?oIuSY=X3V$py zINVe6`yHZBXpik+a~U$USydaV*Z4No5YSM&`|I9y)FqWv z|AY;g(-1g029Keru8`%F+XT~m2_CYb(e_Ouc;M{TMDmM<*Kftf(XntauZ6d0Nx}Vm zr~TvK9xi$5%o#KxK4H6Toe)u*gXC+t2Uwa{n^VWt)pehWLQ>xa#HWiwu}#p>sM?(z zjyoMF5R7Now>?59l2QHqXti51;1qC5>H-;Na&)_tjy)ISkNHnF-v5N+zI*`1(q;47 zD9Hwk2-esQ({R{ByP@Ch^S9d^#|up){Ft%b6);2- zwE~JOkg1LAj0Y4CyxeL>o0BFVBR(D#vV9_;hk@GE`D`CR1=ka*)Ycs&5i<@mh4!PR ztv~3A&thaQ4FWPWk~n;MXKz?NfQoC!_Rglb_4Kdh=@qzsdv&aB_^i9Y==-Zy9sQ>P z&U?=zUC^b>g#CD80y8_v@2n~BDdC!8i!>J*$q36(4;KMbDqFHNYTjek?DYvl>7kMH zlf6{7Y%Dz30qQy*ZyPD#Nkg;?}Nih4T>D##Js{*ue9 zt`0+aO$bl)t7y!;Xv%|U7!j{fHoK)V7Ax@(1rPnZNV&aTHN zYuICmJ(q=r)kiL{kD&*f4Ojk57QYsL1a&9Jn8ylO^N&?#r zqhQ&D94&O_yqe574X*4Dy`!6}>ArMJ*h6{5SqEt;G<(U$DY=a!47!Bj6S`|)QkWz> zW(st1Rfl7CM3mNj+=}EJ=PQr?#w3StR!^?09Q*HgD;CKyCYpMGy;--&M-U;#mypQa z_?=$ig^s0$lvbx)V&#FFEtX6RQgxU_u>=4b?kKK_6z2y`_gyi9V^uEM@$^)>j3tCq zbiCS=#tJ%YtK<`Jw#|j+@O~?Dx-~wIrs!buoX8{s^Pz`#M<)y@jO8sRsN2m}$#?sVoh+C)1U&0uO_2sJ+gY6KN+IRcQF{>uhg7vVr1I<2fP~?>Vkah z)6CJFF5vDmx?o~vjXHDVbHo0LWqfd=9Ct=Qbf~PDgEsk1gYj!>SpkBUu9JK{35Nla z&_1+~i1LMo0T=8b%7kJzKA*@#!48oXoT~O`Nt9Zyy;g*6KW;ipJ&{j5D>@kyt;){} zK8PWmh8pI_PsA0Cj3CS-9}-bF&B_H8KP&cOD1%R7pJvtfYYW?fn;<^$2oZZvx0`^|&{(Z-hob>?{?$Y=I%ZAPrYOtHv*{~wmPc*xZ_Fc@?<~Q}gl50Zq|Oy) zq&v$sD-cpJ?aj!Ue92NiaQCzqa&o)q3oR|0m85H<+}sO!%XE#;|4)qI`rB|Of{w`k zzn70o2F?HIT6QRXNwvKL{%R%9#da=j?++g^+5=@5CGA5bURqdW4OWJq*K9wLZ3Njb zoV9W~mE#+6u6rfHqSYpV^sgo@fBmQS7VyUTH4VrVzhtk5$jfBzuMKOBBOgawDEi7F z)+rwn3!991AV`_mwkREcEpRKFaQNbuB0boU8OSVgMaYO;!J&m9iHGjUYlI6k zJ8=^0BZ{7|FM>%E(O(&3=;ZI2CsZeg z#8k!_6SJ7oq)`85d~2w{8+gOkIOa9c0m&5Pv$SyvtE?=Sq6zis&))_1q@H);pWeO* zh6RH}p(@&}IR<1jdb~|bjX^s&2|~YCY2R?RM>CCq1Zb<3zD!N-p~?*TxyJrdAr@62 zJuRr|ENCvc;fNG1^1{m?fc>}9{ePEFviVf-%wXtU>9<6oeu`%34GLjx#rJabaAe>$ z#09+p7K3ZgB=7vqEBgC$cw013Zdy^jacBD}F2@G-It8d?yt9PYy}5-Nl4Wlek^WW^ zqT_6DvvNZzCRuPlo0nRTNwRg!YRd2nzr_nB$tOBgjraKu;48bko&0N)D-<9a#dx$Q zSP?`6hQj!JNF+F2Jq-XIH6nhzv8}pyM2|-^CHz&71-GfCyy#e16g{OQa$G3B4pG9~ 
zbkmird5tLk4<#4L0rD!OPWXAPTM8kWb1TD>UlV8e!Y|L9)1?sX7n$YJ5c(~1Ri}ZR zH__iiDVVX_o328*zMF|GoPyEgu$=0d zCdQP=Im4+Pdr{-YBnba!ghG{-fz&`D@S|F9++QAUVaA%zb}Vo-;}14?By*el{vQVU z8wZqXM8ZlK+o0jlJMfWaM^baVjSO5HiP0VVAo!HlN8!f}U5F%Ja>o1ipOZw%Gx6dJ z`}yo+MVXs!*hj-pEuXjp0eycq{3#(t=){DvL;S^X!{uVRLlyydh=NdHH?{d1n=SIK z@cJLhlk%$8+fw8p3__o^QH-MS5dx;Y zZ3pp?RAo4+@ewCfcE?$k+%rbv?*- zJ!Ek>!p;vu9h*D(bXTD3_TLXd>4+<|E9QT6Nd65AA|B&DP-K^q)@L&FO5S0WM+ykm zaRwXgQ&)DkQoz-1j9Z{4Rih0^^G<4-BBvwKJ^Zs z#Sbnv+U^L9Ap1n~i;dYAY!q@>8xrff5#tpz!O5Fw;*mVs!bOVW4CI{NZ`%$JllVu7 zAS+N-XrURGvkG`*30@&q(~&#y^JZn+3JFI55K*x}%qFduTmG%OL26yIHt@TdiU4Gv z)L|#-e~!9vEr`jP;U4|5CS5DC@nbWXhqy_uk@)Y|n=Q9d7%LkayR$njjK9Oee}8_= zILtji=U2RIfjxfp-uGhi75$;A{^}%K*%M*uNEr9aN1q{A1F79k0+x%3=ojg0;q1=^IZqStsK3 zp{4=9zvzf(iYP+DNw*7eiuRa3`SK484_`fm8OQ|*I>^l8w9zp*f7?0Hg zm~hvxAK|K}nvKtLd@I!)Xs9#|c?$MReAp!kbJ*7L$U58@Co{djqmGOFChgz8JlHX_ z8JxeXTD@_HK3a=hxk`XWmN&XzTl#EWaFU#J#heXa3zR>boxhg$*G=M|fCSldC?FSo z5_{ou4vFAkLyNLQ@AOBrhju=G-AF{Y@<3I-nd&xy-Eg5hH_6t(Cv|6rDD8WGr~7aE zlJk0A8cjB+{S&$4Uw77)N7hKE+>GszKMBO{-f)&e&5;xw-Bx*BqpGQ^TTX?tn%<`N zxp@K?5D*9&5%|ddPbtK|K5CoVpcpB4to2Lqt?Vr>S<2?cCjBB)6JiK#ru4lNAzNRS zi)AWn?Xx^Rv{18U9vFR|Lh9dIm(Pw8)%b_BgcZ%p-uK;Un)fexjXXgn7xe2{D!a06 z(LE!6E;wo%S;~rjL@J9L*==>NW>$FgEF$vOKfVmtKLqGTBRy{@zoPVA0*FLqfvbH7 zvDy~@?^ydEm}Q^|fmGqC;r54WRPMjnD5|W);1ODc1@itn`#(LN|M)lATbxkMSqos+ zLAPqW$yCi4A1wi5pD6`yl$@64N2v_IXtdDsH+2D*(OuDgx@ zyhvcoUzF?y^YBU~;5ezug%$ey-rdoa@4r9JiN33ZD-6Wv-EaBwdw2Y400Ahi>j#aY7%1P7gO2A5WT_{ohOd-(Ry9 zUN^xD`T0ryi>LPQi~NT#FHvGOcw?FE{yT-0zCb_p0pH!~xP2(y4*h_xl$m&>?>(5#jVN8^C~`ltzQi*X%$1?|0iGJ&=-984F{h zA+xyh))Nl;Rao-<0vI!ypYP>zOD#VOtO$)@e$Q@21o$E-RYo|aeAV@*;6wuuey zxTkMgIQ(xD{=yDoHI^-x-LWn|3~l~nuXSVS>0^my_zr zk71FMl?2HrPRph)1(tqJim^tHKa@N@w`UqXYM6$+>m;nidu>cS9T#o@j|vAO@S-Jh zb&Y_b`{qmjF;;RyEeMoriy0ES@31|B%{D$mhCich#6Eb^iF70L@X4I9z%b^`5LkXX zNE5UOHI)AyE-YsWI5K&3m;*qB1tsGE=F!jL>4F6FnAf)?vN)8mtY&z^Dg-5pcxE02 zW~vrnx7P3dU#1xdG#k;sHg)tA5+*G*>QuW1$Xbuaep%;#+&Qfg3bmswBt;vF;m5q-^41i#~=Ep9CKh+thhr+FAyho(EbC zbntIM>(kjlet`>txQDvS!}!d8>LW`4(h56!P6^Z4y{rgXZUVRvolN$dmJEWO z=Ch14K!>~YenI!lrQp=-C3-ZMX`4cDFX_FKnZo8I+qC}udN7|+CGB&9r60&NMp4Gg zM4b-sgCp7J?R);m#kp3qS??V`id9-t*Ii6Wc>i5|Q}_Hw0o_X5&%q&-!&L}n{K4hu zo~u=VI^(^Ro(Z=l=6r25Fwa%CLgCv86ZyNRG}4%LD$6<)8*=j^B_#z#|CTw-J%K`4 zvH3U&*E#hdl>c@;?OyQj#Q7llf!2fCC6W(ohtBmP%9+|JgD}uW`Nji9tk_g)X5;$2 zz#(`>P|~6WoSWG+3GTXhH?~_A8%|xMY zi*sZMqgn0Z1CBc8B}% zo*fcf?-w7uA=+trA0dGVj^8x76S7oy60o>tGhQK59|4mNiI-nSB7^%MO!?z}1TFYb z!9iVJN|wd~<3AuwXrM8jX(t)m5o*pgqw=R7BDv^z+y!RbIPiU4n zhKY`|Gz2`;#8s`HQZLNP@wCbIc6w6!ZH`K*7d9-~E#I7Q+Yvq)5v$@gY_u7rSpz*P zd+<&@T@f-!xt4EU7Nt%V?ny9v>D(sK`%Oq%pD=`8!apY?$#B@ZM#gM2|4LOuqMk1b zxxD65co2Lc)AZ_{r<8ux)JMBp9Vd5vLPtpH=QpR|!&AmG!*Ik*7#$V@BO!{CEzG)V ze@@=~`Ia$71YIa`V!u3In_zAJ(fu?QUZH%Vqr0;w4iHJ(5#ZOX!D1Ml2E36M`>IVlf_5`@S7Uhp!UJ` z&fcwh7U)2U9rmZnok(9ch%#e|v&mdR%u;poCwJdtf-nm#nVwv*U|vCve9a~UH}}(C zbYFB(f&N2+{e z{c(WRrOJx}WmUyHN!K~O*qOCF0sKzrx>H$Hrhu^l!8_bq%gmFQ&-Y=T}t zow9mYF=mG+#caZNLt?F+4qx*7c98l5(;=4jW7jZ)1m%_RqBxaaTiozDi(HX2`SI=W zw6*56rD!)K#kWR5ezG*i`>RZF7%-g0j1AzEDig*n^ z-JW19*%ZvNylTD`R@21-5Vi54nnjyjd|jrP}QVDD|&U zg68+KSV(+S2`;HKTzUgMo*k&3m*t>zo)!7;$Bcs+hFGYfg4_A?bagXkGI{0%1;G;J zG>yw$v@n%g3G%|!d{~wKGEI4zK5Dq( zz4`0IcXjOeS73vNvpES&>h$f$#2Rn)j-1|>=L?=o+=ju)x^H(Rewf{D2H}-W?RPK|o9LhVfh3d}wjc=axE$b};2VOEMdof;sMVvRTev zw~)7Zmksn1XcLYXyl2$+9Ok|7OnJ|vq4)HoF80F$|D?&!m&$;GJ-hP7@`8q8M(k)f z^|oOPoYeMYk+VxbA~zC@Zy7k3_$z!S=4<@(D5@1va%zP8$v1nO9s9D&FB!3vwMLDn zgi*c0GcGUGbr@P- z&A6Gx8|8`P-+(>4xEa2&(;RNy)Pm%s5g=$t8%lm}8x{Hsms}dM!iI#rPw87YdsyOc zHglvteXn*$PdvLgSuB;apLEL#d}IU0+E<{i2ibpXvY!~( 
z%TBD>!oq(!x8zbHkH$83E|ZZQF<($w|w#IqVprgFL|E=-^dd-iqEc5~CS z4zLf;+zO_R7tPkMO|L?ipwJK6u7w{xfJ2%T&iP`Y$gX#D=jHnM=*S~O+s!i7rH(`O7F-OS?(t$v?2JGfIO&W*W}x86 zM=}b*&>qxcqB`@eXt>XIJ#Ac?K8C3*UYS%OsMCN0`A{QktMJGpKZ@tcMQh><%Xb-H z=)dOS_Ch}dCeJjE4jrl)rGHfhl?Djey_L0x;yg*IE6}|pTf<$a8Z%oYTlrNY(qIFn z9%p(vE!%W@mTh_X4UB0-J5|wqM7!N$Z_+sx!kK+d73z%ry)@g8|HT=-OG=N=SMo|AcInXev9i+Y4paeOGi(EOIuVloUM~d8262k{* z*$Qa~EDrBHQzs+w@YmcZj>yOpD67DdFZxIgt{MhKS0zk2>=_UpN3@g92wA+cC=((G zLyqN4lTvXcYATt9WrcVv5o`)c6JG)4x2btsj#zLxLZ&AMG4ylI9!HT}dy3twKz(1mD zPFMlH`uv@%aQ<=wfS|sul67>iV_IeE#M$Yb5+2V6Fa2gM6xopXlQK?r8w^OhY6xuP z4!0U5#EiblgWi7YGsqj#CIlK^E5!`ktX4>6iH3ar0>(SEDv|uZtMm)XRuMPz;B=Am z7U|6jxYO3iwqM)uWy24M04J^+RN*fPfWg}^sd}tY2m~gH)hBMnCe_Q#mF_9mIv>;r_TS;*33G{< z-~C47sXesJ5cR(CMdwf!;1n4-yuEKtN+Cjx1v~Fo`*coSZwR>DOa+6?gJe>{^B$tjP24F{5OeC3;lb))yARPO7XYH+&ZMIGtRaxrbKyvIVQHi=DM+aW_k-06hi89F za62T`BDDKn<102UyxFFB6qmI-NGJEd-J_t&x&)-X57Q|T0rX;cRA2`D z^QFjx>7R1JjiEphE`)aW(}L~%xqEV2^G5)n>I~EtP6B( z3L+i}g~MG3b@Xr64n)RH5{J&3bd3tzla53a(ynvZOMr`bA7+XYeLJL6KT`h?+ z;VrYLvy=t*NB9BcC<0{rCBFk>1T0ih#kBbBEV}j@IABczu_80No>#}SE5jR4`-WmE z;}<6T%lt%}bp8~|&bl?fzm2>!Gbfd69x1yghOKGVV9j9D}s)@fu@DmKJRP;dfYDdf;eMnlkR=p}!XPO>8>R=&Q$<38THX$&)VWlwlgT zOHbwAfsd?KGY^3w2AXq%vGV09J8Ck?)g$#IFj~rHaXk)OgYV{2W6khB(OK_S`77x@ zpP=FU`@>)ErZv9GnU&JJwH5eH#Cpvydb#w#?@x9kB_L_8q6##h>N0hkY(H8g-99l) zBbk0iPLKl9c~~f@qDD|pwY^`%4oK1DqBUsp%UWoGxZ*N+z|+wjthq0a2PU6tAub{z zDIWCJEQ_wMhg+1w5QP0;SJz*P@9{L-73!edTnk1cN)WA!r9^Knt8begQ`FCn#@d@Y zqH?wF@4xrsM9Y%S_~%lxYez`_TNU~3CjMo@o^JtRYX0(%7n^M8W7a+^T+T5}^%f(M zyed5fwx5IlO}Ho}uPc}yW#aqCv(&l&(JH_T#4?KT^;ysVMXj|=IaAzco8VbE`In6_ zgW)X7lEzBWjrs&bDe!vc&eQ*PF8_u~cT{j62nm^CZ8dCqD&D;t?SBg`*;z1=AZc0J6lrZe)B0iWC@|akjb#Ebdth zUUS>iI=i0h=X8x?%L-8$>@T;)z9ut;eTFqRc(HV<=fRq{-1=R(?pP?cyCeC5z0zEB zTVLJ0(_O71MZw^neMwak{QquMM>Wv=RrYq)@#rViUQ@A!Xr(3Dg-1&`46N zQWEqI0$|cp&e#O^(DLel_}^;}%jbG{$1R#{6b-)vhem-(@!c8*sedw?_WUkse^W6R z1BgzKYd+uBaVFJBlQi6xc_<+vQAMs|Zs*IaIICoysG1Q0wH@axi@*x)Hm68B6qHf# zEQFX7|0z(ntc#d#Ja4f6qUfkb$>F;0W&J<#V6-SVF8Q;Iuge8&>@AnN)xqCC0s!+{ zCVGkOKQ`w9_678J*Bvza*DsAE{#<;sgfc|%vPma%E@{1 z<2I$dq6+VO#Jqrx>*sK}hpt~CYU0%=y`GPIfg>FbUu;Y}nGKDdo%u>POh5N2@T^PR z8uKYQ@=H7NnrgV%a9Vv|nHLMpua1-cn&UarN>>HnKhL zr4vK=gigVfL>~)vlZ#B5bh0;uDsCWKJq>1zR+-SP-)RL+70HDxK&0QPVu24~#V1vj zC7_CxZ3lgW6PC;?d!a$zvaDEG0KlyEi9I_m9_RF4pMd{E z^FgbwCN2h=htv8)Qo6FtK%t0tPa8%IH6IO6Kj})!q-=JIy#Bi&AQTxu-~IgerFfap z4VMQ+t0nEsYj$<@r+b*A% z=sjrpO^x%E8yYw&f+pjxw~xg$g|VGnh%QwcgB+_*(p;Z)j6ojaN5K^Qd>m3hoZ9XL z0M8#;Kx&ift~ch?aPj2QU?WH36Z`h*aD%kp9QC4EMm?_(IZ8<_S@Zf9`~Vw!Exuo! 
z-17Gv^PKh@%rm?ATfWyLk-JU2qVl-mq}%DUQ^48y%gghV3*JI=di8|+YWl`5O5&?I z@eFBO%Z+g8d9%#9LVWx7KZjw5vG&b(4umA$wz#M)uO5Sc&2fJ_IUb zO{s+gu5bT5r}MPm(Wh>aX~|T#9x=n4xhbxX*Rr8W8Z}S!{@fsNlJ|E6}W)cq85yiIibUXMKZrsz`+i2=uGc*2aucCMe{<+hY*SUh$JE7AH!Y;?hQ+yv&`L4sfGh0&*im z&92L<84{T!$NH0{*8uP8vyIf6;r#u~lNZ4KHW3G85oN8J0jGoMLsSB69NaIQx4&xn zy{f*tJR2-azgU0%!Es~zQtEUiyKEP345T(>d{wL-WgBgbPLfM*MKX%h$f$=y9TDOO zzjX%zmV3%1f9;FY$#(4FKrh6rO)!nJ#_>!x@RBf8O6l6b|A4!Uw)eh0^D;iTC|UEz zMjG++4TSfTeOtPNPl}DiS$dj@6t0CHWZltCXg6))n2%Fo{`o=7VXVBeyLF|%mxzP3 z*SUi&ET`^hN}_K41;3q9gDDqsr*F%5Sz=Y)?DWa=7w?~WKhv1?J@cZtq5li>FCPQ{ z4bpylz00046u?>@hQMx*IDDTJ;2=J0IBwENDB(uBDH5Iw!+`*JF+Ow0`e;jK!MjeRxzk4@#3gh)ySgJ zrPj}6Tt2--%!cCe0+TlE@s(eYQttTr>8qXQ}j5CxQt} zkV{1Y7i(w+1X9U>n3jTR0b3c)RX6r3?~g6@lkD)_Ob@ige|U1V3l!-^+`Nc~RK`l+beX~5a{0Chmt=k5^>)i%4_O&t6Su5g>i#z#ha;}$Qc(p+8gi=V*S{S$O{9s1H^jiCy zNYKpJ;&x`A-+YZ9gNn8Dc}f8D9gZ#wR>9v;pp20V`*~~JzU=Yoc9={Gir;eNCHF$u za2udEOBezKXNaF2k?tISr9_xfGiZ!RYr*%a4|U7whvm61+`nnjC7b)oHHA~QTSig_ z!;z@zA+k``Q}os4)>WG6aT8s%*hRqAQNZe#?C3{;mmf-+&yfO@iFWNBSAdHMpbBOG zyIpj3TJ$oQT=>g-k&d6d6_2WQ)d+lTyol{9N3Bd~1Tf0AKntHP`a%iX!kamUVgQe8QcfG&ls`_)FfUm4`u z^-r?H?S}HvZF^gK5}|A??*=>U%+AI_yHC!SWg41XXe9D8AFu`K+4tLlOZU}bg)Ij~`^YCw)BKuEf3D-rA6li zQ>DqJ@yN?C@>dma_HM&;m)qM9N%Ve+NlIUSUrdzQ`mvTH5=G?w9oZA3sQCC}Q z*6Yr#;;&Y0dS&$SH6r`<6{S0)hyy7=Dw`0?{WtK{iTx!P=cD zr>=M8yf9dmKscb$OfB21JJ)YOa^DD>K3@`9{i)^eS{b+u(2*}qY10=?8c-{r5=}qc zk%qmS?++5lq;vVs`0&2G0EUM+lS%v6_EDi{-Mvh(&VNR-I@Z7}2DaNeI*ny)5Ew6! zp>FWQDpXi$%r3JMMTF)s@G>~P}}83`WM}L zW|_0m-d8N7UeEXwja}6|!e7G>U{5936OJcw|?wLtlWEDI32!q+s@1!v@LPe;&AsBZstZ5Ov~9#>pF zDB?9~@JR9}#kJelFnNry>4M-hn<12hTEPPM|Mk){`kDWgLV&viT|<%+Ahp^zo;lQE zO{-Z%ELISJ_xJ$PzR4TR1vSq?sa&VPd{60X@u0NhpSOy8#Mi9Sz4SA&%Yu)*W`c;mJ!R@%G()_Za11O}B7aatR{{J~f<&*&9HZNave?{AM%XUJ{Ws z8;Nv2o;s>e+z`wR)=oc|Eo|zDKO#0CAh|K^{ps#(MG{zk8sD7FV~BMekVTdDo^z1f zq1hI?y%|us+eVOPkTM;XVgDATq~2VOTsqfSi&#>-{I;(ir#1$USfgG-G5{92%{V90 zy2Y@Fpe{N-yH1TXMQ1UH+1ZGNbS>(O^-t=FOB9~vMTxIfs;WO!tt46elGf_7+R`JE ze|b9-I5DzI7)$@|)@t>$!jspDvV!wOpX`5}>^@XlBWFBe&=A`HqeypR9Yh=uK3#F1 zdZ)68G8@LDd!6O+-om$LLEMX@kHj#A#}e3^?Y>R4B3w?w|MO+8OVR3(De?vFfi|+6 zj0QAm0FTE9v+=-uv+>M@34)Jn;V?7cgc3W4&UDN4(dYx0GO?+jbD23?l`ZyzzO{DN zA-i-<)l8U#COoB%8`fP^j(24Q1qX!3vQl1sUwN6mCdvJ;3ZVIM;KQ3>`jEFQN@8Uh z*)e_ji<#Qf?!zW_#8}@kRFTU%K*lagmH&DP6nk7B4ZCwlv9Q{YyUJKp(T*lXqaYra z_?C;c^`5jMSfcriGK4n(><8i|_)Yfci6qNPJnK8Rlf}dsgvW2QW}9~>JO13ammhqe zczefe9u}{-`*P(B+H)7<`Opq+Zr@n7TN2}UU-Ly~@~$60mXY`M9yJ|&3CGSdbR!ye z+jdXpVo>HqD9fow7lX1bqdV7vzMmN>AvEX+%vQq7LU={`-^mvv|6IqfD?az7&+w`P-P|?k^Kfd_OH5t+8oIZcl&T?Y`)f37VoDe1# z>@rq-p_k`xg8;O;GhUV-(6e(=S59Z;gX`qL_?YD|FS5-?5*vNB=P|vhiO-hRb04I? z!-XC9AZ8o440mciQD%$Y(*>M17dH+Yq2qlV0AyjEn{9whTH4yVt5Lu1@3Ym?PQFg! z%Y*L_OU<7Sp2O$3;S=9VCf!f4=ba4Z4F9;tnPT}OU2d4t9%Aq?T~(9$m1$&`jx zja|I4Rk2k8%Ot0eazhB5md_?OMu3owR-`Qfup}E?5cDSgO(j~2)<3EEJ%DUn(CMvr z>rIDGlLzLaNhZotNe8WXQA%VS39sr+*TYJ4rO)-HnFI&gv-)5+E;jU z5fE0eN2V?9y*2~<5|CVm;X)Z5lkN<8^$Ni2XG0kW@JVjRg5_F#J|2Mub8O z?Eyu#6d_I7kykHUt)SO;Dj()X!gr)#Hq3{)x)6JyP#H}a9olD-*FNRog($#N7Q7g*EK^AZDA{1!CO*pkC(bh`aAGsD78rp$!F>lGdWs7QF6+G>b=A>kmWjX+}IH{vz&og9%>044!;UEicUzm{geU40|bvnW=9k#?&GRQfJ|uHURX;X_kJljrnsRkxLt z#kI#Jtr>A5)==h@%h)Gapzyay(cr=--H*fN98_+65wZqO_28iCY#!*h1t)Xs1eV+9-@4A0+Y3v z=|$|?en)bz#%}Q&KplV`m``+Z`%j! 
zphu+EkWB!MNp>YzRD)5LV*g(1(30hFU~IqW-@t*1TCcMK`=z@Pa{q8XM!=HM6L4eOau%Fsqw*>jzI9OKP%9TFN1FD2;9?Il*Gfmi*5z9y52 z*KiZMpB)znv%JtzVS3-a$PMhFxWZqseazeBw%2Z zzlFR4m>C4#GMt*-am1?O!|ocynD5NBCg*}*Cd1^$pznhA$qY!cli~P*(#*e}U?1!= zqS`HHa$}lV92yDP`S-n0F>^m3$)No?B7CtDzu*6)svTU_?Hiuu2RT6@9Obj?Wxbj_ zsjr_{R|V2htLz4qzCVz%CA?rdRmTLdV{WE#!0yl7$lVT~@r%)>z={Ai5A)-jI^qpl zEt^3{_Ja;FgF%GyS0rgL76y9OBwutfT$GrRv+zaS0-)uV&J($Kr;~n*febHZV1)fl zUSr0LcWrfkcO56&CVn5IEnZar?^ysck#4glN{F015fA6uxLm}nLH3sB2dMuQ#;v)p z{fKcZ-!~2B;6=O5*aU6bvOk*i>>Z+BIC{Fp#dOk^F#R&$PNAdXt8Q-F*L-7t8A%zA z7SoM|QYLJWWIRj(=}d0k)gp**sV=DX+CKp1P0xXItl6mSL=m@lbUGZ7ZadEMZ>7rJ zG4Hd@vH4Q1gUq6Fs@mCnW36+18q;0(s_T?4H?_7au)pan)49!0Gn_5BVYqfSpCbCrnEm(iq$YRO`EL!j) zlCuwpA@As6hEN)&F6$TYm?li^Y=};nAF&3nW|CKE;v>_!OGVooJqVcGDtZ9_6A^K= zFX{%dAg_eHxgvcDj%QfVt~6dr>xc3+^vH^Cr2dlrw6BQ4e4;&j2dLPd(`Kz+SE6N} zsM=<*#I3D6?<`?sh^I)!WePd9@L7UjCD@&9582FK2~qw9^#-yK4r;un@VF!Ajs|{( zpK8qciy`RXH=ya`Hjz?zNsIZQJqaUsjaPAbI#pN$f;O)ybMNs zFbyjCWYx?UD8D*j8CET-jn%XoKx8;cY&hpyljtYOAm5q#9D~8dvdeZSR)L8m1N<^~ae zoBfi#ARYcKhc2g@r9kN+cXnnh>Eg)96MZ0F3!GA$YeB@noj>4Yhv8&MNG=OtZ-fC+qXf3 zLA#tn_{1cf5FnP_3Bz~x4@E9e8qv^ogBDdG7I4}OS@Z5lFo#YXd_ZN+3i~ZsY@WeF41vf((qhA9x}_Z~Lb=+KVr?yX!$g@w zX^Bm}8b#4Z>W~Q0_AWSaAMX=+UTQ9IP!mW=Jx@t%Ui>>1FC#up-1$< z$6Cz1&^eJ|UM)aW{uI0|!D`GIpqe*LyrtA6o`vQ7O*tFG(0#rLGY?}REsNF}2*D2) zf=R+@W|ZFa4dZNMvZOKYjM|&oIqf4jiRgM#!LO@s&|ThIs7^S0*Xx^v#OYrH_IZwt z`FUBB7ds^09Z7CEHrfBtvPBZe`BTX)G)p8Z^(w&s_9f2Jx1QBg-s6_;Glfv_8dFLl z)FRQGAO+GiXum#(FO455Elc6npd1@G^{YM< zfq26+jLl8vS+6(#i1y>jy%d}sdroRDqBn4b(2YF)BaE^9LwU=XIN-rOJDh?lgtl|I z`LO}}8>?nvi9%k3H`8Qb&nUScASxSU1KFr3_}d6!LiCI~jCpiePRSy8`L2%wO0Pw_ zp<6j^1?3usPv|6?rjye;^X7{pZx zJTv=Ia`{>%ozcsvrHr*UV0V^W;#Y0&o7sn7e%_*fc^7DBz~JnXy0`P@X!)tes4_pn z{q+{5#8jsk5X5TJrChA&LaHz659OBn8#|oxq*r9wWZM&>MG@978B_E0sF>~#EYKxI z+pT1%z&H;TMti-Q?jdVsS7O7=IV7}?_#0r>{*4;ed;?!qnoQo(KWBr$wS9rwft>g* z+eA%Ougm?)%#jX?1nt}KK?E&Dln}4&uKY*oz2si2F!?u7wz3t2F#KSsHpT=jH``

ZQM%_Dj_-|fSARxMOGbd_4zlwVk2kVzBe(Tfthv=^8sa#iC0j3j%RD>b=sOk@ z&AcoPA2r7{%{ZKJ;cJL2#gWgoXqg^MEUU(?EfmdWZo0pB$~TAAfSLEn6a`Nu43S~= z)M=kF)o(mq(Vfq^GwV)BNusEL5&O?A-&Jor{HBPkS6Pq?AX3nzG;bnGLtrM%35Wv@ zf+$Ix`h351o(lOcNl>xj7*bg(EG6lV^U`^V!ps?rH1_L5DG3$sOdCnYB8V|NaR~hB z{_`A`U$bg4rve&k?!!_<9(jz|D5~W=l#Y9=9+7EtiARx*5b!1GWmg+ zQitNWvYU^r#Dfd@!ougA zX4bT0RR0t$G+#(`FCnMlXz=|{Uz*c*XohvcN2-$rr4xwFBD{roulZYu(0MCQCMTod zE_Z$Ciwuqqe}as0XV-Ze(q^b;+qz%w@UtSVysCn zy(V}|zL1F$hkNO;7)1c4Uua!w)T)l%;1BvJLJqxUA&h%Jd^>MTXle8?zQhc~y^GCM zlB<9fh>3~N_oqhiadpOhZ9jSMl)fmA*87ubKVHQ1{PqIPU3wahjUWv~U!YMz1pc=l z9MU|5nEo8K+;vhjY;H45DN2GJp6wQlnLj#u&7O`|fx>oEyHc5>hcK2F^{ze}p(Ok$ z44Cp6-OuM&vct5F9R0fldJ~#ZGWT&tGj2coQ|U?@f}TrN34SIMc|)_sq|Vxyp_oG+ z7w6HG?sqBX(Q0M(T2!6^CgxvUK6z1=wP_5IV_>Ganz2xpbD0kGnOIMlvspDjl850IpX5u1LaC(9?Y3yCFI$sMkwU>QZVpQgv}UW#Ge zB*BD=lqsYnH*Qe*$8tB4*QMfj+>zx@_f}XCW^mqfm)Mx(%GS4R;(vI~E!#!QwsM!@ z6OEMcuwj176blrlf!TY_A^)0NHTXbfVVRyJu*Ix*RJDOl*f3f*Gv(5?9p5z~dtnyV zDb+}$>!pHh^x!)fEc77nsS&kl9_)N+1*?Pa-PM&Bv8LfESpee>xkllCtA z&#M6Iz>37c#BE_nh2(^08i!&>c{+>{OgK{`nY-e8nCw$^1Q=N& z7=b1@Ch6)=Hhz{Vs0Vsj{`4S?QegV5{_>;7Xxmp4Vv@od8<$pcPWlIk@XMwL0%6b? zc=;GRr3|UVZP@)qiEBWoJ-i4hUu(;-Ye$hi8-wTW+7fa!de%Z@nH78nfLP1Kj%)``BN-x-CU1jJ=F0pe)V@ z#Ka2=1XABZlYOS_4Lc`~0*cTuejv6UBx+LAmP)IuAdtp{RAPA7Oy@0fOy1uRWPkOI zlRxz%GW~-TpAvN0$|u#VGgNR#s@Y(^T6B~EMRIVh3Hsrh5H5+xbtc9~vOaw8o3bl2 z+#Ke_k-^*(WN1msPwns3vLTivO@d=$U4Jy8k(10lMgwK^xRaLQ4QN$kyRz1|;;x-k z-X5xqm?Q*~iQ#e_L@b3xxjO$gzdZPGcXbA z&rad7vmD>UUQHo#B@?0B=l%w6$YcbIX9R=z=hkNpn41SzxI#Iaq3mR9y3vLl8t=|W zsBraL(C6ntH7f(Bpl45z3LB;eu_xTwjnBP5@o-_~_}`+Z8TvurGDnb&9rnimnTU(Q zQ&D~)iG8B79|xaLB0p#x%d#5G;r?QDY~GMq_|z@us!vOVDkEuZh>5(9Mby#qk(NQ# zpYa@^lCc3<#vYK6K_CUG*t>xfOt*9t;km18G6aKQc3@tb4CBHz*gAg09-UgJDu6mo zJS$RFrf2Ml?n7$RUbCM)*@~zF)@GwpAk$%`dFp}5FqJxs2@Q0@IJghYVMu(*xS5*n z0XTkO%C4zZ$CKH$8kbsTY?@H)+`{ zE8qhCHh)y;Y>a_MTx{ecfMm;8Xl_Dly@hy(%RBWpeFPK^6wcO;dS4RIh>#%NCdOM6 zFmZdMyLBt!Q)?;##zT=6?Pg!^-4=YM1?%N~Lg&~ao#M7uNOK$pPN6sx5qlUcbvMc2 zm>BbYoNF|_)^QjbDUAaR$w35NfnsE&ha?9Ix~;AszYo82`Rk0b!{$Ek&x6iX|06=_ zjKe@N7(LdC;u8Tmh2%7N(4~^Et?rrdLqant>hqgvM#y#l!64N!Ax578tS60v-uFDy z<+Emkt4-GU5?6<$hwsYRo)bwk3Den#9CUJJDN%8csMwJQ-uyAo)JJ5YvSci~K4>hfk0jbd;+?m;u`7R{v<7k2CO!|hg=rp#M)tfwC;&Xb4NO||@d z7$$IpHd6gY0H3YJQc9$7n9Egm9^H(;c=Dn1X5{ay{lpK7W(5sIexHwaW8GRqds|zF zm5Ey^c}Y8XkM=3e+cxO64`RgHu~r3tgbJ)*S;|8pZrUadej~a0iaP!r8A=JI6d?mH zd2VU8R7Ko9R2UMqH@QfgQV%=nAnn4*ctrQNpYwp5hlP`Y7WTZg#v;F2<~f0OpJqxZ{Pr+VwV=^*`|7UO~P zqaR6vKVpp&TioDCj5L+kA#?Rz8>|(bfwY8o^V%Hddn2CI3}+i}(GQXOAZRpN^nf<0 z{lz<_w7GlR9gyw9omG#k!3=CtMJ)(o7PHOXkQ}jN}gg zH&za8$rMbdhLJ=o_;sn+>peC1X#9O>S6N#i>>XtO16B-60DbudC|->??K<(e%OR$G zijN_n#@2PhJFGRLn%6)vq%k$CZV&H)?pQ(}$ey;eX3l3MRW7}!AWG58Jq#0LX}wv| zd=;*}6rznGchY$lSQL zU6L{Kgt}jpRJB5!T!bAOZ< z8YaC(;AR0kpXzssT03PTd(_k7C3IB1`Q5$4K7JC)j8cWU8Cu0a1ZeR0o(O);CNU3N z*itZ`IA_TTXuNzOu+hSJk{)EY-mOO$^?GHX4TlT8ug;*jIc0O zp)Mv-Cf3oI{Jze}*j9STBN?EXx)?z*yOsq_SQhbvx59$uc&8tD=dO7vDLB+ARf2*o zF|Mv}3?RFOtImnPLpyBjVZx6RhL_{9{45GJm6;Wgmoo(YGpge&K)6P)`K@;P;80FKwr3<7vxhEr>RQ z*n*y4+`_IUANp>RXoVZs0%eJ586Bu7Jd{ANa<9lmiZ>$?k{>318dyh5UpwTz!k}1m z+51lab*%YSZKcY&o3T+|dHpE`BH41Qb$Qogk^Njb)2FwX`JpDq$tOrKq1mCPwF9e{ z{m~h3qM~9-;m1Luey+QjXPmALJTE>NbdeG(AUzTV=ctMRh!upuM9TLL<}iBvBmpFZ zLoZvEk)g;$VQZ~ldlDe6;sZ(yI8}2-@DvW^x43_9t|wRVCwUJiZC1CMWKVN3m|7IC zc3u~FCAJPQ%v9}m(3fbwFR*y}a+&w2E|QGH?ZXuM14Uf)x-LVfvDDWdgJ0QY!q~pD zS&10>IqVISoMe(Mi6rc_jggGyzi6!ph9DxuuAD{hUN)ysVh%99v&vhr7SU!uIq2yT z7v*!G9~_aCxP>WU1Tw*naQI7qOOQvtZ^Sp5 zA?!UDTsZrHK)xOEV{630>^4@n$WuCSS*N>F6v{8@Aln22H}jXcVSpe8^+$*vlLcgL zR{ECvy5yHaWS%u$pA%IDG{CP?y$*enna1l?A#kS9FIw&sh341kYaJc5P;P^}X+~s# 
z(2s=7kg~Vfimx#IkkEN1MxV=FVi_O*L_L=3diUMt$COJWfuF^#geBI|`-FPog*1j| zS2Xt_GIm*yeL!(vBSsQd_3#3f=6B<&nrt{6D&F@Rf6I?{8qBXARaVs(i1c!#vv_hL29$tJzc^v z_c(u82|K`}M@<1_WVxgp^=VSay#RqETrd)N*k`hQLgaT&JC19t8&U2jLzGl(!u631 zw-N?nD!|r@s|JGAQ9Q3*zgH5(JJt_$&qrcMfFx!{=^Tf>4xZ#RWB$GZjzu#rql4n@ zb2RV)jd^8$p5uLVj?7sVRAlIvr zPMxOqqiwMzVBdN~Y@tf@Kn#mC>b*3pb*{3Vui}M^D`Vql`H|xi?22@kK<-42ZLZ8f zb1lI^w}~=NI(kYXK+ou&w^Ch)ueG8l_|G&GS!|AEO@9TizWI6dkleCZyk@&{_V#$F zl-M8Ta}RS)CXqZpjb>BXPyoZjC)5V=Br;lQPlh{FdyWtTE+V#t!}f*2d!^B7{9jP?ol0qlq?7KL{ zW}&h=nb(zK%!UMFaBgp==d;hwIY_Ebchm4ZnGTNAWu?&jvC7>?V|hX|Ng$9U7fS{Mey*T5 z;x_A~#l(f}f-NZc#n1txe$jQ%U8i!0J-R7Q{F8@?3=1=b*XdF`;Ry!S>05pKt8t8<&y(}3TX;sNjA6Lg4yzrR=C?m)fu(dCqx3IFZ}$WXd>JJLdYhsxeFWc*?NP@jlRMT`e4z z=LoY20|gYN{Q%xLb<-0_$G!Z`rA+_V-?%JL8rrZMTjv4mq=C@u`O4qCS-9*x4XQC0 zUw|Uvq4Uk>s)C^~dX&2aJwuinNs?F4`1~mhrpMp4fu&(iNYr@^6t3MrT7@4V>PtU0 zftuYB+jU#oJ3UCjH>9FIin5WCbVWG0BrB#{(S$He^gU#kC>6I>)bD{2(^H^(PB=nC zmFxBlxcz&Xq*2;GSJU%o++rPwC9l~j9LJP4L7#vx6o&7%!a)0mnUX4CE7%NXDa`d` zV4wJTJ~P=6Q-FgCH*A8=E1`VZb!sGA=b)+|O1xZ-QYBqn=MkfsFtPj*8~BI`?X+O9 zAxTkrLH~@~S0C>Ss~ldf8y>@KC!y%Hv+7Uv!fW_KVUB#QVL%3zrPSYs?DWP;}VM>$8;Kk4v%#0C7IvmxzubSx?}%_3Xr#q>lQJ znqG3Za0;mZM|^MMln#DswO`IowC}r-HN!+^64u!{g4zo8AlD)Xg-Por)g?(3n$*te<0_Ac^wrM z$4n-{|GDvUjeQf`Tl?=6fbI^PhocH%qt=P|i+l;XEKggdh!iPfnt})yL8g6&M=NUm z`P)4-g*FsXvI*wVsx^2URCWhG1bLaNc{d9uuMoCMb(%Sp3{VbvIz8y6DtH2%0zzDR z_%-`uFBA$$8G-yY@sGIu0JA;F4s^;eokXcWHtka`v|gEKMVAh`=xTYLt;O?opjJws zm~yYj^gxl8|0!Ic7yu5@79(2Jn1YyXgE|aA7lYBO zlZBL^af*Fl$KZ3am<3YF6Z3wa$$U>IQz>*9DP1;?>4mWa)UZ34XFmwIJs8l1hz5iX z`toJ!Jw));M3MByuf0jPe94UZ2Z!uTD%r zb6b$oj`<$ShA{0&ZdI=2cD1FOvVDOP3BHA3N{V50v{kR;l0^^^iTwz8W=O{Of=O-!A!8-S_6Q zN39`}nH=k&7|fZm%nB-pCn)u)9=@Na_Co~3r|B993Z*Ht#lM+1%1hegO}v2WN->)G z?4zpmf_22ejq?n6Hr+GGw~?~;?C=4Jg;z9((UpZbuQ~Ir>l&j@x}pf zmO^bj3{l<_L6aDxJShQe5;Q6#Co6CAi9dB8^PC6tPW^kYLMaoLHS=Q>Q?!;4Om13E@A78XhYT#e0w^^ zS0U;d=#rhNv%lV1qllK>BOA{@!tdmbKe8pyuN%FO!HPE{>O}BOCx!EBS^oBDXS3oS zu$+i$d+fDWto*D4QeMh~X*@!+r>WB?#dD6ye~F%c5LJJ#q1T{;cB}lQzf8VyXplok!C z&%k-UW2y9HaElPE~zmvLc+Ba0|n9lxv1{vAAh&YMJpKxKSl{BCyui>8l$$By9z{Il-~ z-xUEAr|$P2wEeVMFrgb@(U@cfJnfGeym#kKmC;*P!%bRR7GX-cEb}BZ`I~8fjtek| za65Ft?c(M(E{=C;w5g08igWVhbUr_bm}+}BQn9(~*mnyY?LU+Fx8ipQM^te{n#ztiS}zMqtq5~rf7#-e185}8Wf$9s6#C>BnSF+RNp2-W(lXZ{ z(7+S}J7vA1HvH{wi*tyX<>KGH?gCjzq7^n;dup$=^*I^T&9zyG@19fI0#zg+5`w)vdT3xxrCm=Tl~>7V)jatghZj_ zH1HlA%7>qvSCU*!Vg9>#9ksZaH83=pqEl`W%I8$D8TnWuhh&WGR8(9tXsPI$UjjQ|+su}Q>^)7i=ynLb;$ z!vBcm;@sz#%jd|8qycYC;bENb19>pSgC6+t2bl~PDgX|-qr=!0OAXbjFJI)>i%n%u zb>g7WKr!_1z`|FQ2z2g!YAy364TL=!0Ofw$UJ=r;4)t5UnwT(qK=3_i|FriVGWgKO zb4#=Ah#SrTZ{=X`-|x?1uA%DjkkHm=Iz?H|Q2qikoH`M+=e1G-OU}dLD_KwFrVtxqp(6%o|%rLqTuew{}Ec&{ExdYLL)OhWhtx-wt2B-ZZu_ z$E)EzU}dKEVSr$+UH9aB6Bj&zQ{&eJwb#F=3T>h6ekV$u%!L_-72&^6zH?jRVKEaQ zj+bGSOV5ksNZ4Dt9j}wx#6T8`PpF4}mjufJ-xCL{zaUIBiIfOs`iJD-FQ6FLEHu0_`&aC-ewj<-9Zi9?7> zF}p8GkZ-$or=cQy*hxaBB-Kp1)(s(0Ne~m@wpjmWznjrCM?yF@ZqsL`3WlpIM{WGt7=pKU5}qj^F^ zc^fM*3eR&xa#N+qP38NNVv19G%94axkaNR9(zHQpUtnL6wzwJVUEK1|@p@*>7XAP` zy+E5pfjT^`Qr7}dY^t-ar>{Hw0dhbGJgJ>={xt9K^qAv0*dHzi@VGc|W(J$&AN_a@KLh?6hVvgRZI<)k zYy--QKtDjDIDbn(t#yr{Kny$~xaK0^j-Ole$K`OzJI_L+j*rX_QZ&SfHpgY#;p+Jy zGuNoj@|gZ(l90H&7nrLsT9|xvbOHRbA3yLf2cZyxF(|{4`6VmL9?1&% zWThP=s4sjjej^MYQq)^VVkz2F`N#99^1$|JT&=2rH!uVjucpdMfCz-{jHhCB9UF7@ zn&9JruaO{4a{KZ5{lF5V=vnun51vQrN{n^+FhT<>d&hIfecFwbHAe1~33?y&deW{` zcVSu%F&TJu&n2kX8j#Qm2(vt3U@SIs#Ei72Jwi%#-csJf3_!%?Vde;xBj0k3T#=!U z3m7gsfwKLNvCkR9dYS;gkxqRMG3iej&-&Dt%?k1II1ui3@!Txm5^S`6 zQZy9Ak8MbEin zrWEQObGgwb>|O@G6Z1^z2v=^kx0$VgNUl;d?0hYDt`%HyMXd{OM#bNekLX|%N~!iP 
zpdXeo&MUl{5BcnNzrL=UjJs@G=*}MfuqFt-nJasiU8vT~Bd&MKX?&)X9(OmERn;^Mgy0%slHG_|YU}m@_1)p&zeammrW{5taAB(!uu` z&-tawl=S|KxE`out|2uEEY`^?aZ-KQ>u~qupUKg~p~yVtLF;nImj_q@{=q#;SC~`Q z*yRj*th*B)LM%Q?58%$fzA0H&T2YFlciy*`o%jLvXYqPLO4&)d!#8~{BF(HS2wZw? zhnXe|1zk_#T!H?J&cX^@_l0~=ia(SIeJL0yvygvz?~|z)`$ien%V#boy{{$@QHCYp zZYp{mZ7H!1)=843et)#uqZ5{bhewtUT$c@e=_8kM!9CwCg}6K*+%vcf!WB}48<=P* zxCP7ztQfCB11oKoL77x%h`=V5XwB+Awwlo~vYTiiLmRXHn8{qtOJjh_u8qJDN~$Zc zeVz>C>>1J7icsd0XzAB2=SFc1D33hRg_O}a?R(k33S*#LzQFe_u=gj01Lq_n>1|Js z(r-+p$pvUR$diD(lR7R-tVBA42LGrUDOQJ}c$wg`u(zs9MJScCj>>(y5s2g(6e{~3?L%Yu8)HymFv(8qye-Eo&egPB^EB~b zFSclS;;T5G)+m$troGpE>LCEt{yNNUspDPPPZd{|J5I_7Z)ZzUhGa{5HXx+?s-t?^4*d zMX{_Jk*>z8B-aDFx?^vfc`OP!n7uz=(8-7?APMUCcs0?D>EHHl{CMHaVsZ5PDENc= z_FF+hm;pM6`|F^@sp#x9qzkA(5VaPN&RJCbQwGh1)+dsni{+spE`d^M_FjW#9?-rXksPIDkU=O@t%IL$c_eZ`TWomG4VXJbnk@{S|9mOXou({*0{8jIF8V}3h9Xzz?nC+b` zBI+3~PANz1Mg6-p0TS`ra}(+Z?+MMa1kIhSf4mGOAzBlWog|lk9rr8SDnhIsTgd6e zQ3J<7^Y}ZAF#V@<*STZjTbZ7TCtHFG1t#Pb1KnkWxtM76<-rR%9?}#ScMAJajBQMJ>EVx%umA0q2@evG3C0a}_!ytn!ux6@goeLXnbLGtI90eaJ7c$F9!5^vJH66W#j9T-PYmb z@o=9yyMc8HQ~r3S$dm7FbgiaOOrdhxchFriFb941$ha8g=J*Q$cJ_w*ix}HwJ@H#^ zp9cVoSb`KDk606JOq_TQ%#SK7u|sD7|2FG9atTHuf_71ME23tOZgQ%QlUB5t@-aRI zcePh)c3K&R3qt|ar_R?`W-CkOa$>EX!$aimx=GJ#{4NEL-LFGu!nLQ%A!=}=)eD29 z^^JRk8|*$Fzi(i4xFlozMQm~#r{tY$x;|&&@j+2ej#r3+H=xVckyt`Lfg$x2+e?s6 zaQf7t`#SMg=kCvy|B}L9ZGCXB-sI)znrVu`#vv)kmj(YND`y?0_}>k9HX*f>rFDFm zgYT^Sjka`f`=6n&(1m+Xo@v3TBnw-@Bi1_)cn6&>AG>Ek+@9+V3ZLW$gf9yUQDPu$ zEX;nXJSbixPE;wGp?k6JrQk3Y#KEb{M*a}+i@|ScXxyYGJZ{NR7v(xoD_>fVmG#)O zCWkPiNZN;^3|_kXA88Id;`kNpVM1uhIA_k2&%SP{_B=U9XZ^rK4o2&6Y^Mc4wrQprc zT8Q{jzvWZ9ve(Byf>m;I7@;Jfh8_29C*n9Pooi~gm#R`u8pPrUot<8W#(@S=`M>&E z?fOlvuKEZk3k6j!_WDnZNL9M=d2@Ga5y)R2>wN%$*+BQHsQRsEfL1g|f-Q19%w3kB+;@D1~&Gs*y1A2 z#NAL945@jh2e>9h^1V@D2DjScJcEpwgA<6AU&chVJmQPswVf^LMk6f_K4hssCq z!?@)3G5G2@epW~Ct7?3{gPoiE6JPz~DihN|2leshr}Ix{=zXnO3{Cblzdi5V^!Uu8 zz26g1N>T{LjMp!4e%@`9FrG({>&;+`PM;0D;zBH^d8PyQevK}?o%WCK>ev6YK~CCo z4##)+C0xy3)y{}?%1>m3xX&|9?s?(upTyZ&(`6ZJENI;NRiPwsYWRFKCb4i1>wPB* zQl&}}WY5FtXxh70eLfl|^708Cf{I$xAB4b95SQ-V()V{LG%-oiqF@86vtN{`@%{8? 
z$S{_b%$%!cVS~Ox3eXgo*mkBpQ;6-XAJVP2MB%Pe%<-54b(gKw9r>W$_=Cf8t!_g~Fq4h9kzwh5a}5j?L=<`KvZ5!uW=7u+ zzZtr*d?hK&@M;H@<8kp0OuN5IGcj*t^6!3vI>*U~QZZsrI)d&PLE&V&Yf7m2-0-4B ztbJ#{?bp4_IG#AFsog$BY$1+9ojFl-GPv7d!aBseS>T*7>L*wA$t7DF%*Uhy=lr&@ zni;W$euZ*-8BH?9n8;36_>Qw};Wbxh)G&kMs$c})E3v%3z>xFWX?6@unQwaC_N6k& zAMw$92R(U0wWgS6IQ92jOYuA0v2PYm*@5iOD8U%3hx2P&oS4BK>(bHU#R z5^yJzMyU0DBQGCe{;^Vd4spROwid7FU|Zl!!FDBE16iHowYw26IZ;z;b4|Q4vA1r0 zqhRgN^XpgA%yk^Qdi+4WQqNxflzxmH{)PJ&Wx0|T9U`nD$^;1Pki=+FcbEtbh%pPE z4maJv$53LTM}!0y6x{Z4TdR>dn*a9o(*EtbB-TY*-0I_HJ*KYeZAR2WSkS+!!%>Ke z_SV@&B9^}r%wCqTHkW!c#pXSbPv87$z1Diu(aCsgO#gvJ((AL6=YA%FGAOD85qe{1 z{?gx1$-=v6ws!x^i&(kr$4vESY&(O&!T7Q&+(xUC(!+6ILTlq!@j0;}=6GMr8Xe0v zjFLH0wp;aifR@X*jLTp1%+6>`%+=J+bu%WDTdp)j*fW(M+}^nKa+cY#dZdzPV`Co0 z?wouRmazPlFLzz#tV^8^l^rPHcNKl?cAqg_BeJZiI9b*{*5OrJVc{7ZvnJ!*MVc|p zz4G?sRFAu_5Q#ruGTa$rXuN<}uva{CkVA?`RB2&Hy%p1O)gZ>Py{Cl;`BrnT@zG9B zga4`Lw-1ld1g7lQ2MQb^H2QTH*WOevszandpF});R~7FNsZb^)q(gyT(13GPV0JPW zRQJhPTcPk(ifbibq&zY_>5k&8XBFAFsd`xZj+xMQVvw2)jI`s0wHDDrMAvhAt#CUq z$A=%qyyTFkKkeN%Tc0%tI@>%kS`=PVpQ{`J?RnU`@!`k zy~UO`xc>aObN%6)pc-cjK>|!Wh0AT{E$b+{6qDd}lg?hHt$P;D2gQ@hJPi9AJZJ}C zB?Z2}aeD-4CQq`S)hTSK4YMaoFXXdF7ueF+nv7^)JrSzoJe_?LvYypR^?P(d^K{*E zw(Bii39?Fh7Whs~TT0{J8#n2_ z_nPq9jl6n8SVb>BklNnK?{#9#(@0T7PlMTT&o=fX<-VSen5g*fOx1Yhwu0DVnN11x zn9bpq5=7steH~l4aR%4ACzql?!j*l{`D*(@*i5R^-k5gLxkA>twdN0cL+tZ)olSmS z*TGcaS8=>2mk~S}4+GOvYQeDnY@jK+(e3JX*4*LBtow@%hwTHEb5ioNSPXU{DfA@5pgRs_TTqJ$r0nA` zwXbEG^65l*DpfmAs}2&?x9VQ~>%s?=mWfSrRU%P5iy(9VnMmMg=Os(l`l9Jt+on z4y97P*z)?d)*TW1>bjkn-9Jb?Nlk41q<0kELYW?2AB)Zi8u|N|%4doRmH&Ma)bNCh zX<)E)YK(Z6s4G%Dj9sYduccdUIYmDF}g-H;exRXX0VCvHBu3 z6u2$BpR`1Ae^D4V)DLG)04;BNtaq^uB_b-nw}y=8>^$&5CRMn(c>z(`zvy1nLD?gt z2T)UE6~m;yDRv9(t*!Q{5Duyp<$8`En*f%B7>uwT?NpK=0EDWSLqI289SuIND1lC- zBB5}2PDooK^VhZcPc@%Ef5I~%C`DmUCF@S8&wkaoe|rz_NN^Ywt)bPG9VzaeqDD!& z&kOSIKiBR5_c_29{q_6tu>AQjH8L)f%!~pkUkMGr0ac2 zc53KrjNy26M;cxhRwv&Jzgw96(<`J>L9kdHfe6(^ z%DijimR;`#u}>)(+$;rww4O8mYqz1so1S_9D8;`Qt^K!O_eDkzAWKT!J7Ei2TVa{z3jj1iCiyDs|u9_E>ro3hF1qi<3SNQOMrm*#~%)oG)e+9lfG1bC>c{U zfa9X0MX)ju1d?c5Oyz`8-=#9=$y2*{RAgI)3u3D~&sKoQ+;H142N6(h^DR*o5yF)E zup=b5=zj=_|1u0zSgWC=cS(Xx?_Q*R>&zvHRP*Oke*2U%qG6hM-`c66uk!%fqdKUn zMV@5g!0O$2u-#iF_{(!%ag3?x^62d93n_d0FES2af1u4vy$fmabeQ^vc#bwH^Jka+ z`YOBRD&Bi1fM^#O$(C?(Q=Y-|`Y`Vk`3OwAu>*}m3R-cHUd zG39HD==`Qvj?5wL*$M3+?OF_t9E@~koL&e%leOaEpY`QSbGi~aj{$f3GN=*v&)Ys# z8S&4Qmu0GD!7Bnb% z^YpVEiYEFmzp)n*)KNU)WF!AE+uG77@KG6$Aok0ox4=Ny=r{3Kv8(?aa!E10ISLUD zNI@|+h+kFM8}Em$(!VqQ*KZL+55VqA;K{RnMf8CZ_=7T>2dzr}X)3|2%RK!fNdskq zm22EIXaH#@9aO~s!ub16OJT`KpzSMzc@74MHb-IFf@`qGMF3AzaM9)L3o+a1n#~`#0xdt zB`RN@HC1qYab0lJwg`pQGgw~#$K(I?x?+Raec%!~V{*RA?cTAP9BYgQlJHZkJ^<2F zT$YfRh7<`~OiIMVv58;?+gPRzv2uYU89u~4FmKawUOrT;50 zkP+9bICB0xz5>LGlgEEblK(9X=vAYt%rK5bc@`!p(yg<<5=h&scPWMtKhol#x~=ZU z3|@q3&8kHCe}By1##xLJ@*Q}HP!mF^34kIPCD1+wuItPYVnc8G)b(yiPTm*1YgJ0{ z4-Ys@{*&h2Bb=R&HAC%)97tun$m3e0ne5&wsY2ZfY1&cL{@Y+gThMsYQ~(UDY1^-*)v5 zYN9B<-UQ^sHWp0z3kA*Ke+HgE^PtC#@?eBsShCP7f@~$~e-wLvdp*GJsio z&Wp5OCwbNY`=zg#e^f&Gd}VZQQ8Ws}`rG`Z+M}O*x=254@|6^5 z9AbI+bP?nR@{B1SZ)WTVh1GH|S^{v?_+vzy9D6nh0qa|HG9U9ibqs>VT(Km6lI`ex z`sDML`P$zf3kDTNg@z=_e|HAx1tdF0(U-eE>S0GuMbDL<9u2tb0<4Gq$Hprnyjvjb zwAxWw1^maW{k0GtO5VUgz}QhMF#2=VJql4YC_d`sjuD@S4?lV;#2=2L2ep+4@&2!7 z6AQ;BqCESL1n2L;X^s{j6S}{pdN&Z`WQHkk9)+3}yiQOE!vb24D%$}985vo@>8uQHF&DEok=<=s2NdUZxJr*SIq|Nk@?yU)2JyvC{tW>a5R*a+F5haJ&R zvY%6=Wuk-&=b2uVC`~b9!({d)fPK-lg`f-n3&qJ)k^T%iIvnSFa1|1Rr7>QkJE4kBC->*hl0-qR*N5u3z-@#CMIt)> zzIDW+!2^5nl~rmC>+>wYYefaWL-@yVM!nqmC8_deB1kD(&{!Do+Q=J86ed>wdR_3k 
zmqWGyLHPTtfoqZibz_rrMz}KI4jd=2B%ek9);Z1yB;z<2pXLP952_IH8R6^vC|pU& z;>gI0>IX;E-=ehQryDdWxEETVVwqE@hnO;7vTj^Gd3b_|2VlHX!$qYw`aOoXG=_k@ zrC5pf;b^_{BEaU0${)4t|9K-H83H}%Ktl8-0ke4sPss$JD-%~~ej`X7 zj>mj2Eu&h9_|RlQQCU$@^&Zfep~(9n^#m9^T=aUs_I(A{U*eg6f5jSw8?EgifF>5G zOf#K4+p?P7u+{yb4VG2;=|q+%bFT!0Jhg<(8ZPWi3zt4RhePOMRyO6+K;y>12eM3p z3_2|aa2oeW-;l)gcF3F|g{PO@+cul{8hi;4E{{Z|Sc>sSXSUipGvk6I%D+{Wxl>eK zT~Iw7{!b)oqQ-KtsC$v^k^B70r8s%pr8D~IZ1*w4IgdbR3GES$c9{p(+Bazi6wy?F zb*t=!-~HHmhb_s_FY6_QL`~!?ayD+Gab#pNuc05(F5uuOci7gbbsBpF&fz6akj^NY zpi2qJR9AjqE1|Di6P-BTEFRA(Nb1{OM3Yd@Gm*Geq9n5PII_`N=xntp>TG;acqCRwi$exv46`+IKxQHz`$#TLnv;8PCx@^Iyu-_GA#?$HJG|H&-Ls|6wAc zRAn+t+t{A_qD+Wiu=9@V4j2gdE)+6+f#3> zP=0q)N+k5$#rVf$b9Tzndwjuy^X03b??^TW-x<`5@8nC16#wer&q_JtelhXlU0u0_ zTE?E9f!Hdwv8 zbqGCt1OmG|hL`>07QrwnofVy-B&A+vKNb0)SJghVvXiGfxwd4jCax3d26%3db!QxR z)OaD64tI*v_Ul-M@>&&(;R5nYGbpwKKRa!FO(JR)m~8u%vk5HOyAz zNqg30tuCJZe}sK?T$F3u?G{nNf*}W#76~OrL6lZtKsp7Hau6j1M7kLR1{gq;5~QSC zl#*745Tw(g1VnP^&T~KP_x;ZPWcE4#sXynLdG5Tfb**cytDp{7{X;vT%g+OU1{L_L zGsnsC5=(z~UmuHcgpJ+Y9m&B`TIT5CvB?)nU zaxPM^em}I1Nwnj@P&c4O^5s6_?7#y^)qc=M>?2yZyqz z0U?pnz6a`q1@l!ok<|?_a}m~tqioh(Q_H<|l1ydD##5r6t`@yVupZNSO%t$f!qR(5 z7rp|jG z)9!8d<-V_){yh~S_}FWe`jM}2JePi-2N=1^&2ZCJ2fldz+9v{vBs|&fZCb z+ZC0PL3m1|9Mkzx>iB?_dWFFp5B8mAYGHVpZh<6WiA{H-t@W|a`up99{6d38EwQ7= zb!nLB3(DpPzo#ke=PG!D+2s%U*%{Gc`AA?TT~ADNkmlLm85J zhc=GdEbO#^?;GBoZbah0L@VIf1w{`ydgKU44fi>!JD`b7yVmDs?7%10jyR8{Z-d*f z$DaliA_ZS-e3f=04$ zJyIeI1G+d7REs{h=(Z}!Z<#)fn4#~K^0eTaMU+*YKTcZoQ zqeLsIwwe+3MMlPF{=p~8hcKJW$U5wQvEsUA{3j!dB`u%{%6x05asja0qe)sZqq*l41rx5H->}%19Yzaq9&27dNL`a_>^DU-4&2ag+q(VCep|BOfNBlDYwGOHV z70I#)-86>U?A4OsHt~F)r|0c2p?N8Py2pq-0I{%hLU#`ap}s@+ulc>h^Xh?Rx-_UP zi0Ze^FeYU2NI@8JhVR;$TlBo4gu`( zZ{iowic|-oxNSLNx@u<3q75`=9yrq&-+20Pq)8Y>gEYHEbW9IM&6IrdB_hHv-ZxMh z5L$PgJD})udNR_|RPc)=n+xW;hK@SJF{!Ym`3Am! 
z4fo+l5?8Z%!J^h{LHuaJHb*OKtg3?L|M#n^L3(C&`PR34Vn2gPeKFO+VslZ2zJ-V_ zJ`kv?3@i~F?St$kk@c1Cr0;8IZpMck7u4l_3K2zMuxHJyWE3rM>17OFL~_OdWI=7g z(iQCVoOz)8p0#cMl_p3VM(&xN<5c(Y5y1aVGWm0zN?VXaQ+TJix56!{vTG5-pTkYL zGZ;@g`$kp2yJ3I410*m+ zV)ZMyG`X4w=7MyS!4bmm;g7_AO;_grw;d4n7H}1BHo)Z1BQPjFEfP;I${ExziBQoD zS-1cc;eL$-?1KMrNdIjdq1q#e=Dj+KF8|v?GT|np)I8z;c1ot58*t8^k!~@ls&F8` z#GtPH+ef59;|`SNc{j9D!S#XpLM4dc@Of{}y!pq79Qva}2sHhD72i+(2BA4C_nUu( ztfO8%FjNW}7%bv(dlp^Brw^*{P0{+W9G&(~5-vZEogd_+GW z1c$EyiIp&^J7QD8q;3kVmG&W0doGYwTPNu8`BBrMDS!MlFSrQOifg|A`#F%U@UwDz zPASH=(p7Zgo&Co*kfB}aQsbpgh%HY`?Ji3q!Ttp5IUlY$kaSsTlbOO^ZBYOX$~IpU zH0atf8c_4Vzb!pkg|j7bi^qn~kq5lUxk#wx_qVM&n=&G-IyD)xWJY z69sZ??>2jgX=51A4|(w`%%E%du{XZJV)KH^x^4Mlox^0JUn(R6G6=*lRXSbYmy>#J z_YbEYJ$s!M5=T0kbHVrO1M%S+FCC9=FWLt;{_WrZuk_(vHRL!}3Cs&0#?ixt;+Ee{ zBtyL)FBc4;o>d2{?0ls4aO^at-p2>GOGg7PeWX+saR1R1eP;)l9o_?q8bh1xv*pVz zQw5!>2c}`aVX|@oUKBBRN}IUcdbjv>yJr7wo{o?UaeM%6?msdtV)%!zDe0yH8L50Z z)S1&gEE_$=mEn-(T2c2l=zM(#wRq{5Is!e%qIp$EGFiBw_?75y@{&uT-%A`vUeaGv z>Iff$e5I&|lYq(}>!8PSGkFd2t*jdM%HWw9A{fixGYWj|{(JX5&fSnNP{UhK zD+VW*7r0{6bn_?ZwwAdAqjT^E{gN?#d%s1hZbPmIouFkSZU`t3r0L0SgVo^t(4@~Y zE62@f)973+REIgc*LP;hF>i>w`A3=FhENb(qKZKGubXq_xwOt1xZDqMu%95$vP@26 z>%#7f=Pfa{?|?$ZS^;S9Pk~wh3(E%?K*#K5s4aEjW0bJfov&>#PBgd6v&1sho7e#- z!Jd;qTJ@CW_UvP@V17(!sYwXOf*5M};3|BiY#yr+6JE4`Pssc4fto2Too?zM8pB+5)&Ok0!U}3cInZOeo8sSD% zm+Yro7gQfKQwp362!RV}^pe(|K<3%XuTh;T$+Ei}lvFa4d(JUZ=UIS1<)?tJAB{{& z6iAQ4*-%&t^}8$_?-SnZ^6#UpW|bda9K6~okyciNo`?cFfQ6&3W%{PrOWq z+JqxzmaJaT`<;KP5DiAWt!B9oro|w~Y6qPgQC}?!mVh1HwOW}Cc=KZQ2}wcSzneei zoU7a?Z+bm`2ePRxaH_@40cD-vqAa_U@}0wbgyG-oi@obr`@1^{`_%CpvRlBNsV~ft zAs5)9tU&%_&GAxF6+qPPe5sQwwe|IlpU?g9dPVaYQr_#%-jR|ki4@_}o2$TjP5C}_ zXi9nq|K#bB`V49W9i4G&1&^6`DfU6%cSYHGu^vD8xE+xC`z4)xt>NS_r&0uHHWas; zTmZPc$S(MyO3)FcNy-fANom1lKTrj_Qc#5Zp3Uw%$A+x6!Tdfntk6Iw++k(TtuH0h zfVS*JD15QfsY{mhJWL;#w^?jE95*Z<8&x{VvcJf(ETqk{QGEiX#UJAO-LwOe+L6EA z6_+*}Qk!hrn6yEiBr*rI^CAq(%Ig%+WP+o;yD+zG{X$;31}aXHuqeg@w@c`_?11|I z96+;U8gPr%l{r#3#=Ks#^HYNPcYVR_eIQNqRu%Ze4(z!I8r%9$MPIWSh>~~2HM|1y zCQ>1)7}!bB31x8BiJjFS#yxUF9ETZ8GJ0_~eyc0Zm%l7+PgYRiocYGu_5Zy-|GQ$HPvI~xH_W_0DPkRf`=(JS zixV*OX;llP<5NVaXouk!MnD_C^AH_HS03mG-jVd$WI@;UR<0H9Y=X6GC<-(ymbP6I zn5w!$cmkQ%KKPAW7kbx{bf(BwQ&J4tmW=rBJn;fYSxXs8KQqL0)^yV|dziA9??_$$ z&|1+-rfvoAynB0~QJB1wf%op}=JHE=qbTFCas)do@|moEC~2>Oy$Z7i6$kq?PkSzW z1ePR%Iuj78NP232MQuNf548{C+x&Zlz`XMEId9O<%ZPXW#u+)AF9nOh?fQpuYOtr1 zwURxl(|4#?8>*E!8wftDGD6K(2ZYB&X=G)0q2Az@exSx8^*f`CoyMQ_(p+nGwKVUr zf5`--eXu+Qsk=>3a;26>D_{_s_hJeObGG+Bwak?h+4x{!1#Qq(doxun6vL)&?;#CA zS8W+=9fqpvBjiC9Q)Dw34Ryp%SaVjpd{Z)d&3+l7ZWC z$Za(@P5nv_*Dau(5>*^t)p&GcgVtM|M(THz>Cz%(L=vAR%en}zu^Uw()(Dbhr}jm? zSE9H4O+zHEoq%f~dde|WGxD)+d__(NUE}GOcMJN9qXh=$0a?KD zj=h?PlX*$r-;wXV_z1aLLmrqy^Kfp?R{{3D{qc}65KqrK1{iJzkZHfq+g}pce{BP9 zGJF$uqq4v@ITm)Ywjnf~n&XP1KTisKABJju;%%$PL;W%*>%H?FMkvM%y?u5?a>4f! 