import { NextRequest, NextResponse } from "next/server";
import { Message as VercelChatMessage, StreamingTextResponse } from "ai";
import { AgentExecutor, createOpenAIFunctionsAgent } from "langchain/agents";
import { ChatOpenAI } from "@langchain/openai";
import { SerpAPI } from "@langchain/community/tools/serpapi";
import { Calculator } from "@langchain/community/tools/calculator";
import { AIMessage, ChatMessage, HumanMessage } from "@langchain/core/messages";
import {
  ChatPromptTemplate,
  MessagesPlaceholder,
} from "@langchain/core/prompts";

export const runtime = "edge";

const convertVercelMessageToLangChainMessage = (message: VercelChatMessage) => {
  if (message.role === "user") {
    return new HumanMessage(message.content);
  } else if (message.role === "assistant") {
    return new AIMessage(message.content);
  } else {
    return new ChatMessage(message.content, message.role);
  }
};
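
// For example, given the Vercel AI SDK message shape, the mapping above
// works out to (illustrative):
//   { role: "user", content: "Hi!" }       -> HumanMessage("Hi!")
//   { role: "assistant", content: "Hey!" }  -> AIMessage("Hey!")
//   { role: "system", content: "..." }      -> ChatMessage("...", "system")
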
const AGENT_SYSTEM_TEMPLATE = `You are a talking parrot named Polly. All final responses must be how a talking parrot would respond. Squawk often!`;

/**
 * This handler initializes and calls an OpenAI Functions agent.
 * See the docs for more information:
 *
 * https://js.langchain.com/docs/modules/agents/agent_types/openai_functions_agent
 */
export async function POST(req: NextRequest) {
  try {
    const body = await req.json();
    /**
     * We represent intermediate steps as system messages for display purposes,
     * but don't want them in the chat history.
     */
    const messages = (body.messages ?? []).filter(
      (message: VercelChatMessage) =>
        message.role === "user" || message.role === "assistant",
    );
    const returnIntermediateSteps = body.show_intermediate_steps;
    const previousMessages = messages
      .slice(0, -1)
      .map(convertVercelMessageToLangChainMessage);
    const currentMessageContent = messages[messages.length - 1].content;

    // Requires process.env.SERPAPI_API_KEY to be set: https://serpapi.com/
    // You can remove this or use a different tool instead.
    const tools = [new Calculator(), new SerpAPI()];

    const chat = new ChatOpenAI({
      modelName: "gpt-3.5-turbo-1106",
      temperature: 0,
      // IMPORTANT: You must set "streaming: true" on the model for the
      // final output streaming below to work.
      streaming: true,
    });

    /**
     * Based on https://smith.langchain.com/hub/hwchase17/openai-functions-agent
     *
     * This default prompt for the OpenAI functions agent has a placeholder
     * where chat messages get inserted as "chat_history".
     *
     * You can customize this prompt yourself!
     */
    const prompt = ChatPromptTemplate.fromMessages([
      ["system", AGENT_SYSTEM_TEMPLATE],
      new MessagesPlaceholder("chat_history"),
      ["human", "{input}"],
      new MessagesPlaceholder("agent_scratchpad"),
    ]);
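
    // When formatted, the prompt above expands to a message list shaped
    // roughly like this (a sketch; "agent_scratchpad" is populated by the
    // agent with its prior tool calls and their results):
    //   [system: AGENT_SYSTEM_TEMPLATE]
    //   [...chat_history messages]
    //   [human: {input}]
    //   [...agent_scratchpad messages]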

    const agent = await createOpenAIFunctionsAgent({
      llm: chat,
      tools,
      prompt,
    });

    const agentExecutor = new AgentExecutor({
      agent,
      tools,
      // Set this if you want to receive all intermediate steps in the
      // output of .invoke().
      returnIntermediateSteps,
    });

    if (!returnIntermediateSteps) {
      /**
       * Agent executors also allow you to stream back all generated tokens
       * and steps from their runs.
       *
       * This contains a lot of data, so we do some filtering of the generated
       * log chunks and only stream back the final response.
       *
       * This filtering is easiest with the OpenAI functions or tools agents,
       * since final outputs are log chunk values from the model that contain
       * a string instead of a function call object.
       *
       * See: https://js.langchain.com/docs/modules/agents/how_to/streaming#streaming-tokens
       */
      const logStream = await agentExecutor.streamLog({
        input: currentMessageContent,
        chat_history: previousMessages,
      });
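
      // Each chunk is a JSON-patch-style log update. A streamed token from
      // the model arrives roughly like this (an illustrative shape, not an
      // exact payload — field details vary by LangChain version):
      //
      //   {
      //     ops: [
      //       {
      //         op: "add",
      //         path: "/logs/ChatOpenAI/streamed_output_str/-",
      //         value: "Squawk",
      //       },
      //     ],
      //   }
      //
      // which is why the filter below keys on the "/logs/ChatOpenAI" path
      // prefix and on string-valued "add" operations.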
      const textEncoder = new TextEncoder();
      const transformStream = new ReadableStream({
        async start(controller) {
          for await (const chunk of logStream) {
            if (chunk.ops?.length > 0 && chunk.ops[0].op === "add") {
              const addOp = chunk.ops[0];
              if (
                addOp.path.startsWith("/logs/ChatOpenAI") &&
                typeof addOp.value === "string" &&
                addOp.value.length
              ) {
                controller.enqueue(textEncoder.encode(addOp.value));
              }
            }
          }
          controller.close();
        },
      });

      return new StreamingTextResponse(transformStream);
    } else {
      /**
       * Intermediate steps are the default outputs with the executor's
       * `.stream()` method. We could also pick them out from `streamLog`
       * chunks. They are generated as JSON objects, so streaming them is
       * a bit more complicated.
       */
      const result = await agentExecutor.invoke({
        input: currentMessageContent,
        chat_history: previousMessages,
      });
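
      // `result.output` is the agent's final string response, and
      // `result.intermediateSteps` is (roughly) an array of
      // { action, observation } pairs, one entry per tool invocation —
      // the exact field names may vary by LangChain version.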
      return NextResponse.json(
        { output: result.output, intermediate_steps: result.intermediateSteps },
        { status: 200 },
      );
    }
  } catch (e: any) {
    return NextResponse.json({ error: e.message }, { status: e.status ?? 500 });
  }
}
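
/**
 * Example client call (illustrative — the URL depends on where this file
 * lives in your app directory, e.g. app/api/chat/agents/route.ts serves
 * POST /api/chat/agents):
 *
 *   const response = await fetch("/api/chat/agents", {
 *     method: "POST",
 *     headers: { "Content-Type": "application/json" },
 *     body: JSON.stringify({
 *       messages: [{ role: "user", content: "What is 123 * 456?" }],
 *       show_intermediate_steps: false,
 *     }),
 *   });
 *
 * With show_intermediate_steps set to false, the response body is a plain
 * text stream of the agent's final answer; with true, it is a JSON object
 * with "output" and "intermediate_steps" fields.
 */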