app.py
import os
import sys
from typing import Union

from langchain_openai import AzureChatOpenAI, ChatOpenAI

from jockey.jockey_graph import Jockey, build_jockey_graph
from jockey.util import check_environment_variables


def build_jockey(
        planner_llm: Union[AzureChatOpenAI, ChatOpenAI],
        supervisor_llm: Union[AzureChatOpenAI, ChatOpenAI],
        worker_llm: Union[AzureChatOpenAI, ChatOpenAI]) -> Jockey:
"""Convenience function for standing up a local Jockey instance for dev work.
Args:
planner_llm (Union[BaseChatOpenAI | AzureChatOpenAI]):
The LLM used for the planner node. It is recommended this be a GPT-4 class LLM.
supervisor_llm (Union[BaseChatOpenAI | AzureChatOpenAI]):
The LLM used for the supervisor. It is recommended this be a GPT-4 class LLM or better.
worker_llm (Union[BaseChatOpenAI | AzureChatOpenAI]):
The LLM used for the planner node. It is recommended this be a GPT-4 class LLM or better.
Returns:
Jockey: A local Jockey instance.
"""
    # Here we load all the required prompts for a Jockey instance.
    supervisor_filepath = os.path.join(os.path.dirname(__file__), "prompts", "supervisor.md")
    planner_filepath = os.path.join(os.path.dirname(__file__), "prompts", "planner.md")

    with open(supervisor_filepath, "r") as supervisor_prompt_file:
        supervisor_prompt = supervisor_prompt_file.read()

    with open(planner_filepath, "r") as planner_prompt_file:
        planner_prompt = planner_prompt_file.read()

    return build_jockey_graph(
        planner_llm=planner_llm,
        planner_prompt=planner_prompt,
        supervisor_llm=supervisor_llm,
        supervisor_prompt=supervisor_prompt,
        worker_llm=worker_llm
    )


# Here we construct all the LLMs for a Jockey instance.
# Currently we only support OpenAI LLMs.
# Also note the class of LLM used for each component.
# When implementing your own server, you can import build_jockey separately or modify this file directly.
# This allows you to choose your own LLMs.
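# As a hedged illustration of that pattern, a separate server module might look roughly like the
# sketch below. The module name `my_server` and the single shared LLM are assumptions made for
# the example, not part of this repo:
#
#     # my_server.py
#     from app import build_jockey
#     from langchain_openai import ChatOpenAI
#
#     llm = ChatOpenAI(model="gpt-4o", streaming=True, temperature=0)
#     jockey = build_jockey(planner_llm=llm, supervisor_llm=llm, worker_llm=llm)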
check_environment_variables()

if os.environ["LLM_PROVIDER"] == "AZURE":
    planner_llm = AzureChatOpenAI(
        deployment_name="gpt-4o",
        streaming=True,
        temperature=0,
        model_version="2024-08-06",
        tags=["planner"]
    )
    supervisor_llm = AzureChatOpenAI(
        deployment_name="gpt-4o",
        streaming=True,
        temperature=0,
        model_version="2024-08-06",
        tags=["supervisor"]
    )
    worker_llm = AzureChatOpenAI(
        deployment_name="gpt-4o-mini",
        streaming=True,
        temperature=0,
        model_version="2024-07-18",
        tags=["worker"]
    )
elif os.environ["LLM_PROVIDER"] == "OPENAI":
    planner_llm = ChatOpenAI(
        model="gpt-4o",
        streaming=True,
        temperature=0,
        tags=["planner"]
    )
    supervisor_llm = ChatOpenAI(
        model="gpt-4o",
        streaming=True,
        temperature=0,
        tags=["supervisor"]
    )
    worker_llm = ChatOpenAI(
        model="gpt-4o-mini-2024-07-18",
        streaming=True,
        temperature=0,
        tags=["worker"]
    )
else:
    print(f"LLM_PROVIDER environment variable is incorrect. Must be one of: [AZURE, OPENAI] but got {os.environ['LLM_PROVIDER']}")
    sys.exit("Incorrect LLM_PROVIDER environment variable.")

# This is the variable used by the LangGraph API server.
jockey = build_jockey(planner_llm=planner_llm, supervisor_llm=supervisor_llm, worker_llm=worker_llm)
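# As an assumed illustration (not this repo's actual config): with the LangGraph CLI, a
# module-level graph variable such as `jockey` is typically exposed to the API server through a
# `langgraph.json` entry that points at "./app.py:jockey", roughly like:
#
#     {
#       "dependencies": ["."],
#       "graphs": {
#         "jockey": "./app.py:jockey"
#       },
#       "env": ".env"
#     }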