-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathchatresponsetest.py
68 lines (53 loc) · 2.3 KB
/
chatresponsetest.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.schema import HumanMessage, AIMessage
from apikey import apikey
def initialize_openai_model(api_key):
    """Return a Langchain ``OpenAI`` LLM wrapper configured with *api_key*."""
    llm = OpenAI(openai_api_key=api_key)
    return llm
def generate_response(user_input, past_convo, model):
    """Run one chat turn against *model* and return the assistant's reply text.

    Parameters:
        user_input: the user's latest message.
        past_convo: list of ``{"user": ..., "AI": ...}`` dicts replayed into
            the chain's memory so the model sees the prior exchanges.
        model: an initialized Langchain LLM (see ``initialize_openai_model``).
    """
    # A fresh buffer memory is built on every call; history is re-seeded
    # from past_convo below rather than persisted across calls.
    chat_memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

    chat_prompt = ChatPromptTemplate(
        messages=[
            SystemMessagePromptTemplate.from_template(
                "You are a helpful government assistant."
            ),
            MessagesPlaceholder(variable_name="chat_history"),
            HumanMessagePromptTemplate.from_template("{question}"),
        ]
    )

    chain = LLMChain(llm=model, prompt=chat_prompt, memory=chat_memory)

    # Replay the prior conversation into memory so the placeholder above
    # renders the full history for this turn.
    for turn in past_convo:
        chat_memory.save_context({"input": turn["user"]}, {"output": turn["AI"]})

    result = chain({"question": user_input})

    # NOTE(review): only the final line of the completion is kept — this
    # assumes the model echoes the transcript; a genuinely multi-line answer
    # would be truncated here. Confirm against real model output.
    reply = result['text'].split('\n')[-1]
    # Strip a leading "AI: " prefix if the model emitted one.
    if reply.startswith("AI: "):
        reply = reply[4:]
    return reply
def main():
    """Interactive chat REPL; type ``exit`` to quit."""
    api_key = apikey  # Replace with your actual API key
    model = initialize_openai_model(api_key)

    # Placeholder seed history so the model starts with some context.
    history = [
        {"user": "Query 1", "AI": "Answer 1"},
        {"user": "Query 2", "AI": "Answer 2"},
        {"user": "Query 3", "AI": "Answer 3"},
    ]

    while True:
        text = input("User: ")
        if text.lower() == 'exit':
            break
        answer = generate_response(text, history, model)
        print("AI:", answer)
        # Record this exchange so the next turn sees it as history.
        history.append({"user": text, "AI": answer})


if __name__ == "__main__":
    main()