-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathllm.py
115 lines (100 loc) · 3.93 KB
/
llm.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
import os
import json
from langchain.chains import LLMChain
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import (
PromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
)
from langchain_core.messages import SystemMessage
from utils import *
def get_system_message():
    """Return the system prompt framing the model as a requirements-engineering expert."""
    content = '''
You are an expert in mobile app development and requirements engineering.
You excel at decomposing high-level features into detailed sub-features.
'''
    return SystemMessage(content=content)
def get_feature_refine_template():
    """Build the chat prompt that asks the model to split one feature into five sub-features.

    Expects template variables `feature` and `feature_description`; pairs the
    shared system message with a human message requesting JSON output.
    """
    body = '''
**Feature**
```
{feature}: {feature_description}
```
Given the mobile app feature above, please refine it to a list of sub-features.
Ensure that the number of sub-features is 5.
The output should be a list of JSON formatted objects like this:
[{{
"sub-feature": sub-feature,
"description": description
}}]
'''
    human_message = HumanMessagePromptTemplate(
        prompt=PromptTemplate(
            input_variables=["feature", "feature_description"],
            template=body,
        )
    )
    return ChatPromptTemplate.from_messages([get_system_message(), human_message])
def get_feature_with_super_feature_refine_template():
    """Build the chat prompt for refining a feature in the context of its parent feature.

    Expects template variables `feature_with_desc`, `super_feature`,
    `super_feature_description`, and `sub_features` (the sibling features
    already derived from the parent); requests JSON output.
    """
    body = '''
**Super Feature**
```
super-feature: {super_feature}
description: {super_feature_description}
```
Knowing that the feature "{super_feature}" above is refined into a list of the following features:
```
{sub_features}
```
Please refine the following to a list of sub-features.
Ensure that the number of sub-features is 5.
**Feature**
```
{feature_with_desc}
```
The output should be a list of JSON formatted objects like this:
[{{
"sub-feature": sub-feature,
"description": description
}}]
'''
    human_message = HumanMessagePromptTemplate(
        prompt=PromptTemplate(
            input_variables=["feature_with_desc", "super_feature", "super_feature_description", "sub_features"],
            template=body,
        )
    )
    return ChatPromptTemplate.from_messages([get_system_message(), human_message])
class LLM:
    """Wrapper around two LangChain chains that decompose app features into sub-features."""

    def __init__(self, model_name="gpt-4o"):
        """Create the refinement chains backed by an OpenAI chat model.

        Args:
            model_name: OpenAI chat model identifier.

        Raises:
            KeyError: if OPENAI_API_KEY is not set in the environment.
        """
        chat_model = ChatOpenAI(model=model_name, temperature=0, openai_api_key=os.environ["OPENAI_API_KEY"])
        self.feature_refine_chain = LLMChain(llm=chat_model, prompt=get_feature_refine_template())
        self.feature_with_super_feature_refine_chain = LLMChain(llm=chat_model, prompt=get_feature_with_super_feature_refine_template())

    def inspire(self, query):
        """Refine a feature into sub-features, with or without super-feature context.

        Args:
            query: dict with at least "feature"; optionally "feature_description",
                and for the context-aware chain also "super_feature",
                "super_feature_description", and "sibling_features".

        Returns:
            Parsed JSON (a list of {"sub-feature", "description"} objects from
            the model), or [] when `query` lacks a "feature" key.

        Raises:
            json.JSONDecodeError: if the model reply is not valid JSON.
        """
        context_keys = ("super_feature", "super_feature_description", "feature", "sibling_features")
        if all(key in query for key in context_keys):
            # FIX: the original indexed query["feature_description"] directly here,
            # raising KeyError when the description was omitted; default to ""
            # for consistency with the no-context branch below.
            description = query.get("feature_description", "")
            output = self.feature_with_super_feature_refine_chain.predict(
                feature_with_desc=query["feature"] + ":" + description,
                super_feature=query["super_feature"],
                super_feature_description=query["super_feature_description"],
                sub_features=query["sibling_features"]
            )
        elif "feature" in query:
            output = self.feature_refine_chain.predict(
                feature=query["feature"],
                feature_description=query.get("feature_description", "")
            )
        else:
            # FIX: the original fed "" through select_code_block/json.loads,
            # guaranteeing a confusing JSONDecodeError; report "nothing to
            # refine" explicitly instead.
            return []
        # select_code_block comes from utils (star import); presumably it strips
        # the markdown code fence around the model's JSON — verify against utils.
        output = select_code_block(output)
        return json.loads(output)
if __name__ == "__main__":
    # Smoke-test the refinement chain on a sample feature and pretty-print the result.
    llm = LLM()
    result = llm.inspire(query={"feature": "health monitoring for elderly"})
    # Alternative example with an explicit description:
    # result = llm.inspire(query={"feature": "Parking Space Finder", "feature_description": "This app connects drivers with available parking spaces through a marketplace, letting space owners rent their spots or helping users find free and paid parking nearby, while generating revenue through commissions and listing fees."})
    print(json.dumps(result, indent=2))