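"""Integration tests for the DeepSeek LLM client.

Exercises basic chat, system messages, multi-turn conversation memory, and
JSON mode against the live DeepSeek API. Requires network access and a valid
API key; these tests assume the key is supplied via the DEEPSEEK_API_KEY
environment variable rather than hard-coded in the source.

Run directly:  python test_deepseek_integration.py
"""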
import asyncio
import os
import sys

from core.config import LLMConfig, LLMProvider
from core.llm.convo import Convo
from core.llm.deepseek_client import DeepSeekClient
from core.llm.request_log import LLMRequestStatus

# NOTE: the API key is read from the environment (DEEPSEEK_API_KEY) instead of
# being committed to the repository.
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY", "")
DEEPSEEK_BASE_URL = "https://api.deepseek.com/v1/chat/completions"


async def test_basic_chat():
    """Send a single user message and print the response."""
    print("\nTest 1: Basic chat")
    print("-" * 40)
    config = LLMConfig(
        provider=LLMProvider.DEEPSEEK,
        model="deepseek-chat",
        api_key=DEEPSEEK_API_KEY,
        base_url=DEEPSEEK_BASE_URL,
        temperature=0.7,
        extra={
            "max_tokens": 8192,
            "top_p": 0.95,
        },
    )
    client = DeepSeekClient(config)
    convo = Convo()
    convo.user("Tell me a short joke about Python programming.")
    try:
        response, request_log = await client(convo)
        request_log.status = LLMRequestStatus.SUCCESS
        print(f"Response: {response}")
        print(f"Tokens used: {request_log.completion_tokens}")
        return True
    except Exception as e:
        print(f"Error: {str(e)}")
        return False


async def test_system_message():
    """Verify that a system prompt is passed through to the model."""
    print("\nTest 2: System message")
    print("-" * 40)
    config = LLMConfig(
        provider=LLMProvider.DEEPSEEK,
        model="deepseek-chat",
        api_key=DEEPSEEK_API_KEY,
        base_url=DEEPSEEK_BASE_URL,
        temperature=0.7,
    )
    client = DeepSeekClient(config)
    convo = Convo()
    convo.system("You are a helpful programming teacher who explains concepts clearly and concisely.")
    convo.user("Explain what a Python decorator is.")
    try:
        response, request_log = await client(convo)
        request_log.status = LLMRequestStatus.SUCCESS
        print(f"Response: {response}")
        print(f"Tokens used: {request_log.completion_tokens}")
        return True
    except Exception as e:
        print(f"Error: {str(e)}")
        return False


async def test_conversation_memory():
    """Check that earlier turns are carried into follow-up questions."""
    print("\nTest 3: Conversation memory")
    print("-" * 40)
    config = LLMConfig(
        provider=LLMProvider.DEEPSEEK,
        model="deepseek-chat",
        api_key=DEEPSEEK_API_KEY,
        base_url=DEEPSEEK_BASE_URL,
        temperature=0.7,
    )
    client = DeepSeekClient(config)
    convo = Convo()
    try:
        # First message
        convo.user("What's the capital of France?")
        response1, log1 = await client(convo)
        log1.status = LLMRequestStatus.SUCCESS
        print(f"First response: {response1}")
        # Record the assistant's reply so the follow-up question has the
        # context needed to resolve "that city"
        convo.assistant(response1)
        # Follow-up question using previous context
        convo.user("What's the population of that city?")
        response2, log2 = await client(convo)
        log2.status = LLMRequestStatus.SUCCESS
        print(f"Follow-up response: {response2}")
        return True
    except Exception as e:
        print(f"Error: {str(e)}")
        return False


async def test_json_mode():
    """Request a structured response via the client's json_mode flag."""
    print("\nTest 4: JSON mode")
    print("-" * 40)
    config = LLMConfig(
        provider=LLMProvider.DEEPSEEK,
        model="deepseek-chat",
        api_key=DEEPSEEK_API_KEY,
        base_url=DEEPSEEK_BASE_URL,
        temperature=0.7,
    )
    client = DeepSeekClient(config)
    convo = Convo()
    convo.user("Return information about Python in JSON format with fields: name, creator, year_created, and key_features")
    try:
        response, request_log = await client(convo, json_mode=True)
        request_log.status = LLMRequestStatus.SUCCESS
        print(f"JSON Response: {response}")
        return True
    except Exception as e:
        print(f"Error: {str(e)}")
        return False


async def run_all_tests():
    """Run all integration tests concurrently and report the overall result."""
    try:
        test_results = await asyncio.gather(
            test_basic_chat(),
            test_system_message(),
            test_conversation_memory(),
            test_json_mode(),
            return_exceptions=True,
        )
        # A test passes only if it returned True; exceptions count as failures.
        all_passed = all(result is True for result in test_results)
        if all_passed:
            print("\nAll tests passed successfully!")
        else:
            print("\nSome tests failed.")
        return all_passed
    except Exception as e:
        print(f"\nTests failed with error: {str(e)}")
        return False


if __name__ == "__main__":
    success = asyncio.run(run_all_tests())
    sys.exit(0 if success else 1)