Skip to content

Commit

Permalink
Remove all embedding tests that require downloading the model, for faster testing
Browse files Browse the repository at this point in the history

Signed-off-by: SimFG <[email protected]>
  • Loading branch information
SimFG committed Apr 2, 2024
1 parent 2088ddd commit d2ad47c
Show file tree
Hide file tree
Showing 27 changed files with 1,589 additions and 1,610 deletions.
28 changes: 13 additions & 15 deletions gptcache/adapter/openai.py
Original file line number Diff line number Diff line change
@@ -1,30 +1,28 @@
import json
from typing import Any, AsyncGenerator, Iterator, List, Mapping
from deprecated import deprecated
from typing import Any, AsyncGenerator, Iterator

from gptcache import cache
from gptcache.adapter.adapter import aadapt, adapt
from gptcache.adapter.base import BaseCacheLLM
from gptcache.adapter.adapter import adapt
from gptcache.manager.scalar_data.base import Answer, DataType
from gptcache.utils import import_openai, import_pillow
from gptcache.utils import import_openai
from gptcache.utils.error import wrap_error
from gptcache.utils.response import (
get_audio_text_from_openai_answer,
get_image_from_openai_b64,
get_image_from_openai_url,
get_message_from_openai_answer,
# get_audio_text_from_openai_answer,
# get_image_from_openai_b64,
# get_image_from_openai_url,
# get_message_from_openai_answer,
get_message_from_openai_answer2,
get_stream_message_from_openai_answer,
get_stream_message_from_openai_answer2,
get_text_from_openai_answer,
# get_stream_message_from_openai_answer2,
# get_text_from_openai_answer,
)
from gptcache.utils.token import token_counter
from ._util import (
_construct_audio_text_from_cache,
_construct_image_create_resp_from_cache,
# _construct_audio_text_from_cache,
# _construct_image_create_resp_from_cache,
_construct_resp_from_cache,
_construct_stream_resp_from_cache,
_construct_text_from_cache, _num_tokens_from_messages
# _construct_text_from_cache,
_num_tokens_from_messages,
)

import_openai()
Expand Down
11 changes: 9 additions & 2 deletions tests/integration_tests/examples/map/test_example_map.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,11 @@ def test_map():
)

expect_answer = "receiver the foo 15"
answer = openai.ChatCompletion.create(
from openai import OpenAI
answer = openai.cache_openai_chat_complete(
OpenAI(
api_key="API_KEY",
),
model="gpt-3.5-turbo",
messages=mock_messages,
)
Expand All @@ -48,7 +52,10 @@ def test_map():
data_manager=get_data_manager(data_path=data_file, max_size=10),
next_cache=bak_cache2,
)
answer = openai.ChatCompletion.create(
answer = openai.cache_openai_chat_complete(
OpenAI(
api_key="API_KEY",
),
model="gpt-3.5-turbo",
messages=mock_messages,
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,11 @@ def test_sqlite_faiss():
[f"foo{i}" for i in range(10)], [f"receiver the foo {i}" for i in range(10)]
)

answer = openai.ChatCompletion.create(
from openai import OpenAI
answer = openai.cache_openai_chat_complete(
OpenAI(
api_key="API_KEY",
),
model="gpt-3.5-turbo",
messages=mock_messages,
)
Expand All @@ -60,7 +64,10 @@ def test_sqlite_faiss():
similarity_threshold=0,
),
)
answer = openai.ChatCompletion.create(
answer = openai.cache_openai_chat_complete(
OpenAI(
api_key="API_KEY",
),
model="gpt-3.5-turbo",
messages=mock_messages,
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,12 @@ def log_time_func(func_name, delta_time):
]

start_time = time.time()
answer = openai.ChatCompletion.create(
from openai import OpenAI
openai_client = OpenAI(
api_key="API_KEY",
)
answer = openai.cache_openai_chat_complete(
openai_client,
model="gpt-3.5-turbo",
messages=mock_messages,
)
Expand All @@ -51,7 +56,8 @@ def log_time_func(func_name, delta_time):

is_exception = False
try:
openai.ChatCompletion.create(
openai.cache_openai_chat_complete(
openai_client,
model="gpt-3.5-turbo",
messages=mock_messages,
cache_factor=100,
Expand All @@ -67,7 +73,8 @@ def log_time_func(func_name, delta_time):
]
is_exception = False
try:
openai.ChatCompletion.create(
openai.cache_openai_chat_complete(
openai_client,
model="gpt-3.5-turbo",
messages=mock_messages,
)
Expand All @@ -78,7 +85,8 @@ def log_time_func(func_name, delta_time):

is_exception = False
try:
openai.ChatCompletion.create(
openai.cache_openai_chat_complete(
openai_client,
model="gpt-3.5-turbo",
messages=mock_messages,
cache_factor=0.5,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,9 @@ def test_pre_without_prompt():
[f"receiver the foo {i}" for i in range(10)],
)

answer = openai.ChatCompletion.create(
from openai import OpenAI
answer = openai.cache_openai_chat_complete(
OpenAI(),
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
Expand Down
32 changes: 6 additions & 26 deletions tests/integration_tests/test_redis_onnx.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,34 +36,14 @@ def test_redis_sqlite():
)
question = "what's github"
expect_answer = "GitHub is an online platform used primarily for version control and coding collaborations."
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas

response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
cache_obj=redis_cache,
)
redis_cache.data_manager.save(question, expect_answer, redis_cache.embedding_func(question))

assert get_message_from_openai_answer(response) == expect_answer, response

response = openai.ChatCompletion.create(
from openai import OpenAI
response = openai.cache_openai_chat_complete(
OpenAI(
api_key="API_KEY",
),
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
Expand Down
30 changes: 25 additions & 5 deletions tests/integration_tests/test_sqlite_faiss_onnx.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,11 @@ def test_no_openai_key(self):

is_exception = False
try:
openai.ChatCompletion.create(
from openai import OpenAI
openai.cache_openai_chat_complete(
OpenAI(
api_key="API_KEY",
),
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
Expand Down Expand Up @@ -114,7 +118,11 @@ def test_hit_default(self):
answer = "chatgpt is a good application"
cache.data_manager.save(question, answer, cache.embedding_func(question))

openai.ChatCompletion.create(
from openai import OpenAI
openai.cache_openai_chat_complete(
OpenAI(
api_key="API_KEY",
),
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
Expand Down Expand Up @@ -147,7 +155,11 @@ def test_hit(self):
answer = "chatgpt is a good application"
cache.data_manager.save(question, answer, cache.embedding_func(question))

openai.ChatCompletion.create(
from openai import OpenAI
openai.cache_openai_chat_complete(
OpenAI(
api_key="API_KEY",
),
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
Expand Down Expand Up @@ -181,7 +193,11 @@ def test_miss(self):

is_exception = False
try:
openai.ChatCompletion.create(
from openai import OpenAI
openai.cache_openai_chat_complete(
OpenAI(
api_key="API_KEY",
),
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
Expand Down Expand Up @@ -221,7 +237,11 @@ def test_disable_cache(self):

is_exception = False
try:
openai.ChatCompletion.create(
from openai import OpenAI
openai.cache_openai_chat_complete(
OpenAI(
api_key="API_KEY",
),
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
Expand Down
Loading

0 comments on commit d2ad47c

Please sign in to comment.