Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

openai_adapter支持usage #838

Closed
wants to merge 6 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .python-version
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
3.10.9
1 change: 1 addition & 0 deletions python/.python-version
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
3.10.9
1 change: 0 additions & 1 deletion python/qianfan/dataset/stress_test/load_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
QianfanLocustRunner
"""


import logging
import os
import time
Expand Down
1 change: 1 addition & 0 deletions python/qianfan/dataset/stress_test/yame/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
locust runner lib
"""

from typing import Any, Dict


Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
yame plugins
"""

from typing import Any

from locust import events
Expand Down
1 change: 1 addition & 0 deletions python/qianfan/dataset/stress_test/yame/users/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
yame users
"""

from qianfan.dataset.stress_test.yame.users.custom_user import CustomUser

__all__ = ["CustomUser"]
43 changes: 36 additions & 7 deletions python/qianfan/extensions/openai/adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,6 +181,12 @@ def add_if_exist(openai_key: str, qianfan_key: Optional[str] = None) -> None:
if not isinstance(response_format, str):
response_format = response_format["type"]
qianfan_request["response_format"] = response_format
if "stream_options" in openai_request:
stream_options = openai_request["stream_options"]
qianfan_request["stream"] = True
if not isinstance(stream_options, str):
stream_options = stream_options["include_usage"]
qianfan_request["stream_options"] = stream_options
return qianfan_request

def openai_chat_request_to_qianfan(
Expand Down Expand Up @@ -464,6 +470,8 @@ async def task(n: int) -> AsyncIterator[Tuple[int, QfResponse]]:
tasks = [task(i) for i in range(n)]
results = merge_async_iters(*tasks)
base = None
total_prompt_tokens = 0
total_completion_tokens = 0
async for i, res in results:
if base is None:
base = {
Expand Down Expand Up @@ -513,11 +521,28 @@ async def task(n: int) -> AsyncIterator[Tuple[int, QfResponse]]:
choices[0]["delta"]["finish_reason"] = "tool_calls"
choices[0]["delta"]["function_call"] = res["function_call"]

if res["is_end"] and "usage" in res:
total_prompt_tokens += res["usage"]["prompt_tokens"]
total_completion_tokens += res["usage"]["completion_tokens"]

yield {
"choices": choices,
**base,
}

# After the stream ends, append the aggregated usage information
base = base or {}
if total_prompt_tokens > 0 or total_completion_tokens > 0:
yield {
"choices": [],
"usage": {
"prompt_tokens": total_prompt_tokens,
"completion_tokens": total_completion_tokens,
"total_tokens": total_prompt_tokens + total_completion_tokens,
},
**base,
}

async def _completion_stream(
self, n: int, openai_request: OpenAIRequest, qianfan_request: QianfanRequest
) -> AsyncIterator[OpenAIResponse]:
Expand All @@ -536,13 +561,17 @@ async def task(n: int) -> AsyncIterator[Tuple[int, QfResponse]]:
base = None
async for i, res in results:
if base is None:
base = {
"id": res["id"],
"created": res["created"],
"model": openai_request["model"],
"system_fingerprint": "fp_?",
"object": "text_completion",
}
base = (
{
"id": res["id"],
"created": res["created"],
"model": openai_request["model"],
"system_fingerprint": "fp_?",
"object": "text_completion",
}
if base is None
else base
)
for j in range(n):
yield {
"choices": [
Expand Down
Loading