
Commit 0332d0d
Merge pull request #2490 from hlohaus/ccccc
Fix docker build and fix api_base issue in OpenaiAPI providers
hlohaus authored Dec 16, 2024
2 parents a45e831 + 75cb616 · commit 0332d0d
Showing 21 changed files with 90 additions and 136 deletions.
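The api_base changes below all follow one pattern: the hardcoded `api_base: str = ...` parameter defaults are removed from each provider's `create_async_generator`, each provider instead declares `api_base` as a class attribute, and the OpenaiAPI base class resolves a missing value to `cls.api_base` at call time. A minimal sketch of the idea, with simplified signatures (the real methods are async generators with many more options):

# Minimal sketch of the api_base pattern applied in this commit
# (simplified; not the full provider code).
class OpenaiAPI:
    api_base = "https://api.openai.com/v1"  # overridden per provider

    @classmethod
    def create_async_generator(cls, model, messages, api_base=None, **kwargs):
        # Resolve the endpoint at call time instead of freezing it into a
        # signature default like `api_base: str = api_base`, so each
        # subclass's attribute (or an explicit argument) wins.
        if api_base is None:
            api_base = cls.api_base
        return f"POST {api_base}/chat/completions (model={model})"

class PerplexityApi(OpenaiAPI):
    api_base = "https://api.perplexity.ai"

print(PerplexityApi.create_async_generator("llama-3-70b-instruct", []))
# -> POST https://api.perplexity.ai/chat/completions (model=llama-3-70b-instruct)

Callers can still pass api_base explicitly; when they don't, each subclass's own endpoint is used.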
26 changes: 13 additions & 13 deletions .github/workflows/publish-workflow.yaml
@@ -57,19 +57,19 @@ jobs:
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GHCR_PAT }}
 
-      - name: Build and push armv7 image
-        uses: docker/build-push-action@v5
-        with:
-          context: .
-          file: docker/Dockerfile-armv7
-          platforms: linux/arm/v7
-          push: true
-          tags: |
-            hlohaus789/g4f:latest-armv7
-            hlohaus789/g4f:${{ github.ref_name }}-armv7
-          labels: ${{ steps.metadata.outputs.labels }}
-          build-args: |
-            G4F_VERSION=${{ github.ref_name }}
+      # - name: Build and push armv7 image
+      #   uses: docker/build-push-action@v5
+      #   with:
+      #     context: .
+      #     file: docker/Dockerfile-armv7
+      #     platforms: linux/arm/v7
+      #     push: true
+      #     tags: |
+      #       hlohaus789/g4f:latest-armv7
+      #       hlohaus789/g4f:${{ github.ref_name }}-armv7
+      #     labels: ${{ steps.metadata.outputs.labels }}
+      #     build-args: |
+      #       G4F_VERSION=${{ github.ref_name }}
 
       - name: Build and push small images
         uses: docker/build-push-action@v5
2 changes: 1 addition & 1 deletion etc/tool/copilot.py
@@ -18,7 +18,7 @@
 GITHUB_TOKEN = os.getenv('GITHUB_TOKEN')
 GITHUB_REPOSITORY = os.getenv('GITHUB_REPOSITORY')
 G4F_PROVIDER = os.getenv('G4F_PROVIDER')
-G4F_MODEL = os.getenv('G4F_MODEL') or g4f.models.default
+G4F_MODEL = os.getenv('G4F_MODEL') or g4f.models.gpt_4
 
 def get_pr_details(github: Github) -> PullRequest:
     """
4 changes: 2 additions & 2 deletions g4f/Provider/Airforce.py
@@ -158,7 +158,7 @@ async def generate_image(
             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:133.0) Gecko/20100101 Firefox/133.0",
             "Accept": "image/avif,image/webp,image/png,image/svg+xml,image/*;q=0.8,*/*;q=0.5",
             "Accept-Language": "en-US,en;q=0.5",
-            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Encoding": "gzip, deflate, br",
             "Content-Type": "application/json",
             "Authorization": f"Bearer {api_key}",
         }
@@ -192,7 +192,7 @@ async def generate_text(
             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:133.0) Gecko/20100101 Firefox/133.0",
             "Accept": "application/json, text/event-stream",
             "Accept-Language": "en-US,en;q=0.5",
-            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Encoding": "gzip, deflate, br",
             "Content-Type": "application/json",
             "Authorization": f"Bearer {api_key}",
         }
4 changes: 2 additions & 2 deletions g4f/Provider/Cloudflare.py
@@ -79,9 +79,9 @@ async def create_async_generator(
             cls._args["cookies"] = merge_cookies(cls._args["cookies"], response)
             try:
                 await raise_for_status(response)
-            except ResponseStatusError as e:
+            except ResponseStatusError:
                 cls._args = None
-                raise e
+                raise
             async for line in response.iter_lines():
                 if line.startswith(b'data: '):
                     if line == b'data: [DONE]':
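The `raise e` → bare `raise` swap above is the standard Python idiom for re-raising the active exception after cleanup while keeping the original traceback. A generic, self-contained illustration (the failing call is made up):

# Illustrative only: reset cached state, then re-raise the original error.
cached_args = {"cookies": {}}

def fetch():
    raise ConnectionError("upstream failed")  # stand-in for raise_for_status

try:
    fetch()
except ConnectionError:
    cached_args = None  # mirrors cls._args = None in the diff
    raise               # bare raise keeps the original exception and traceback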
4 changes: 2 additions & 2 deletions g4f/Provider/DeepInfraChat.py
@@ -7,6 +7,7 @@ class DeepInfraChat(OpenaiAPI):
     label = "DeepInfra Chat"
     url = "https://deepinfra.com/chat"
     working = True
+    api_base = "https://api.deepinfra.com/v1/openai"
 
     default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
     models = [
@@ -34,7 +35,6 @@ def create_async_generator(
         model: str,
         messages: Messages,
         proxy: str = None,
-        api_base: str = "https://api.deepinfra.com/v1/openai",
         **kwargs
     ) -> AsyncResult:
         headers = {
@@ -46,4 +46,4 @@ def create_async_generator(
             'X-Deepinfra-Source': 'web-page',
             'accept': 'text/event-stream',
         }
-        return super().create_async_generator(model, messages, proxy, api_base=api_base, headers=headers, **kwargs)
+        return super().create_async_generator(model, messages, proxy, headers=headers, **kwargs)
22 changes: 11 additions & 11 deletions g4f/Provider/PollinationsAI.py
@@ -15,16 +15,14 @@
 class PollinationsAI(OpenaiAPI):
     label = "Pollinations AI"
     url = "https://pollinations.ai"
-
     working = True
     needs_auth = False
     supports_stream = True
-
+    api_base = "https://text.pollinations.ai/openai"
+
     default_model = "openai"
-
     additional_models_image = ["midjourney", "dall-e-3"]
     additional_models_text = ["sur", "sur-mistral", "claude"]
-
     model_aliases = {
         "gpt-4o": "openai",
         "mistral-nemo": "mistral",
@@ -66,7 +64,6 @@ async def create_async_generator(
         model: str,
         messages: Messages,
         prompt: str = None,
-        api_base: str = "https://text.pollinations.ai/openai",
         api_key: str = None,
         proxy: str = None,
         seed: str = None,
@@ -76,25 +73,28 @@ async def create_async_generator(
     ) -> AsyncResult:
         model = cls.get_model(model)
         if model in cls.image_models:
-            async for response in cls._generate_image(model, messages, prompt, seed, width, height):
+            async for response in cls._generate_image(model, messages, prompt, proxy, seed, width, height):
                 yield response
         elif model in cls.models:
-            async for response in cls._generate_text(model, messages, api_base, api_key, proxy, **kwargs):
+            async for response in cls._generate_text(model, messages, api_key, proxy, **kwargs):
                 yield response
         else:
             raise ValueError(f"Unknown model: {model}")
 
     @classmethod
-    async def _generate_image(cls, model: str, messages: Messages, prompt: str = None, seed: str = None, width: int = 1024, height: int = 1024):
+    async def _generate_image(cls, model: str, messages: Messages, prompt: str = None, proxy: str = None, seed: str = None, width: int = 1024, height: int = 1024):
         if prompt is None:
             prompt = messages[-1]["content"]
         if seed is None:
             seed = random.randint(0, 100000)
         image = f"https://image.pollinations.ai/prompt/{quote(prompt)}?width={width}&height={height}&seed={int(seed)}&nofeed=true&nologo=true&model={quote(model)}"
+        async with ClientSession(connector=get_connector(proxy=proxy), headers=cls.headers) as session:
+            async with session.get(image) as response:
+                await raise_for_status(response)
         yield ImageResponse(image, prompt)
 
     @classmethod
-    async def _generate_text(cls, model: str, messages: Messages, api_base: str, api_key: str = None, proxy: str = None, **kwargs):
+    async def _generate_text(cls, model: str, messages: Messages, api_key: str = None, proxy: str = None, **kwargs):
         if api_key is None:
             async with ClientSession(connector=get_connector(proxy=proxy), headers=cls.headers) as session:
                 prompt = format_prompt(messages)
@@ -104,6 +104,6 @@ async def _generate_text(cls, model: str, messages: Messages, api_base: str, api_key: str = None, proxy: str = None, **kwargs):
                     yield line.decode(errors="ignore")
         else:
             async for chunk in super().create_async_generator(
-                model, messages, api_base=api_base, proxy=proxy, **kwargs
+                model, messages, proxy=proxy, **kwargs
             ):
-                yield chunk
\ No newline at end of file
+                yield chunk
4 changes: 2 additions & 2 deletions g4f/Provider/local/Ollama.py
@@ -31,10 +31,10 @@ def create_async_generator(
         api_base: str = None,
         **kwargs
     ) -> AsyncResult:
-        if not api_base:
+        if api_base is None:
             host = os.getenv("OLLAMA_HOST", "localhost")
             port = os.getenv("OLLAMA_PORT", "11434")
             api_base: str = f"http://{host}:{port}/v1"
         return super().create_async_generator(
             model, messages, api_base=api_base, **kwargs
-        )
\ No newline at end of file
+        )
2 changes: 0 additions & 2 deletions g4f/Provider/needs_auth/Cerebras.py
@@ -25,7 +25,6 @@ async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
-        api_base: str = api_base,
         api_key: str = None,
         cookies: Cookies = None,
         **kwargs
@@ -41,7 +40,6 @@
             api_key = data.get("user", {}).get("demoApiKey")
         async for chunk in super().create_async_generator(
             model, messages,
-            api_base=api_base,
             impersonate="chrome",
             api_key=api_key,
             headers={
3 changes: 1 addition & 2 deletions g4f/Provider/needs_auth/CopilotAccount.py
@@ -1,9 +1,8 @@
 from __future__ import annotations
 
-from ..base_provider import ProviderModelMixin
 from ..Copilot import Copilot
 
-class CopilotAccount(Copilot, ProviderModelMixin):
+class CopilotAccount(Copilot):
     needs_auth = True
     parent = "Copilot"
     default_model = "Copilot"
3 changes: 1 addition & 2 deletions g4f/Provider/needs_auth/DeepInfra.py
@@ -8,6 +8,7 @@ class DeepInfra(OpenaiAPI):
     label = "DeepInfra"
     url = "https://deepinfra.com"
     working = True
+    api_base = "https://api.deepinfra.com/v1/openai",
     needs_auth = True
     supports_stream = True
     supports_message_history = True
@@ -27,7 +28,6 @@ def create_async_generator(
         model: str,
         messages: Messages,
         stream: bool,
-        api_base: str = "https://api.deepinfra.com/v1/openai",
         temperature: float = 0.7,
         max_tokens: int = 1028,
         **kwargs
@@ -50,7 +50,6 @@
         return super().create_async_generator(
             model, messages,
             stream=stream,
-            api_base=api_base,
             temperature=temperature,
             max_tokens=max_tokens,
             headers=headers,
15 changes: 1 addition & 14 deletions g4f/Provider/needs_auth/Groq.py
@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 from .OpenaiAPI import OpenaiAPI
-from ...typing import AsyncResult, Messages
 
 class Groq(OpenaiAPI):
     label = "Groq"
@@ -29,16 +28,4 @@ class Groq(OpenaiAPI):
         "whisper-large-v3",
         "whisper-large-v3-turbo",
     ]
-    model_aliases = {"mixtral-8x7b": "mixtral-8x7b-32768", "llama2-70b": "llama2-70b-4096"}
-
-    @classmethod
-    def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        api_base: str = api_base,
-        **kwargs
-    ) -> AsyncResult:
-        return super().create_async_generator(
-            model, messages, api_base=api_base, **kwargs
-        )
+    model_aliases = {"mixtral-8x7b": "mixtral-8x7b-32768", "llama2-70b": "llama2-70b-4096"}
\ No newline at end of file
16 changes: 1 addition & 15 deletions g4f/Provider/needs_auth/HuggingFaceAPI.py
@@ -2,7 +2,6 @@
 
 from .OpenaiAPI import OpenaiAPI
 from .HuggingChat import HuggingChat
-from ...typing import AsyncResult, Messages
 
 class HuggingFaceAPI(OpenaiAPI):
     label = "HuggingFace (Inference API)"
@@ -13,17 +12,4 @@ class HuggingFaceAPI(OpenaiAPI):
     default_vision_model = default_model
     models = [
         *HuggingChat.models
-    ]
-
-    @classmethod
-    def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        api_base: str = api_base,
-        max_tokens: int = 500,
-        **kwargs
-    ) -> AsyncResult:
-        return super().create_async_generator(
-            model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs
-        )
+    ]
\ No newline at end of file
12 changes: 9 additions & 3 deletions g4f/Provider/needs_auth/OpenaiAPI.py
@@ -23,10 +23,12 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin):
     fallback_models = []
 
     @classmethod
-    def get_models(cls, api_key: str = None, api_base: str = api_base) -> list[str]:
+    def get_models(cls, api_key: str = None, api_base: str = None) -> list[str]:
         if not cls.models:
             try:
                 headers = {}
+                if api_base is None:
+                    api_base = cls.api_base
                 if api_key is not None:
                     headers["authorization"] = f"Bearer {api_key}"
                 response = requests.get(f"{api_base}/models", headers=headers)
@@ -48,7 +50,7 @@ async def create_async_generator(
         timeout: int = 120,
         images: ImagesType = None,
         api_key: str = None,
-        api_base: str = api_base,
+        api_base: str = None,
         temperature: float = None,
         max_tokens: int = None,
         top_p: float = None,
@@ -61,6 +63,8 @@
     ) -> AsyncResult:
         if cls.needs_auth and api_key is None:
            raise MissingAuthError('Add a "api_key"')
+        if api_base is None:
+            api_base = cls.api_base
         if images is not None:
             if not model and hasattr(cls, "default_vision_model"):
                 model = cls.default_vision_model
@@ -134,8 +138,10 @@ def raise_error(data: dict):
         elif "error" in data:
             if "code" in data["error"]:
                 raise ResponseError(f'Error {data["error"]["code"]}: {data["error"]["message"]}')
-            else:
+            elif "message" in data["error"]:
                 raise ResponseError(data["error"]["message"])
+            else:
+                raise ResponseError(data["error"])
 
     @classmethod
     def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
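The raise_error hunk above adds a fallback for error payloads whose "error" field is a bare string rather than an object. A small self-contained sketch of the updated branching (the payloads are hypothetical, and the real method has further cases above the ones shown):

class ResponseError(Exception):
    pass

def raise_error(data: dict):
    # Mirrors only the branches visible in the diff above.
    if "error" in data:
        if "code" in data["error"]:
            raise ResponseError(f'Error {data["error"]["code"]}: {data["error"]["message"]}')
        elif "message" in data["error"]:
            raise ResponseError(data["error"]["message"])
        else:
            # New fallback in this commit: "error" may be a plain string.
            raise ResponseError(data["error"])

for payload in (
    {"error": {"code": 401, "message": "invalid key"}},  # -> Error 401: invalid key
    {"error": {"message": "rate limited"}},              # -> rate limited
    {"error": "service unavailable"},                    # -> service unavailable
):
    try:
        raise_error(payload)
    except ResponseError as e:
        print(e)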
2 changes: 1 addition & 1 deletion g4f/Provider/needs_auth/OpenaiChat.py
@@ -438,7 +438,7 @@ async def create_async_generator(
             async for line in response.iter_lines():
                 async for chunk in cls.iter_messages_line(session, line, conversation):
                     yield chunk
-            if not history_disabled and RequestConfig.access_token is not None:
+            if not history_disabled and cls._api_key is not None:
                 yield SynthesizeData(cls.__name__, {
                     "conversation_id": conversation.conversation_id,
                     "message_id": conversation.message_id,
16 changes: 2 additions & 14 deletions g4f/Provider/needs_auth/PerplexityApi.py
@@ -1,12 +1,12 @@
 from __future__ import annotations
 
 from .OpenaiAPI import OpenaiAPI
-from ...typing import AsyncResult, Messages
 
 class PerplexityApi(OpenaiAPI):
     label = "Perplexity API"
     url = "https://www.perplexity.ai"
     working = True
+    api_base = "https://api.perplexity.ai"
     default_model = "llama-3-sonar-large-32k-online"
     models = [
         "llama-3-sonar-small-32k-chat",
@@ -15,16 +15,4 @@ class PerplexityApi(OpenaiAPI):
         "llama-3-sonar-large-32k-online",
         "llama-3-8b-instruct",
         "llama-3-70b-instruct",
-    ]
-
-    @classmethod
-    def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        api_base: str = "https://api.perplexity.ai",
-        **kwargs
-    ) -> AsyncResult:
-        return super().create_async_generator(
-            model, messages, api_base=api_base, **kwargs
-        )
+    ]
\ No newline at end of file
4 changes: 2 additions & 2 deletions g4f/Provider/needs_auth/ThebApi.py
@@ -31,6 +31,7 @@ class ThebApi(OpenaiAPI):
     label = "TheB.AI API"
     url = "https://theb.ai"
     working = True
+    api_base = "https://api.theb.ai/v1"
     needs_auth = True
     default_model = "gpt-3.5-turbo"
     models = list(models)
@@ -40,7 +41,6 @@ def create_async_generator(
         cls,
         model: str,
         messages: Messages,
-        api_base: str = "https://api.theb.ai/v1",
         temperature: float = 1,
         top_p: float = 1,
         **kwargs
@@ -58,4 +58,4 @@
                 "top_p": top_p,
             }
         }
-        return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs)
+        return super().create_async_generator(model, messages, extra_data=data, **kwargs)