From 5b76f2cde01e063dfdf6ed645cae5341ffb78026 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Mon, 16 Dec 2024 19:07:41 +0100
Subject: [PATCH 1/2] Fix docker build and fix api_base issue in OpenaiAPI
providers
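
Each OpenaiAPI subclass previously repeated the backend URL as a keyword
default in its create_async_generator signature. Python evaluates such
defaults once, at definition time, so a subclass that only overrode the
api_base class attribute (without redefining the method) still sent
requests to the URL frozen into the parent's signature. The default now
lives in the class attribute and is resolved at call time. A minimal
sketch of the pattern; the subclass name and URL below are illustrative,
not part of this patch:

    class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin):
        api_base = "https://api.openai.com/v1"

        @classmethod
        async def create_async_generator(cls, model, messages, api_base: str = None, **kwargs):
            # Late binding: read the attribute off the *calling* class, so
            # subclass overrides take effect without redefining the method.
            if api_base is None:
                api_base = cls.api_base
            ...

    class ExampleProvider(OpenaiAPI):  # hypothetical subclass
        api_base = "https://api.example.com/v1"  # overriding the attribute now suffices

The armv7 Docker image build is commented out of the publish workflow
until it can be fixed.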
---
.github/workflows/publish-workflow.yaml | 26 +++++------
g4f/Provider/Airforce.py | 4 +-
g4f/Provider/Cloudflare.py | 4 +-
g4f/Provider/DeepInfraChat.py | 4 +-
g4f/Provider/local/Ollama.py | 4 +-
g4f/Provider/needs_auth/Cerebras.py | 2 -
g4f/Provider/needs_auth/CopilotAccount.py | 3 +-
g4f/Provider/needs_auth/DeepInfra.py | 3 +-
g4f/Provider/needs_auth/Groq.py | 15 +-----
g4f/Provider/needs_auth/HuggingFaceAPI.py | 16 +------
g4f/Provider/needs_auth/OpenaiAPI.py | 12 +++--
g4f/Provider/needs_auth/OpenaiChat.py | 2 +-
g4f/Provider/needs_auth/PerplexityApi.py | 16 +------
g4f/Provider/needs_auth/ThebApi.py | 4 +-
g4f/Provider/needs_auth/xAI.py | 15 +-----
g4f/gui/client/static/css/style.css | 6 +--
g4f/gui/client/static/js/chat.v1.js | 56 +++++++++++++----------
g4f/requests/__init__.py | 6 +--
18 files changed, 77 insertions(+), 121 deletions(-)
diff --git a/.github/workflows/publish-workflow.yaml b/.github/workflows/publish-workflow.yaml
index 6b8112f122e..0fe7ce6ec0a 100644
--- a/.github/workflows/publish-workflow.yaml
+++ b/.github/workflows/publish-workflow.yaml
@@ -57,19 +57,19 @@ jobs:
username: ${{ github.repository_owner }}
password: ${{ secrets.GHCR_PAT }}
- - name: Build and push armv7 image
- uses: docker/build-push-action@v5
- with:
- context: .
- file: docker/Dockerfile-armv7
- platforms: linux/arm/v7
- push: true
- tags: |
- hlohaus789/g4f:latest-armv7
- hlohaus789/g4f:${{ github.ref_name }}-armv7
- labels: ${{ steps.metadata.outputs.labels }}
- build-args: |
- G4F_VERSION=${{ github.ref_name }}
+ # - name: Build and push armv7 image
+ # uses: docker/build-push-action@v5
+ # with:
+ # context: .
+ # file: docker/Dockerfile-armv7
+ # platforms: linux/arm/v7
+ # push: true
+ # tags: |
+ # hlohaus789/g4f:latest-armv7
+ # hlohaus789/g4f:${{ github.ref_name }}-armv7
+ # labels: ${{ steps.metadata.outputs.labels }}
+ # build-args: |
+ # G4F_VERSION=${{ github.ref_name }}
- name: Build and push small images
uses: docker/build-push-action@v5
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index 42c654dd130..6f55834c838 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -158,7 +158,7 @@ async def generate_image(
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:133.0) Gecko/20100101 Firefox/133.0",
"Accept": "image/avif,image/webp,image/png,image/svg+xml,image/*;q=0.8,*/*;q=0.5",
"Accept-Language": "en-US,en;q=0.5",
- "Accept-Encoding": "gzip, deflate, br, zstd",
+ "Accept-Encoding": "gzip, deflate, br",
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}",
}
@@ -192,7 +192,7 @@ async def generate_text(
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:133.0) Gecko/20100101 Firefox/133.0",
"Accept": "application/json, text/event-stream",
"Accept-Language": "en-US,en;q=0.5",
- "Accept-Encoding": "gzip, deflate, br, zstd",
+ "Accept-Encoding": "gzip, deflate, br",
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}",
}
diff --git a/g4f/Provider/Cloudflare.py b/g4f/Provider/Cloudflare.py
index 4416f7a315e..08a4bf0d3c1 100644
--- a/g4f/Provider/Cloudflare.py
+++ b/g4f/Provider/Cloudflare.py
@@ -79,9 +79,9 @@ async def create_async_generator(
cls._args["cookies"] = merge_cookies(cls._args["cookies"] , response)
try:
await raise_for_status(response)
- except ResponseStatusError as e:
+ except ResponseStatusError:
cls._args = None
- raise e
+ raise
async for line in response.iter_lines():
if line.startswith(b'data: '):
if line == b'data: [DONE]':
diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py
index 5f3e68e5bca..6453d1673e0 100644
--- a/g4f/Provider/DeepInfraChat.py
+++ b/g4f/Provider/DeepInfraChat.py
@@ -7,6 +7,7 @@ class DeepInfraChat(OpenaiAPI):
label = "DeepInfra Chat"
url = "https://deepinfra.com/chat"
working = True
+ api_base = "https://api.deepinfra.com/v1/openai"
default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
models = [
@@ -34,7 +35,6 @@ def create_async_generator(
model: str,
messages: Messages,
proxy: str = None,
- api_base: str = "https://api.deepinfra.com/v1/openai",
**kwargs
) -> AsyncResult:
headers = {
@@ -46,4 +46,4 @@ def create_async_generator(
'X-Deepinfra-Source': 'web-page',
'accept': 'text/event-stream',
}
- return super().create_async_generator(model, messages, proxy, api_base=api_base, headers=headers, **kwargs)
\ No newline at end of file
+ return super().create_async_generator(model, messages, proxy, headers=headers, **kwargs)
\ No newline at end of file
diff --git a/g4f/Provider/local/Ollama.py b/g4f/Provider/local/Ollama.py
index de68a21804a..3103baf44d9 100644
--- a/g4f/Provider/local/Ollama.py
+++ b/g4f/Provider/local/Ollama.py
@@ -31,10 +31,10 @@ def create_async_generator(
api_base: str = None,
**kwargs
) -> AsyncResult:
- if not api_base:
+ if api_base is None:
host = os.getenv("OLLAMA_HOST", "localhost")
port = os.getenv("OLLAMA_PORT", "11434")
api_base: str = f"http://{host}:{port}/v1"
return super().create_async_generator(
model, messages, api_base=api_base, **kwargs
- )
+ )
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/Cerebras.py b/g4f/Provider/needs_auth/Cerebras.py
index 86b2dcbda99..d981c70a14e 100644
--- a/g4f/Provider/needs_auth/Cerebras.py
+++ b/g4f/Provider/needs_auth/Cerebras.py
@@ -25,7 +25,6 @@ async def create_async_generator(
cls,
model: str,
messages: Messages,
- api_base: str = api_base,
api_key: str = None,
cookies: Cookies = None,
**kwargs
@@ -41,7 +40,6 @@ async def create_async_generator(
api_key = data.get("user", {}).get("demoApiKey")
async for chunk in super().create_async_generator(
model, messages,
- api_base=api_base,
impersonate="chrome",
api_key=api_key,
headers={
diff --git a/g4f/Provider/needs_auth/CopilotAccount.py b/g4f/Provider/needs_auth/CopilotAccount.py
index 0dfb9bd4f57..6d7964d1a0a 100644
--- a/g4f/Provider/needs_auth/CopilotAccount.py
+++ b/g4f/Provider/needs_auth/CopilotAccount.py
@@ -1,9 +1,8 @@
from __future__ import annotations
-from ..base_provider import ProviderModelMixin
from ..Copilot import Copilot
-class CopilotAccount(Copilot, ProviderModelMixin):
+class CopilotAccount(Copilot):
needs_auth = True
parent = "Copilot"
default_model = "Copilot"
diff --git a/g4f/Provider/needs_auth/DeepInfra.py b/g4f/Provider/needs_auth/DeepInfra.py
index 035effb072c..3b5b6227fd6 100644
--- a/g4f/Provider/needs_auth/DeepInfra.py
+++ b/g4f/Provider/needs_auth/DeepInfra.py
@@ -8,6 +8,7 @@ class DeepInfra(OpenaiAPI):
label = "DeepInfra"
url = "https://deepinfra.com"
working = True
+    api_base = "https://api.deepinfra.com/v1/openai"
needs_auth = True
supports_stream = True
supports_message_history = True
@@ -27,7 +28,6 @@ def create_async_generator(
model: str,
messages: Messages,
stream: bool,
- api_base: str = "https://api.deepinfra.com/v1/openai",
temperature: float = 0.7,
max_tokens: int = 1028,
**kwargs
@@ -50,7 +50,6 @@ def create_async_generator(
return super().create_async_generator(
model, messages,
stream=stream,
- api_base=api_base,
temperature=temperature,
max_tokens=max_tokens,
headers=headers,
diff --git a/g4f/Provider/needs_auth/Groq.py b/g4f/Provider/needs_auth/Groq.py
index e9f3fad948c..0ccc39cf3ca 100644
--- a/g4f/Provider/needs_auth/Groq.py
+++ b/g4f/Provider/needs_auth/Groq.py
@@ -1,7 +1,6 @@
from __future__ import annotations
from .OpenaiAPI import OpenaiAPI
-from ...typing import AsyncResult, Messages
class Groq(OpenaiAPI):
label = "Groq"
@@ -29,16 +28,4 @@ class Groq(OpenaiAPI):
"whisper-large-v3",
"whisper-large-v3-turbo",
]
- model_aliases = {"mixtral-8x7b": "mixtral-8x7b-32768", "llama2-70b": "llama2-70b-4096"}
-
- @classmethod
- def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- api_base: str = api_base,
- **kwargs
- ) -> AsyncResult:
- return super().create_async_generator(
- model, messages, api_base=api_base, **kwargs
- )
\ No newline at end of file
+ model_aliases = {"mixtral-8x7b": "mixtral-8x7b-32768", "llama2-70b": "llama2-70b-4096"}
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/HuggingFaceAPI.py b/g4f/Provider/needs_auth/HuggingFaceAPI.py
index 661491b24a1..1ea65f25ec7 100644
--- a/g4f/Provider/needs_auth/HuggingFaceAPI.py
+++ b/g4f/Provider/needs_auth/HuggingFaceAPI.py
@@ -2,7 +2,6 @@
from .OpenaiAPI import OpenaiAPI
from .HuggingChat import HuggingChat
-from ...typing import AsyncResult, Messages
class HuggingFaceAPI(OpenaiAPI):
label = "HuggingFace (Inference API)"
@@ -13,17 +12,4 @@ class HuggingFaceAPI(OpenaiAPI):
default_vision_model = default_model
models = [
*HuggingChat.models
- ]
-
- @classmethod
- def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- api_base: str = api_base,
- max_tokens: int = 500,
- **kwargs
- ) -> AsyncResult:
- return super().create_async_generator(
- model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs
- )
+ ]
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/OpenaiAPI.py b/g4f/Provider/needs_auth/OpenaiAPI.py
index a61115eaab9..37b7cf62b4d 100644
--- a/g4f/Provider/needs_auth/OpenaiAPI.py
+++ b/g4f/Provider/needs_auth/OpenaiAPI.py
@@ -23,10 +23,12 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin):
fallback_models = []
@classmethod
- def get_models(cls, api_key: str = None, api_base: str = api_base) -> list[str]:
+ def get_models(cls, api_key: str = None, api_base: str = None) -> list[str]:
if not cls.models:
try:
headers = {}
+ if api_base is None:
+ api_base = cls.api_base
if api_key is not None:
headers["authorization"] = f"Bearer {api_key}"
response = requests.get(f"{api_base}/models", headers=headers)
@@ -48,7 +50,7 @@ async def create_async_generator(
timeout: int = 120,
images: ImagesType = None,
api_key: str = None,
- api_base: str = api_base,
+ api_base: str = None,
temperature: float = None,
max_tokens: int = None,
top_p: float = None,
@@ -61,6 +63,8 @@ async def create_async_generator(
) -> AsyncResult:
if cls.needs_auth and api_key is None:
raise MissingAuthError('Add a "api_key"')
+ if api_base is None:
+ api_base = cls.api_base
if images is not None:
if not model and hasattr(cls, "default_vision_model"):
model = cls.default_vision_model
@@ -134,8 +138,10 @@ def raise_error(data: dict):
elif "error" in data:
if "code" in data["error"]:
raise ResponseError(f'Error {data["error"]["code"]}: {data["error"]["message"]}')
- else:
+ elif "message" in data["error"]:
raise ResponseError(data["error"]["message"])
+ else:
+ raise ResponseError(data["error"])
@classmethod
def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 0e25a28d8fa..b64f8cd9893 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -438,7 +438,7 @@ async def create_async_generator(
async for line in response.iter_lines():
async for chunk in cls.iter_messages_line(session, line, conversation):
yield chunk
- if not history_disabled and RequestConfig.access_token is not None:
+ if not history_disabled and cls._api_key is not None:
yield SynthesizeData(cls.__name__, {
"conversation_id": conversation.conversation_id,
"message_id": conversation.message_id,
diff --git a/g4f/Provider/needs_auth/PerplexityApi.py b/g4f/Provider/needs_auth/PerplexityApi.py
index 85d7cc98aa6..0e7726f3b27 100644
--- a/g4f/Provider/needs_auth/PerplexityApi.py
+++ b/g4f/Provider/needs_auth/PerplexityApi.py
@@ -1,12 +1,12 @@
from __future__ import annotations
from .OpenaiAPI import OpenaiAPI
-from ...typing import AsyncResult, Messages
class PerplexityApi(OpenaiAPI):
label = "Perplexity API"
url = "https://www.perplexity.ai"
working = True
+ api_base = "https://api.perplexity.ai"
default_model = "llama-3-sonar-large-32k-online"
models = [
"llama-3-sonar-small-32k-chat",
@@ -15,16 +15,4 @@ class PerplexityApi(OpenaiAPI):
"llama-3-sonar-large-32k-online",
"llama-3-8b-instruct",
"llama-3-70b-instruct",
- ]
-
- @classmethod
- def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- api_base: str = "https://api.perplexity.ai",
- **kwargs
- ) -> AsyncResult:
- return super().create_async_generator(
- model, messages, api_base=api_base, **kwargs
- )
+ ]
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/ThebApi.py b/g4f/Provider/needs_auth/ThebApi.py
index 2006f7ad482..845a920dc96 100644
--- a/g4f/Provider/needs_auth/ThebApi.py
+++ b/g4f/Provider/needs_auth/ThebApi.py
@@ -31,6 +31,7 @@ class ThebApi(OpenaiAPI):
label = "TheB.AI API"
url = "https://theb.ai"
working = True
+ api_base = "https://api.theb.ai/v1"
needs_auth = True
default_model = "gpt-3.5-turbo"
models = list(models)
@@ -40,7 +41,6 @@ def create_async_generator(
cls,
model: str,
messages: Messages,
- api_base: str = "https://api.theb.ai/v1",
temperature: float = 1,
top_p: float = 1,
**kwargs
@@ -58,4 +58,4 @@ def create_async_generator(
"top_p": top_p,
}
}
- return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs)
+ return super().create_async_generator(model, messages, extra_data=data, **kwargs)
diff --git a/g4f/Provider/needs_auth/xAI.py b/g4f/Provider/needs_auth/xAI.py
index 0ffeff3bda5..fddaefc192d 100644
--- a/g4f/Provider/needs_auth/xAI.py
+++ b/g4f/Provider/needs_auth/xAI.py
@@ -1,22 +1,9 @@
from __future__ import annotations
from .OpenaiAPI import OpenaiAPI
-from ...typing import AsyncResult, Messages
class xAI(OpenaiAPI):
label = "xAI"
url = "https://console.x.ai"
api_base = "https://api.x.ai/v1"
- working = True
-
- @classmethod
- def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- api_base: str = api_base,
- **kwargs
- ) -> AsyncResult:
- return super().create_async_generator(
- model, messages, api_base=api_base, **kwargs
- )
\ No newline at end of file
+ working = True
\ No newline at end of file
diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css
index a499779a644..16e92ae4fec 100644
--- a/g4f/gui/client/static/css/style.css
+++ b/g4f/gui/client/static/css/style.css
@@ -93,8 +93,9 @@ body {
height: 100vh;
}
-a:-webkit-any-link {
- color: var(--accent);
+body:not(.white) a:link,
+body:not(.white) a:visited {
+ color: var(--colour-3);
}
.row {
@@ -380,7 +381,6 @@ body.white .gradient{
.message .content_inner a:visited{
font-size: 15px;
line-height: 1.3;
- color: var(--colour-3);
}
.message .content_inner pre{
white-space: pre-wrap;
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 222886e9696..daebef2f3e7 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -513,21 +513,7 @@ async function add_message_chunk(message, message_id) {
content_map.inner.innerHTML = markdown_render(message.preview);
} else if (message.type == "content") {
message_storage[message_id] += message.content;
- html = markdown_render(message_storage[message_id]);
- let lastElement, lastIndex = null;
-        for (element of ['</p>', '</code></pre>', '\n\n', '\n', '\n']) {
- const index = html.lastIndexOf(element)
- if (index - element.length > lastIndex) {
- lastElement = element;
- lastIndex = index;
- }
- }
- if (lastIndex) {
-            html = html.substring(0, lastIndex) + '<span class="cursor"></span>' + lastElement;
- }
- content_map.inner.innerHTML = html;
- content_map.count.innerText = count_words_and_tokens(message_storage[message_id], provider_storage[message_id]?.model);
- highlight(content_map.inner);
+ update_message(content_map, message_id);
content_map.inner.style.height = "";
} else if (message.type == "log") {
let p = document.createElement("p");
@@ -536,16 +522,6 @@ async function add_message_chunk(message, message_id) {
} else if (message.type == "synthesize") {
synthesize_storage[message_id] = message.synthesize;
}
- let scroll_down = ()=>{
- if (message_box.scrollTop >= message_box.scrollHeight - message_box.clientHeight - 100) {
- window.scrollTo(0, 0);
- message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
- }
- }
- if (!content_map.container.classList.contains("regenerate")) {
- scroll_down();
- setTimeout(scroll_down, 200);
- }
}
const ask_gpt = async (message_id, message_index = -1, regenerate = false, provider = null, model = null) => {
@@ -1233,6 +1209,36 @@ function count_words_and_tokens(text, model) {
return `(${count_words(text)} words, ${count_chars(text)} chars, ${count_tokens(model, text)} tokens)`;
}
+function update_message(content_map, message_id) {
+    if (content_map.inner.dataset.timeout) clearTimeout(content_map.inner.dataset.timeout);
+    content_map.inner.dataset.timeout = setTimeout(() => {
+ html = markdown_render(message_storage[message_id]);
+ let lastElement, lastIndex = null;
+        for (element of ['</p>', '</code></pre>', '\n\n', '\n', '\n']) {
+ const index = html.lastIndexOf(element)
+ if (index - element.length > lastIndex) {
+ lastElement = element;
+ lastIndex = index;
+ }
+ }
+ if (lastIndex) {
+            html = html.substring(0, lastIndex) + '<span class="cursor"></span>' + lastElement;
+ }
+        if (error_storage[message_id]) {
+            html += markdown_render(`**An error occurred:** ${error_storage[message_id]}`);
+        }
+        content_map.inner.innerHTML = html;
+ content_map.count.innerText = count_words_and_tokens(message_storage[message_id], provider_storage[message_id]?.model);
+ highlight(content_map.inner);
+ if (!content_map.container.classList.contains("regenerate")) {
+ if (message_box.scrollTop >= message_box.scrollHeight - message_box.clientHeight - 200) {
+ window.scrollTo(0, 0);
+ message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
+ }
+ }
+    }, 100);
+};
+
let countFocus = messageInput;
let timeoutId;
const count_input = async () => {
diff --git a/g4f/requests/__init__.py b/g4f/requests/__init__.py
index dd6d13fe839..627a0bdf7e4 100644
--- a/g4f/requests/__init__.py
+++ b/g4f/requests/__init__.py
@@ -151,14 +151,14 @@ async def get_args_from_nodriver(
else:
await browser.cookies.set_all(get_cookie_params_from_dict(cookies, url=url, domain=domain))
page = await browser.get(url)
- for c in await browser.cookies.get_all():
- if c.domain.endswith(domain):
- cookies[c.name] = c.value
+ for c in await page.send(nodriver.cdp.network.get_cookies([url])):
+ cookies[c.name] = c.value
user_agent = await page.evaluate("window.navigator.userAgent")
await page.wait_for("body:not(.no-js)", timeout=timeout)
await page.close()
browser.stop()
return {
+ "impersonate": "chrome",
"cookies": cookies,
"headers": {
**DEFAULT_HEADERS,
From 75cb6163ae691bad1072ae334500f8a8a44d099e Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Mon, 16 Dec 2024 19:34:22 +0100
Subject: [PATCH 2/2] Use other model for copilot
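
The PR review tool defaults to g4f.models.gpt_4 instead of the generic
default model; setting, e.g., G4F_MODEL=gpt-4 in the environment of
etc/tool/copilot.py still overrides it. PollinationsAI also moves
api_base to a class attribute, and image generation now fetches the
generated image URL (honoring the proxy) and raises on HTTP errors
before yielding the ImageResponse.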
---
etc/tool/copilot.py | 2 +-
g4f/Provider/PollinationsAI.py | 22 +++++++++++-----------
g4f/models.py | 4 +---
3 files changed, 13 insertions(+), 15 deletions(-)
diff --git a/etc/tool/copilot.py b/etc/tool/copilot.py
index 4732e341ff7..df4dd796b0f 100644
--- a/etc/tool/copilot.py
+++ b/etc/tool/copilot.py
@@ -18,7 +18,7 @@
GITHUB_TOKEN = os.getenv('GITHUB_TOKEN')
GITHUB_REPOSITORY = os.getenv('GITHUB_REPOSITORY')
G4F_PROVIDER = os.getenv('G4F_PROVIDER')
-G4F_MODEL = os.getenv('G4F_MODEL') or g4f.models.default
+G4F_MODEL = os.getenv('G4F_MODEL') or g4f.models.gpt_4
def get_pr_details(github: Github) -> PullRequest:
"""
diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index 31a7e7e436b..20f3e0c9aaa 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -15,16 +15,14 @@
class PollinationsAI(OpenaiAPI):
label = "Pollinations AI"
url = "https://pollinations.ai"
-
working = True
needs_auth = False
supports_stream = True
-
+ api_base = "https://text.pollinations.ai/openai"
+
default_model = "openai"
-
additional_models_image = ["midjourney", "dall-e-3"]
additional_models_text = ["sur", "sur-mistral", "claude"]
-
model_aliases = {
"gpt-4o": "openai",
"mistral-nemo": "mistral",
@@ -66,7 +64,6 @@ async def create_async_generator(
model: str,
messages: Messages,
prompt: str = None,
- api_base: str = "https://text.pollinations.ai/openai",
api_key: str = None,
proxy: str = None,
seed: str = None,
@@ -76,25 +73,28 @@ async def create_async_generator(
) -> AsyncResult:
model = cls.get_model(model)
if model in cls.image_models:
- async for response in cls._generate_image(model, messages, prompt, seed, width, height):
+ async for response in cls._generate_image(model, messages, prompt, proxy, seed, width, height):
yield response
elif model in cls.models:
- async for response in cls._generate_text(model, messages, api_base, api_key, proxy, **kwargs):
+ async for response in cls._generate_text(model, messages, api_key, proxy, **kwargs):
yield response
else:
raise ValueError(f"Unknown model: {model}")
@classmethod
- async def _generate_image(cls, model: str, messages: Messages, prompt: str = None, seed: str = None, width: int = 1024, height: int = 1024):
+ async def _generate_image(cls, model: str, messages: Messages, prompt: str = None, proxy: str = None, seed: str = None, width: int = 1024, height: int = 1024):
if prompt is None:
prompt = messages[-1]["content"]
if seed is None:
seed = random.randint(0, 100000)
image = f"https://image.pollinations.ai/prompt/{quote(prompt)}?width={width}&height={height}&seed={int(seed)}&nofeed=true&nologo=true&model={quote(model)}"
+ async with ClientSession(connector=get_connector(proxy=proxy), headers=cls.headers) as session:
+ async with session.get(image) as response:
+ await raise_for_status(response)
yield ImageResponse(image, prompt)
@classmethod
- async def _generate_text(cls, model: str, messages: Messages, api_base: str, api_key: str = None, proxy: str = None, **kwargs):
+ async def _generate_text(cls, model: str, messages: Messages, api_key: str = None, proxy: str = None, **kwargs):
if api_key is None:
async with ClientSession(connector=get_connector(proxy=proxy), headers=cls.headers) as session:
prompt = format_prompt(messages)
@@ -104,6 +104,6 @@ async def _generate_text(cls, model: str, messages: Messages, api_base: str, api
yield line.decode(errors="ignore")
else:
async for chunk in super().create_async_generator(
- model, messages, api_base=api_base, proxy=proxy, **kwargs
+ model, messages, proxy=proxy, **kwargs
):
- yield chunk
+ yield chunk
\ No newline at end of file
diff --git a/g4f/models.py b/g4f/models.py
index 96fead58b6c..857119af5c3 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -24,7 +24,6 @@
HuggingFace,
Liaobots,
Airforce,
- Mhystical,
MetaAI,
MicrosoftDesigner,
OpenaiChat,
@@ -68,7 +67,6 @@ class ImageModel(Model):
best_provider = IterListProvider([
DDG,
Pizzagpt,
- ReplicateHome,
Blackbox2,
Blackbox,
Copilot,
@@ -78,7 +76,7 @@ class ImageModel(Model):
Cloudflare,
PollinationsAI,
ChatGptEs,
- ChatGpt,
+ OpenaiChat,
])
)