This repository has been archived by the owner on Mar 14, 2024. It is now read-only.

Commit b06c0b9: 0.1.9.8
VadimBoev authored Jan 4, 2024
1 parent dce6d6b

Showing 13 changed files with 182 additions and 28 deletions.
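Most of the deprecated providers below receive the same mechanical change: they now subclass AbstractProvider instead of BaseProvider, so one import line and one class statement are swapped per file. As a rough illustration of the post-rename shape of such a module (a hypothetical provider, not part of this commit; the class attributes and create_completion signature are assumed to mirror the deprecated providers shown in these diffs):

from __future__ import annotations

from ..base_provider import AbstractProvider   # previously: from ..base_provider import BaseProvider
from ...typing import CreateResult, Messages


class ExampleProvider(AbstractProvider):       # hypothetical name, for illustration only
    url = "https://example.invalid"
    working = False
    supports_gpt_35_turbo = True

    @classmethod
    def create_completion(cls, model: str, messages: Messages, stream: bool, **kwargs) -> CreateResult:
        # A real provider would call its backend here; this sketch just echoes the last user message.
        yield messages[-1]["content"]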
4 changes: 2 additions & 2 deletions g4f/Provider/deprecated/AiService.py
@@ -3,10 +3,10 @@
 import requests

 from ...typing import Any, CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class AiService(BaseProvider):
+class AiService(AbstractProvider):
     url = "https://aiservice.vercel.app/"
     working = False
     supports_gpt_35_turbo = True
9 changes: 5 additions & 4 deletions g4f/Provider/deprecated/Aivvm.py
@@ -1,9 +1,10 @@
 from __future__ import annotations

 import requests
+import json

-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
 from ...typing import CreateResult, Messages
-from json import dumps

 # to recreate this easily, send a post request to https://chat.aivvm.com/api/models
 models = {
@@ -17,7 +18,7 @@
     'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
 }

-class Aivvm(BaseProvider):
+class Aivvm(AbstractProvider):
     url = 'https://chat.aivvm.com'
     supports_stream = True
     working = False
@@ -44,7 +45,7 @@ def create_completion(cls,
             "temperature" : kwargs.get("temperature", 0.7)
         }

-        data = dumps(json_data)
+        data = json.dumps(json_data)

         headers = {
             "accept" : "text/event-stream",
4 changes: 2 additions & 2 deletions g4f/Provider/deprecated/DfeHub.py
@@ -7,10 +7,10 @@
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class DfeHub(BaseProvider):
+class DfeHub(AbstractProvider):
     url = "https://chat.dfehub.com/"
     supports_stream = True
     supports_gpt_35_turbo = True
5 changes: 2 additions & 3 deletions g4f/Provider/deprecated/EasyChat.py
@@ -2,14 +2,13 @@

 import json
 import random
-
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class EasyChat(BaseProvider):
+class EasyChat(AbstractProvider):
     url: str = "https://free.easychat.work"
     supports_stream = True
     supports_gpt_35_turbo = True
4 changes: 2 additions & 2 deletions g4f/Provider/deprecated/Equing.py
@@ -6,10 +6,10 @@
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class Equing(BaseProvider):
+class Equing(AbstractProvider):
     url: str = 'https://next.eqing.tech/'
     working = False
     supports_stream = True
7 changes: 2 additions & 5 deletions g4f/Provider/deprecated/FastGpt.py
@@ -2,15 +2,13 @@

 import json
 import random
-from abc import ABC, abstractmethod
-
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class FastGpt(BaseProvider):
+class FastGpt(AbstractProvider):
     url: str = 'https://chat9.fastgpt.me/'
     working = False
     needs_auth = False
@@ -19,7 +17,6 @@ class FastGpt(BaseProvider):
     supports_gpt_4 = False

     @staticmethod
-    @abstractmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
4 changes: 2 additions & 2 deletions g4f/Provider/deprecated/Forefront.py
@@ -5,10 +5,10 @@
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class Forefront(BaseProvider):
+class Forefront(AbstractProvider):
     url = "https://forefront.com"
     supports_stream = True
     supports_gpt_35_turbo = True
4 changes: 2 additions & 2 deletions g4f/Provider/deprecated/GetGpt.py
@@ -11,10 +11,10 @@
 from Cryptodome.Cipher import AES

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class GetGpt(BaseProvider):
+class GetGpt(AbstractProvider):
     url = 'https://chat.getgpt.world/'
     supports_stream = True
     working = False
4 changes: 2 additions & 2 deletions g4f/Provider/deprecated/Lockchat.py
@@ -5,10 +5,10 @@
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class Lockchat(BaseProvider):
+class Lockchat(AbstractProvider):
     url: str = "http://supertest.lockchat.app"
     supports_stream = True
     supports_gpt_35_turbo = True
66 changes: 66 additions & 0 deletions g4f/Provider/deprecated/NoowAi.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+from .helper import get_random_string
+
+class NoowAi(AsyncGeneratorProvider):
+    url = "https://noowai.com"
+    supports_message_history = True
+    supports_gpt_35_turbo = True
+    working = False
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+            "Accept": "*/*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Referer": f"{cls.url}/",
+            "Content-Type": "application/json",
+            "Origin": cls.url,
+            "Alt-Used": "noowai.com",
+            "Connection": "keep-alive",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Pragma": "no-cache",
+            "Cache-Control": "no-cache",
+            "TE": "trailers"
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "botId": "default",
+                "customId": "d49bc3670c3d858458576d75c8ea0f5d",
+                "session": "N/A",
+                "chatId": get_random_string(),
+                "contextId": 25,
+                "messages": messages,
+                "newMessage": messages[-1]["content"],
+                "stream": True
+            }
+            async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for line in response.content:
+                    if line.startswith(b"data: "):
+                        try:
+                            line = json.loads(line[6:])
+                            assert "type" in line
+                        except:
+                            raise RuntimeError(f"Broken line: {line.decode()}")
+                        if line["type"] == "live":
+                            yield line["data"]
+                        elif line["type"] == "end":
+                            break
+                        elif line["type"] == "error":
+                            raise RuntimeError(line["data"])
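For orientation, a minimal sketch of how an AsyncGeneratorProvider like NoowAi is typically consumed (not part of this commit; the import path is an assumption and may differ depending on how the package re-exports the class, and with working = False a live request is unlikely to succeed):

import asyncio

from g4f.Provider.deprecated.NoowAi import NoowAi  # assumed import path


async def main():
    # Stream the chunks yielded by create_async_generator and print them as they arrive.
    messages = [{"role": "user", "content": "Hello!"}]
    async for chunk in NoowAi.create_async_generator(model="gpt-3.5-turbo", messages=messages):
        print(chunk, end="", flush=True)


asyncio.run(main())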
4 changes: 2 additions & 2 deletions g4f/Provider/deprecated/V50.py
@@ -5,10 +5,10 @@
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class V50(BaseProvider):
+class V50(AbstractProvider):
     url = 'https://p5.v50.ltd'
     supports_gpt_35_turbo = True
     supports_stream = False
91 changes: 91 additions & 0 deletions g4f/Provider/deprecated/VoiGpt.py
@@ -0,0 +1,91 @@
+from __future__ import annotations
+
+import json
+import requests
+from ..base_provider import AbstractProvider
+from ...typing import Messages, CreateResult
+
+
+class VoiGpt(AbstractProvider):
+    """
+    VoiGpt - A provider for VoiGpt.com
+
+    **Note**: to use this provider you have to get your csrf token/cookie from the voigpt.com website
+
+    Args:
+        model: The model to use
+        messages: The messages to send
+        stream: Whether to stream the response
+        proxy: The proxy to use
+        access_token: The access token to use
+        **kwargs: Additional keyword arguments
+
+    Returns:
+        A CreateResult object
+    """
+    url = "https://voigpt.com"
+    working = False
+    supports_gpt_35_turbo = True
+    supports_message_history = True
+    supports_stream = False
+    _access_token: str = None
+
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        proxy: str = None,
+        access_token: str = None,
+        **kwargs
+    ) -> CreateResult:
+
+        if not model:
+            model = "gpt-3.5-turbo"
+        if not access_token:
+            access_token = cls._access_token
+        if not access_token:
+            headers = {
+                "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+                "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6",
+                "sec-ch-ua": "\"Google Chrome\";v=\"119\", \"Chromium\";v=\"119\", \"Not?A_Brand\";v=\"24\"",
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": "\"Linux\"",
+                "sec-fetch-dest": "document",
+                "sec-fetch-mode": "navigate",
+                "sec-fetch-site": "none",
+                "sec-fetch-user": "?1",
+                "upgrade-insecure-requests": "1",
+                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
+            }
+            req_response = requests.get(cls.url, headers=headers)
+            access_token = cls._access_token = req_response.cookies.get("csrftoken")
+
+        headers = {
+            "Accept-Encoding": "gzip, deflate, br",
+            "Accept-Language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6",
+            "Cookie": f"csrftoken={access_token};",
+            "Origin": "https://voigpt.com",
+            "Referer": "https://voigpt.com/",
+            "Sec-Ch-Ua": "'Google Chrome';v='119', 'Chromium';v='119', 'Not?A_Brand';v='24'",
+            "Sec-Ch-Ua-Mobile": "?0",
+            "Sec-Ch-Ua-Platform": "'Windows'",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
+            "X-Csrftoken": access_token,
+        }
+
+        payload = {
+            "messages": messages,
+        }
+        request_url = f"{cls.url}/generate_response/"
+        req_response = requests.post(request_url, headers=headers, json=payload)
+        try:
+            response = json.loads(req_response.text)
+            yield response["response"]
+        except:
+            raise RuntimeError(f"Response: {req_response.text}")

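By way of illustration, a hedged sketch of how the new VoiGpt provider might be invoked directly (not part of this commit; the import path is an assumption, and since supports_stream is False the generator yields the full response in a single chunk):

from g4f.Provider.deprecated.VoiGpt import VoiGpt  # assumed import path

# create_completion is written as a generator, so iterate even for a non-streaming provider.
messages = [{"role": "user", "content": "Say hello in one sentence."}]
for chunk in VoiGpt.create_completion(
    model="gpt-3.5-turbo",
    messages=messages,
    stream=False,
    access_token=None,  # None triggers the automatic csrftoken fetch shown in the diff above
):
    print(chunk)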
4 changes: 2 additions & 2 deletions g4f/Provider/deprecated/Wuguokai.py
@@ -5,10 +5,10 @@
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider, format_prompt
+from ..base_provider import AbstractProvider, format_prompt


-class Wuguokai(BaseProvider):
+class Wuguokai(AbstractProvider):
     url = 'https://chat.wuguokai.xyz'
     supports_gpt_35_turbo = True
     working = False
