Skip to content
This repository has been archived by the owner on Mar 14, 2024. It is now read-only.

Commit

Permalink
Add files via upload
Browse files Browse the repository at this point in the history
  • Loading branch information
VadimBoev authored Oct 8, 2023
1 parent 7f116bc commit 7bd67f5
Show file tree
Hide file tree
Showing 4 changed files with 244 additions and 0 deletions.
44 changes: 44 additions & 0 deletions g4f/Provider/unfinished/Komo.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
from __future__ import annotations

import json

from ...requests import StreamSession
from ...typing import AsyncGenerator
from ..base_provider import AsyncGeneratorProvider, format_prompt

class Komo(AsyncGeneratorProvider):
    """Provider for komo.ai's server-sent-events "ask" endpoint."""
    url = "https://komo.ai/api/ask"
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        """Stream answer fragments from komo.ai for the given conversation.

        Args:
            model: Ignored by this provider; komo.ai chooses the model.
            messages: Chat history as [{"role": ..., "content": ...}, ...].

        Yields:
            Decoded JSON payload of each `data:` line that follows an
            `event: line` marker in the SSE stream.
        """
        async with StreamSession(impersonate="chrome107") as session:
            prompt = format_prompt(messages)
            data = {
                "query": prompt,
                "FLAG_URLEXTRACT": "false",
                "token": "",
                "FLAG_MODELA": "1",
            }
            headers = {
                'authority': 'komo.ai',
                'accept': 'text/event-stream',
                'cache-control': 'no-cache',
                'referer': 'https://komo.ai/',
            }

            async with session.get(cls.url, params=data, headers=headers) as response:
                response.raise_for_status()
                # Renamed from `next`, which shadowed the builtin of the same name.
                # True once an `event: line` marker was seen; the following
                # `data:` line then carries the actual payload.
                expect_data = False
                async for line in response.iter_lines():
                    if line == b"event: line":
                        expect_data = True
                    elif expect_data and line.startswith(b"data: "):
                        yield json.loads(line[6:])
                        expect_data = False

97 changes: 97 additions & 0 deletions g4f/Provider/unfinished/MikuChat.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
from __future__ import annotations

import random, json
from datetime import datetime
from ...requests import StreamSession

from ...typing import AsyncGenerator
from ..base_provider import AsyncGeneratorProvider


class MikuChat(AsyncGeneratorProvider):
    """Provider for ai.okmiku.com, whose backend (api.catgpt.cc) speaks an
    OpenAI-compatible streaming chat-completions protocol."""
    url = "https://ai.okmiku.com"
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        """Yield response text chunks from the streaming completions endpoint.

        Args:
            model: Model name; defaults to "gpt-3.5-turbo" when falsy.
            messages: Chat history as [{"role": ..., "content": ...}, ...].

        Yields:
            Non-empty `delta.content` strings from each SSE `data:` event.
        """
        if not model:
            model = "gpt-3.5-turbo"
        headers = {
            "authority": "api.catgpt.cc",
            "accept": "application/json",
            "origin": cls.url,
            "referer": f"{cls.url}/chat/",
            'x-app-version': 'undefined',
            'x-date': get_datetime(),
            'x-fingerprint': get_fingerprint(),
            'x-platform': 'web'
        }
        async with StreamSession(headers=headers, impersonate="chrome107") as session:
            data = {
                "model": model,
                "top_p": 0.8,
                "temperature": 0.5,
                "presence_penalty": 1,
                "frequency_penalty": 0,
                "max_tokens": 2000,
                "stream": True,
                "messages": messages,
            }
            async with session.post("https://api.catgpt.cc/ai/v1/chat/completions", json=data) as response:
                # Fixed: a leftover debug `print(await response.text())` here
                # consumed the streamed body before iteration and dumped it to
                # stdout; it has been removed.
                response.raise_for_status()
                async for line in response.iter_lines():
                    if line.startswith(b"data: "):
                        payload = line[6:]
                        # OpenAI-style streams terminate with a literal
                        # "[DONE]" sentinel, which is not valid JSON.
                        if payload.strip() == b"[DONE]":
                            break
                        event = json.loads(payload)
                        chunk = event["choices"][0]["delta"].get("content")
                        if chunk:
                            yield chunk

def k(e: str, t: int):
    """32-bit MurmurHash3-style hash of string *e* seeded with *t*.

    Direct port of the site's obfuscated JavaScript fingerprint hash. The
    port's quirks — rotations without 32-bit masking, and the tail mixing
    step running even when the input length is a multiple of four — are
    preserved on purpose so the result matches what the server expects.
    """
    C1 = 3432918353
    C2 = 461845907

    def mul32(v: int, m: int) -> int:
        # Emulates the JS 32-bit multiply-and-truncate used throughout:
        # ((v & 0xffff)*m + (((v >> 16)*m & 0xffff) << 16)) & 0xffffffff
        return (v & 65535) * m + (((v >> 16) * m & 65535) << 16) & 4294967295

    tail = len(e) & 3       # leftover chars after whole 4-char blocks
    limit = len(e) - tail
    h = t
    idx = 0
    word = 0
    while idx < limit:
        # Pack four chars into a little-endian 32-bit word.
        word = (ord(e[idx]) & 255) | ((ord(e[idx + 1]) & 255) << 8) | ((ord(e[idx + 2]) & 255) << 16) | ((ord(e[idx + 3]) & 255) << 24)
        idx += 4
        word = mul32(word, C1)
        word = (word << 15) | (word >> 17)
        word = mul32(word, C2)
        h ^= word
        h = (h << 13) | (h >> 19)
        step = mul32(h, 5)
        h = (step & 65535) + 27492 + (((step >> 16) + 58964 & 65535) << 16)

    # Tail bytes are XORed into the last block value (quirk: `word` is not
    # reset to zero first, and this mix runs even when tail == 0).
    if tail == 3:
        word ^= (ord(e[idx + 2]) & 255) << 16
    elif tail == 2:
        word ^= (ord(e[idx + 1]) & 255) << 8
    elif tail == 1:
        word ^= ord(e[idx]) & 255
    word = mul32(word, C1)
    word = (word << 15) | (word >> 17)
    word = mul32(word, C2)
    h ^= word

    # Finalization: avalanche the accumulated bits.
    h ^= len(e)
    h ^= h >> 16
    h = mul32(h, 2246822507)
    h ^= h >> 13
    h = mul32(h, 3266489909)
    h ^= h >> 16
    return h & 0xFFFFFFFF

def get_fingerprint() -> str:
    """Return a pseudo-random client fingerprint: the hash `k` of a random
    five-digit number, seeded with 256, rendered as a decimal string."""
    seed = str(int(random.random() * 100000))
    return str(k(seed, 256))

def get_datetime() -> str:
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return f"{datetime.now():%Y-%m-%d %H:%M:%S}"
100 changes: 100 additions & 0 deletions g4f/Provider/unfinished/PerplexityAi.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
from __future__ import annotations

import json
import time
import base64
from curl_cffi.requests import AsyncSession

from ..base_provider import AsyncProvider, format_prompt, get_cookies


class PerplexityAi(AsyncProvider):
    """Provider that queries www.perplexity.ai over its socket.io polling
    transport and returns the complete answer text."""
    url = "https://www.perplexity.ai"
    supports_gpt_35_turbo = True
    # Web sources accompanying the last answer; filled in by create_async.
    _sources = []

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> str:
        """Ask Perplexity and return the full answer string.

        Args:
            model: Ignored by this provider.
            messages: Chat history as [{"role": ..., "content": ...}, ...].
            proxy: Optional HTTPS proxy URL.

        Returns:
            The "answer" text; cited sources are stored in cls._sources.
        """
        url = cls.url + "/socket.io/?EIO=4&transport=polling"
        headers = {
            "Referer": f"{cls.url}/"
        }
        async with AsyncSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107") as session:
            # Establish an anonymous auth session. (Fixed: this request was
            # previously issued twice back to back by accident.)
            url_session = "https://www.perplexity.ai/api/auth/session"
            response = await session.get(url_session)
            response.raise_for_status()

            # socket.io handshake: the first poll returns '0{...,"sid":...}';
            # strip the leading packet-type digit before parsing.
            response = await session.get(url, params={"t": timestamp()})
            response.raise_for_status()
            sid = json.loads(response.text[1:])["sid"]

            # Drain the pending handshake packet for this sid.
            response = await session.get(url, params={"t": timestamp(), "sid": sid})
            response.raise_for_status()

            # '40' = socket.io CONNECT packet with an anonymous JWT.
            data = '40{"jwt":"anonymous-ask-user"}'
            response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
            response.raise_for_status()

            response = await session.get(url, params={"t": timestamp(), "sid": sid})
            response.raise_for_status()

            # '42' EVENT packet (+ ack id) carrying the actual question.
            data = "424" + json.dumps([
                "perplexity_ask",
                format_prompt(messages),
                {
                    "version":"2.1",
                    "source":"default",
                    "language":"en",
                    "timezone": time.tzname[0],
                    "search_focus":"internet",
                    "mode":"concise"
                }
            ])
            response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
            response.raise_for_status()

            # Poll until the '43' ack (here prefixed '434') carries the result.
            # NOTE(review): there is no timeout; if the server never answers,
            # this loop polls forever — consider bounding it.
            while True:
                response = await session.get(url, params={"t": timestamp(), "sid": sid})
                response.raise_for_status()
                for line in response.text.splitlines():
                    if line.startswith("434"):
                        result = json.loads(json.loads(line[3:])[0]["text"])

                        cls._sources = [{
                            "title": source["name"],
                            "url": source["url"],
                            "snippet": source["snippet"]
                        } for source in result["web_results"]]

                        return result["answer"]

    @classmethod
    def get_sources(cls):
        """Return the sources collected by the most recent create_async call."""
        return cls._sources


    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the parameters this provider supports."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"


def timestamp() -> str:
    """URL-safe base64 token for the socket.io `t` query parameter, derived
    from the seconds elapsed since a fixed epoch offset (1407782612)."""
    seconds = int(time.time()) - 1407782612
    return base64.urlsafe_b64encode(seconds.to_bytes(4, "big")).decode()
3 changes: 3 additions & 0 deletions g4f/Provider/unfinished/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
from .MikuChat import MikuChat
from .PerplexityAi import PerplexityAi
from .Komo import Komo

0 comments on commit 7bd67f5

Please sign in to comment.