Skip to content

Commit

Permalink
~| Merge pull request #889 from hlohaus/new
Browse files Browse the repository at this point in the history
Add CodeLinkAva Provider, Improve test_async.py
  • Loading branch information
xtekky committed Sep 11, 2023
2 parents 1fc9ec9 + c2741a9 commit edee7e7
Show file tree
Hide file tree
Showing 6 changed files with 142 additions and 15 deletions.
62 changes: 62 additions & 0 deletions g4f/Provider/ChatBase.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
from __future__ import annotations

from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider


class ChatBase(AsyncGeneratorProvider):
    """Provider backed by the chatbase.co public chat-widget API.

    Streams completion text by POSTing the conversation to a fixed,
    pre-existing chat widget and yielding the raw response chunks.
    """
    url                   = "https://www.chatbase.co"
    supports_gpt_35_turbo = True
    supports_gpt_4        = True
    working               = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        """Yield decoded response chunks for *messages*.

        Args:
            model: Model name; ``"gpt-4"`` selects a dedicated widget,
                anything else falls back to the default (gpt-3.5) widget.
            messages: Conversation history as ``{"role", "content"}`` dicts.
            **kwargs: Accepted for interface compatibility; unused.

        Raises:
            aiohttp.ClientResponseError: On a non-2xx HTTP status.
        """
        # Each supported model maps to a fixed chatbase widget id.
        if model == "gpt-4":
            chat_id = "quran---tafseer-saadi-pdf-wbgknt7zn"
        else:
            # Default widget, used for gpt-3.5-turbo and any unknown model.
            # (Replaces the original always-true `elif ... or True` hack,
            # which existed only to guarantee chat_id was always bound.)
            chat_id = "chatbase--1--pdf-p680fxvnm"
        headers = {
            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"          : "*/*",
            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            data = {
                "messages": messages,
                # Hard-coded captcha token accepted by the endpoint.
                "captchaCode": "hadsa",
                "chatId": chat_id,
                "conversationId": f"kcXpqEnqUie3dnJlsRi_O-{chat_id}"
            }
            async with session.post("https://www.chatbase.co/api/fe/chat", json=data) as response:
                response.raise_for_status()
                # The endpoint streams plain text; forward non-empty chunks as-is.
                async for stream in response.content.iter_any():
                    stream = stream.decode()
                    if stream:
                        yield stream


    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the supported call signature."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
63 changes: 63 additions & 0 deletions g4f/Provider/CodeLinkAva.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
from __future__ import annotations

from aiohttp import ClientSession
import json

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider


class CodeLinkAva(AsyncGeneratorProvider):
    """Provider backed by the CodeLink Ava chat API.

    Streams OpenAI-style server-sent events (``data: {json}`` lines) and
    yields the delta content of each chunk.
    """
    url                   = "https://ava-ai-ef611.web.app"
    supports_gpt_35_turbo = True
    working               = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        """Yield incremental content strings for *messages*.

        Args:
            model: Ignored by the endpoint; kept for interface compatibility.
            messages: Conversation history as ``{"role", "content"}`` dicts.
            **kwargs: Extra request fields merged into the JSON payload
                (may override ``temperature`` / ``stream``).

        Raises:
            aiohttp.ClientResponseError: On a non-2xx HTTP status.
        """
        headers = {
            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"          : "*/*",
            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            data = {
                "messages": messages,
                "temperature": 0.6,
                "stream": True,
                **kwargs
            }
            async with session.post("https://ava-alpha-api.codelink.io/api/chat", json=data) as response:
                response.raise_for_status()
                start = "data: "
                async for raw in response.content:
                    line = raw.decode()
                    if not line.startswith(start):
                        continue
                    # strip() instead of the original `[:-1]` slice: the
                    # slice assumed a trailing "\n" and would eat a JSON
                    # character on "\r\n" or an unterminated final chunk.
                    payload = line[len(start):].strip()
                    if payload == "[DONE]":
                        continue
                    chunk = json.loads(payload)
                    content = chunk["choices"][0]["delta"].get("content")
                    if content:
                        yield content


    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the supported call signature."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
2 changes: 1 addition & 1 deletion g4f/Provider/HuggingChat.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ async def create_async_generator(
if "error" in data:
raise RuntimeError(data["error"])
elif isinstance(data, list):
yield data[0]["generated_text"]
yield data[0]["generated_text"].strip()
else:
raise RuntimeError(f"Response: {data}")
else:
Expand Down
2 changes: 1 addition & 1 deletion g4f/Provider/OpenaiChat.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ async def create_async(
"https": proxy
}
if not access_token:
access_token = await cls.get_access_token(cookies)
access_token = await cls.get_access_token(cookies, proxies)
headers = {
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
Expand Down
4 changes: 4 additions & 0 deletions g4f/Provider/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,10 @@
from .AItianhu import AItianhu
from .Bard import Bard
from .Bing import Bing
from .ChatBase import ChatBase
from .ChatgptAi import ChatgptAi
from .ChatgptLogin import ChatgptLogin
from .CodeLinkAva import CodeLinkAva
from .DeepAi import DeepAi
from .DfeHub import DfeHub
from .EasyChat import EasyChat
Expand Down Expand Up @@ -42,8 +44,10 @@
'AItianhu',
'Bard',
'Bing',
'ChatBase',
'ChatgptAi',
'ChatgptLogin',
'CodeLinkAva',
'DeepAi',
'DfeHub',
'EasyChat',
Expand Down
24 changes: 11 additions & 13 deletions testing/test_async.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,31 +7,29 @@
import g4f
from g4f.Provider import AsyncProvider
from testing.test_providers import get_providers
from testing.log_time import log_time_async
from testing.log_time import log_time_async

async def create_async(provider: AsyncProvider):
async def create_async(provider):
model = g4f.models.gpt_35_turbo.name if provider.supports_gpt_35_turbo else g4f.models.default.name
try:
response = await log_time_async(
response = await log_time_async(
provider.create_async,
model=model,
messages=[{"role": "user", "content": "Hello Assistant!"}]
)
assert type(response) is str
assert len(response) > 0
return response
print(f"{provider.__name__}:", response)
except Exception as e:
return e
return f"{provider.__name__}: {e.__class__.__name__}: {e}"

async def run_async():
_providers: list[AsyncProvider] = [
_provider
responses: list = [
create_async(_provider)
for _provider in get_providers()
if _provider.working and hasattr(_provider, "create_async")
if _provider.working and issubclass(_provider, AsyncProvider)
]
responses = [create_async(_provider) for _provider in _providers]
responses = await asyncio.gather(*responses)
for idx, provider in enumerate(_providers):
print(f"{provider.__name__}:", responses[idx])
for error in responses:
if error:
print(error)

print("Total:", asyncio.run(log_time_async(run_async)))

0 comments on commit edee7e7

Please sign in to comment.