diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/AItianhu.py new file mode 100644 index 0000000000000000000000000000000000000000..2e12989619ae3dd03b1b0b8e2c23784fb4aad5d2 --- /dev/null +++ b/g4f/Provider/AItianhu.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +import json +from aiohttp import ClientSession, http + +from ..typing import AsyncGenerator +from .base_provider import AsyncGeneratorProvider, format_prompt + + +class AItianhu(AsyncGeneratorProvider): + url = "https://www.aitianhu.com" + working = True + supports_gpt_35_turbo = True + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs + ) -> AsyncGenerator: + headers = { + "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0", + "Accept": "application/json, text/plain, */*", + "Accept-Language": "de,en-US;q=0.7,en;q=0.3", + "Content-Type": "application/json", + "Origin": cls.url, + "Connection": "keep-alive", + "Referer": cls.url + "/", + "Sec-Fetch-Dest": "empty", + "Sec-Fetch-Mode": "cors", + "Sec-Fetch-Site": "same-origin", + } + async with ClientSession( + headers=headers, + version=http.HttpVersion10 + ) as session: + data = { + "prompt": format_prompt(messages), + "options": {}, + "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.", + "temperature": 0.8, + "top_p": 1, + **kwargs + } + async with session.post( + cls.url + "/api/chat-process", + proxy=proxy, + json=data, + ssl=False, + ) as response: + response.raise_for_status() + async for line in response.content: + line = json.loads(line.decode('utf-8')) + token = line["detail"]["choices"][0]["delta"].get("content") + if token: + yield token + + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("proxy", "str"), + ("temperature", "float"), + ("top_p", "int"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/g4f/Provider/Acytoo.py b/g4f/Provider/Acytoo.py new file mode 100644 index 0000000000000000000000000000000000000000..d36ca6da22ddfa43690abdd0db27e6f971320f93 --- /dev/null +++ b/g4f/Provider/Acytoo.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +from aiohttp import ClientSession + +from ..typing import AsyncGenerator +from .base_provider import AsyncGeneratorProvider + + +class Acytoo(AsyncGeneratorProvider): + url = 'https://chat.acytoo.com' + working = True + supports_gpt_35_turbo = True + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs + ) -> AsyncGenerator: + + async with ClientSession( + headers=_create_header() + ) as session: + async with session.post( + cls.url + '/api/completions', + proxy=proxy, + json=_create_payload(messages, **kwargs) + ) as response: + response.raise_for_status() + async for stream in response.content.iter_any(): + if stream: + yield stream.decode() + + +def _create_header(): + return { + 'accept': '*/*', + 'content-type': 'application/json', + } + + +def _create_payload(messages: list[dict[str, str]], temperature: float = 0.5, **kwargs): + return { + 'key' : '', + 'model' : 'gpt-3.5-turbo', + 'messages' : messages, + 'temperature' : temperature, + 'password' : '' + } \ No newline at end of file diff --git a/g4f/Provider/AiService.py b/g4f/Provider/AiService.py new 
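A minimal consumption sketch (not part of the applied patch), assuming the AItianhu and Acytoo modules above are importable exactly as added here; both expose the same create_async_generator classmethod, which returns an async generator of response chunks:

import asyncio

from g4f.Provider.Acytoo import Acytoo


async def demo() -> None:
    # messages use the list[dict[str, str]] shape used throughout this patch
    messages = [{"role": "user", "content": "Hello"}]
    # the classmethod returns an async generator that yields text chunks as they arrive
    async for chunk in Acytoo.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="", flush=True)


if __name__ == "__main__":
    asyncio.run(demo())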
file mode 100644 index 0000000000000000000000000000000000000000..2b5a6e7de3912f7588377a881b7d5523e35d7212 --- /dev/null +++ b/g4f/Provider/AiService.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +import requests + +from ..typing import Any, CreateResult +from .base_provider import BaseProvider + + +class AiService(BaseProvider): + url = "https://aiservice.vercel.app/" + working = False + supports_gpt_35_turbo = True + + @staticmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, + **kwargs: Any, + ) -> CreateResult: + base = "\n".join(f"{message['role']}: {message['content']}" for message in messages) + base += "\nassistant: " + + headers = { + "accept": "*/*", + "content-type": "text/plain;charset=UTF-8", + "sec-fetch-dest": "empty", + "sec-fetch-mode": "cors", + "sec-fetch-site": "same-origin", + "Referer": "https://aiservice.vercel.app/chat", + } + data = {"input": base} + url = "https://aiservice.vercel.app/api/chat/answer" + response = requests.post(url, headers=headers, json=data) + response.raise_for_status() + yield response.json()["data"] diff --git a/g4f/Provider/Aichat.py b/g4f/Provider/Aichat.py new file mode 100644 index 0000000000000000000000000000000000000000..8edd17e2c6938e2fdd4886e2354580f7e4108960 --- /dev/null +++ b/g4f/Provider/Aichat.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +from aiohttp import ClientSession + +from .base_provider import AsyncProvider, format_prompt + + +class Aichat(AsyncProvider): + url = "https://chat-gpt.org/chat" + working = True + supports_gpt_35_turbo = True + + @staticmethod + async def create_async( + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs + ) -> str: + headers = { + "authority": "chat-gpt.org", + "accept": "*/*", + "cache-control": "no-cache", + "content-type": "application/json", + "origin": "https://chat-gpt.org", + "pragma": "no-cache", + "referer": "https://chat-gpt.org/chat", + "sec-ch-ua-mobile": "?0", + "sec-ch-ua-platform": '"macOS"', + "sec-fetch-dest": "empty", + "sec-fetch-mode": "cors", + "sec-fetch-site": "same-origin", + "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36", + } + async with ClientSession( + headers=headers + ) as session: + json_data = { + "message": format_prompt(messages), + "temperature": kwargs.get('temperature', 0.5), + "presence_penalty": 0, + "top_p": kwargs.get('top_p', 1), + "frequency_penalty": 0, + } + async with session.post( + "https://chat-gpt.org/api/text", + proxy=proxy, + json=json_data + ) as response: + response.raise_for_status() + result = await response.json() + if not result['response']: + raise Exception(f"Error Response: {result}") + return result["message"] diff --git a/g4f/Provider/Ails.py b/g4f/Provider/Ails.py new file mode 100644 index 0000000000000000000000000000000000000000..d533ae247cba63b236668375786124852f5bbad5 --- /dev/null +++ b/g4f/Provider/Ails.py @@ -0,0 +1,106 @@ +from __future__ import annotations + +import hashlib +import time +import uuid +import json +from datetime import datetime +from aiohttp import ClientSession + +from ..typing import SHA256, AsyncGenerator +from .base_provider import AsyncGeneratorProvider + + +class Ails(AsyncGeneratorProvider): + url: str = "https://ai.ls" + working = True + supports_gpt_35_turbo = True + + @staticmethod + async def create_async_generator( + model: str, + messages: list[dict[str, str]], + stream: bool, + proxy: str = None, + **kwargs + ) 
-> AsyncGenerator: + headers = { + "authority": "api.caipacity.com", + "accept": "*/*", + "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", + "authorization": "Bearer free", + "client-id": str(uuid.uuid4()), + "client-v": "0.1.278", + "content-type": "application/json", + "origin": "https://ai.ls", + "referer": "https://ai.ls/", + "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', + "sec-ch-ua-mobile": "?0", + "sec-ch-ua-platform": '"Windows"', + "sec-fetch-dest": "empty", + "sec-fetch-mode": "cors", + "sec-fetch-site": "cross-site", + "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36", + "from-url": "https://ai.ls/?chat=1" + } + async with ClientSession( + headers=headers + ) as session: + timestamp = _format_timestamp(int(time.time() * 1000)) + json_data = { + "model": "gpt-3.5-turbo", + "temperature": kwargs.get("temperature", 0.6), + "stream": True, + "messages": messages, + "d": datetime.now().strftime("%Y-%m-%d"), + "t": timestamp, + "s": _hash({"t": timestamp, "m": messages[-1]["content"]}), + } + async with session.post( + "https://api.caipacity.com/v1/chat/completions", + proxy=proxy, + json=json_data + ) as response: + response.raise_for_status() + start = "data: " + async for line in response.content: + line = line.decode('utf-8') + if line.startswith(start) and line != "data: [DONE]": + line = line[len(start):-1] + line = json.loads(line) + token = line["choices"][0]["delta"].get("content") + if token: + if "ai.ls" in token or "ai.ci" in token: + raise Exception("Response Error: " + token) + yield token + + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" + + +def _hash(json_data: dict[str, str]) -> SHA256: + base_string: str = "%s:%s:%s:%s" % ( + json_data["t"], + json_data["m"], + "WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf", + len(json_data["m"]), + ) + + return SHA256(hashlib.sha256(base_string.encode()).hexdigest()) + + +def _format_timestamp(timestamp: int) -> str: + e = timestamp + n = e % 10 + r = n + 1 if n % 2 == 0 else n + return str(e - n + r) \ No newline at end of file diff --git a/g4f/Provider/Bard.py b/g4f/Provider/Bard.py new file mode 100644 index 0000000000000000000000000000000000000000..2137d820c447eb5610b6d1c629b36bc668a2fd72 --- /dev/null +++ b/g4f/Provider/Bard.py @@ -0,0 +1,91 @@ +from __future__ import annotations + +import json +import random +import re + +from aiohttp import ClientSession + +from .base_provider import AsyncProvider, format_prompt, get_cookies + + +class Bard(AsyncProvider): + url = "https://bard.google.com" + needs_auth = True + working = True + + @classmethod + async def create_async( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + cookies: dict = None, + **kwargs + ) -> str: + prompt = format_prompt(messages) + if proxy and "://" not in proxy: + proxy = f"http://{proxy}" + if not cookies: + cookies = get_cookies(".google.com") + + headers = { + 'authority': 'bard.google.com', + 'content-type': 'application/x-www-form-urlencoded;charset=UTF-8', + 'origin': 'https://bard.google.com', + 'referer': 'https://bard.google.com/', + 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) 
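A small worked sketch (not part of the applied patch) of how the Ails provider above signs a request with its own _format_timestamp and _hash helpers: the timestamp rounding forces the last digit to be odd, and the hash is a SHA-256 over "t:message:<hard-coded secret>:len(message)":

import time

from g4f.Provider.Ails import _format_timestamp, _hash

t = _format_timestamp(int(time.time() * 1000))  # millisecond timestamp, last digit forced odd
signature = _hash({"t": t, "m": "Hello"})       # sha256 of "t:m:<secret>:len(m)"
print(t, signature)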
Chrome/111.0.0.0 Safari/537.36', + 'x-same-domain': '1', + } + + async with ClientSession( + cookies=cookies, + headers=headers + ) as session: + async with session.get(cls.url, proxy=proxy) as response: + text = await response.text() + + match = re.search(r'SNlM0e\":\"(.*?)\"', text) + if not match: + raise RuntimeError("No snlm0e value.") + snlm0e = match.group(1) + + params = { + 'bl': 'boq_assistant-bard-web-server_20230326.21_p0', + '_reqid': random.randint(1111, 9999), + 'rt': 'c' + } + + data = { + 'at': snlm0e, + 'f.req': json.dumps([None, json.dumps([[prompt]])]) + } + + intents = '.'.join([ + 'assistant', + 'lamda', + 'BardFrontendService' + ]) + + async with session.post( + f'{cls.url}/_/BardChatUi/data/{intents}/StreamGenerate', + data=data, + params=params, + proxy=proxy + ) as response: + response = await response.text() + response = json.loads(response.splitlines()[3])[0][2] + response = json.loads(response)[4][0][1][0] + return response + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("proxy", "str"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py new file mode 100644 index 0000000000000000000000000000000000000000..05be27e7285590ca063a636f8b601d92665ae832 --- /dev/null +++ b/g4f/Provider/Bing.py @@ -0,0 +1,283 @@ +from __future__ import annotations + +import random +import json +import os +from aiohttp import ClientSession, ClientTimeout +from ..typing import AsyncGenerator +from .base_provider import AsyncGeneratorProvider, get_cookies + + +class Bing(AsyncGeneratorProvider): + url = "https://bing.com/chat" + working = True + supports_gpt_4 = True + + @staticmethod + def create_async_generator( + model: str, + messages: list[dict[str, str]], + cookies: dict = None, **kwargs) -> AsyncGenerator: + + if not cookies: + cookies = get_cookies(".bing.com") + if len(messages) < 2: + prompt = messages[0]["content"] + context = None + else: + prompt = messages[-1]["content"] + context = create_context(messages[:-1]) + + if not cookies or "SRCHD" not in cookies: + cookies = { + 'SRCHD' : 'AF=NOFORM', + 'PPLState' : '1', + 'KievRPSSecAuth': '', + 'SUID' : '', + 'SRCHUSR' : '', + 'SRCHHPGUSR' : '', + } + return stream_generate(prompt, context, cookies) + +def create_context(messages: list[dict[str, str]]): + context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages) + + return context + +class Conversation(): + def __init__(self, conversationId: str, clientId: str, conversationSignature: str) -> None: + self.conversationId = conversationId + self.clientId = clientId + self.conversationSignature = conversationSignature + +async def create_conversation(session: ClientSession) -> Conversation: + url = 'https://www.bing.com/turing/conversation/create' + async with await session.get(url) as response: + response = await response.json() + conversationId = response.get('conversationId') + clientId = response.get('clientId') + conversationSignature = response.get('conversationSignature') + + if not conversationId or not clientId or not conversationSignature: + raise Exception('Failed to create conversation.') + + return Conversation(conversationId, clientId, conversationSignature) + +async def list_conversations(session: ClientSession) -> list: + url = "https://www.bing.com/turing/conversation/chats" + async with session.get(url) as 
response: + response = await response.json() + return response["chats"] + +async def delete_conversation(session: ClientSession, conversation: Conversation) -> list: + url = "https://sydney.bing.com/sydney/DeleteSingleConversation" + json = { + "conversationId": conversation.conversationId, + "conversationSignature": conversation.conversationSignature, + "participant": {"id": conversation.clientId}, + "source": "cib", + "optionsSets": ["autosave"] + } + async with session.post(url, json=json) as response: + response = await response.json() + return response["result"]["value"] == "Success" + +class Defaults: + delimiter = "\x1e" + ip_address = f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}" + + allowedMessageTypes = [ + "Chat", + "Disengaged", + "AdsQuery", + "SemanticSerp", + "GenerateContentQuery", + "SearchQuery", + "ActionRequest", + "Context", + "Progress", + "AdsQuery", + "SemanticSerp", + ] + + sliceIds = [ + "winmuid3tf", + "osbsdusgreccf", + "ttstmout", + "crchatrev", + "winlongmsgtf", + "ctrlworkpay", + "norespwtf", + "tempcacheread", + "temptacache", + "505scss0", + "508jbcars0", + "515enbotdets0", + "5082tsports", + "515vaoprvs", + "424dagslnv1s0", + "kcimgattcf", + "427startpms0", + ] + + location = { + "locale": "en-US", + "market": "en-US", + "region": "US", + "locationHints": [ + { + "country": "United States", + "state": "California", + "city": "Los Angeles", + "timezoneoffset": 8, + "countryConfidence": 8, + "Center": {"Latitude": 34.0536909, "Longitude": -118.242766}, + "RegionType": 2, + "SourceType": 1, + } + ], + } + + headers = { + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'cache-control': 'max-age=0', + 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"', + 'sec-ch-ua-arch': '"x86"', + 'sec-ch-ua-bitness': '"64"', + 'sec-ch-ua-full-version': '"110.0.1587.69"', + 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-model': '""', + 'sec-ch-ua-platform': '"Windows"', + 'sec-ch-ua-platform-version': '"15.0.0"', + 'sec-fetch-dest': 'document', + 'sec-fetch-mode': 'navigate', + 'sec-fetch-site': 'none', + 'sec-fetch-user': '?1', + 'upgrade-insecure-requests': '1', + 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69', + 'x-edge-shopping-flag': '1', + 'x-forwarded-for': ip_address, + } + + optionsSets = { + "optionsSets": [ + 'saharasugg', + 'enablenewsfc', + 'clgalileo', + 'gencontentv3', + "nlu_direct_response_filter", + "deepleo", + "disable_emoji_spoken_text", + "responsible_ai_policy_235", + "enablemm", + "h3precise" + "dtappid", + "cricinfo", + "cricinfov2", + "dv3sugg", + "nojbfedge" + ] + } + +def format_message(msg: dict) -> str: + return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter + +def create_message(conversation: Conversation, prompt: str, context: str=None) -> str: + struct = { + 'arguments': [ + { + **Defaults.optionsSets, + 'source': 'cib', + 'allowedMessageTypes': Defaults.allowedMessageTypes, + 'sliceIds': Defaults.sliceIds, + 'traceId': os.urandom(16).hex(), + 'isStartOfSession': True, + 'message': Defaults.location | { + 'author': 'user', + 'inputMethod': 'Keyboard', + 'text': prompt, + 'messageType': 'Chat' + }, + 'conversationSignature': conversation.conversationSignature, + 'participant': { + 'id': conversation.clientId + }, + 'conversationId': 
conversation.conversationId + } + ], + 'invocationId': '0', + 'target': 'chat', + 'type': 4 + } + + if context: + struct['arguments'][0]['previousMessages'] = [{ + "author": "user", + "description": context, + "contextType": "WebPage", + "messageType": "Context", + "messageId": "discover-web--page-ping-mriduna-----" + }] + return format_message(struct) + +async def stream_generate( + prompt: str, + context: str=None, + cookies: dict=None + ): + async with ClientSession( + timeout=ClientTimeout(total=900), + cookies=cookies, + headers=Defaults.headers, + ) as session: + conversation = await create_conversation(session) + try: + async with session.ws_connect( + 'wss://sydney.bing.com/sydney/ChatHub', + autoping=False, + ) as wss: + + await wss.send_str(format_message({'protocol': 'json', 'version': 1})) + msg = await wss.receive(timeout=900) + + await wss.send_str(create_message(conversation, prompt, context)) + + response_txt = '' + result_text = '' + returned_text = '' + final = False + + while not final: + msg = await wss.receive(timeout=900) + objects = msg.data.split(Defaults.delimiter) + for obj in objects: + if obj is None or not obj: + continue + + response = json.loads(obj) + if response.get('type') == 1 and response['arguments'][0].get('messages'): + message = response['arguments'][0]['messages'][0] + if (message['contentOrigin'] != 'Apology'): + response_txt = result_text + \ + message['adaptiveCards'][0]['body'][0].get('text', '') + + if message.get('messageType'): + inline_txt = message['adaptiveCards'][0]['body'][0]['inlines'][0].get('text') + response_txt += inline_txt + '\n' + result_text += inline_txt + '\n' + + if response_txt.startswith(returned_text): + new = response_txt[len(returned_text):] + if new != "\n": + yield new + returned_text = response_txt + elif response.get('type') == 2: + result = response['item']['result'] + if result.get('error'): + raise Exception(f"{result['value']}: {result['message']}") + final = True + break + finally: + await delete_conversation(session, conversation) \ No newline at end of file diff --git a/g4f/Provider/ChatgptAi.py b/g4f/Provider/ChatgptAi.py new file mode 100644 index 0000000000000000000000000000000000000000..e6416cc3ce13728e137fa4c7f95f2f44daa9253f --- /dev/null +++ b/g4f/Provider/ChatgptAi.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +import re +import html +import json +from aiohttp import ClientSession + +from ..typing import AsyncGenerator +from .base_provider import AsyncGeneratorProvider + + +class ChatgptAi(AsyncGeneratorProvider): + url: str = "https://chatgpt.ai/" + working = True + supports_gpt_35_turbo = True + _system_data = None + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs + ) -> AsyncGenerator: + headers = { + "authority" : "chatgpt.ai", + "accept" : "*/*", + "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", + "cache-control" : "no-cache", + "origin" : "https://chatgpt.ai", + "pragma" : "no-cache", + "referer" : cls.url, + "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', + "sec-ch-ua-mobile" : "?0", + "sec-ch-ua-platform" : '"Windows"', + "sec-fetch-dest" : "empty", + "sec-fetch-mode" : "cors", + "sec-fetch-site" : "same-origin", + "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36", + } + async with ClientSession( + headers=headers + ) as session: + if 
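For reference (not part of the applied patch): the Bing provider above frames every websocket payload as a JSON document terminated by the record separator "\x1e" (see Defaults.delimiter and format_message) and splits incoming frames on the same character. The round trip, written against only what this patch defines:

import json

DELIMITER = "\x1e"

def frame(msg: dict) -> str:
    # mirror of format_message: JSON followed by the record separator
    return json.dumps(msg, ensure_ascii=False) + DELIMITER

def unframe(raw: str) -> list[dict]:
    # a single websocket frame may carry several records; drop empty trailing chunks
    return [json.loads(part) for part in raw.split(DELIMITER) if part]

print(unframe(frame({"protocol": "json", "version": 1})))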
not cls._system_data: + async with session.get(cls.url, proxy=proxy) as response: + response.raise_for_status() + match = re.findall(r"data-system='([^']+)'", await response.text()) + if not match: + raise RuntimeError("No system data") + cls._system_data = json.loads(html.unescape(match[0])) + + data = { + "botId": cls._system_data["botId"], + "clientId": "", + "contextId": cls._system_data["contextId"], + "id": cls._system_data["id"], + "messages": messages[:-1], + "newMessage": messages[-1]["content"], + "session": cls._system_data["sessionId"], + "stream": True + } + async with session.post( + "https://chatgpt.ai/wp-json/mwai-ui/v1/chats/submit", + proxy=proxy, + json=data + ) as response: + response.raise_for_status() + start = "data: " + async for line in response.content: + line = line.decode('utf-8') + if line.startswith(start): + line = json.loads(line[len(start):-1]) + if line["type"] == "live": + yield line["data"] \ No newline at end of file diff --git a/g4f/Provider/ChatgptLogin.py b/g4f/Provider/ChatgptLogin.py new file mode 100644 index 0000000000000000000000000000000000000000..8b868f8e88b825b87fd7ded88d0ac670c031d030 --- /dev/null +++ b/g4f/Provider/ChatgptLogin.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +import os, re +from aiohttp import ClientSession + +from .base_provider import AsyncProvider, format_prompt + + +class ChatgptLogin(AsyncProvider): + url = "https://opchatgpts.net" + supports_gpt_35_turbo = True + working = True + _nonce = None + + @classmethod + async def create_async( + cls, + model: str, + messages: list[dict[str, str]], + **kwargs + ) -> str: + headers = { + "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36", + "Accept" : "*/*", + "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", + "Origin" : "https://opchatgpts.net", + "Alt-Used" : "opchatgpts.net", + "Referer" : "https://opchatgpts.net/chatgpt-free-use/", + "Sec-Fetch-Dest" : "empty", + "Sec-Fetch-Mode" : "cors", + "Sec-Fetch-Site" : "same-origin", + } + async with ClientSession( + headers=headers + ) as session: + if not cls._nonce: + async with session.get( + "https://opchatgpts.net/chatgpt-free-use/", + params={"id": os.urandom(6).hex()}, + ) as response: + result = re.search(r'data-nonce="(.*?)"', await response.text()) + if not result: + raise RuntimeError("No nonce value") + cls._nonce = result.group(1) + data = { + "_wpnonce": cls._nonce, + "post_id": 28, + "url": "https://opchatgpts.net/chatgpt-free-use", + "action": "wpaicg_chat_shortcode_message", + "message": format_prompt(messages), + "bot_id": 0 + } + async with session.post("https://opchatgpts.net/wp-admin/admin-ajax.php", data=data) as response: + response.raise_for_status() + return (await response.json())["data"] + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/g4f/Provider/DeepAi.py b/g4f/Provider/DeepAi.py new file mode 100644 index 0000000000000000000000000000000000000000..88225f1f98cc16b1eb28577efc91efb6b523f8f0 --- /dev/null +++ b/g4f/Provider/DeepAi.py @@ -0,0 +1,63 @@ +from __future__ import annotations + +import json +import js2py +from aiohttp import ClientSession + +from ..typing import AsyncGenerator +from 
.base_provider import AsyncGeneratorProvider + + +class DeepAi(AsyncGeneratorProvider): + url: str = "https://deepai.org" + working = True + supports_gpt_35_turbo = True + + @staticmethod + async def create_async_generator( + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs + ) -> AsyncGenerator: + + token_js = """ +var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36' +var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y; +h = Math.round(1E11 * Math.random()) + ""; +f = function () { + for (var p = [], q = 0; 64 > q;) p[q] = 0 | 4294967296 * Math.sin(++q % Math.PI); + + return function (t) { + var v, y, H, ea = [v = 1732584193, y = 4023233417, ~v, ~y], + Z = [], + A = unescape(encodeURI(t)) + "\u0080", + z = A.length; + t = --z / 4 + 2 | 15; + for (Z[--t] = 8 * z; ~z;) Z[z >> 2] |= A.charCodeAt(z) << 8 * z--; + for (q = A = 0; q < t; q += 16) { + for (z = ea; 64 > A; z = [H = z[3], v + ((H = z[0] + [v & y | ~v & H, H & v | ~H & y, v ^ y ^ H, y ^ (v | ~H)][z = A >> 4] + p[A] + ~~Z[q | [A, 5 * A + 1, 3 * A + 5, 7 * A][z] & 15]) << (z = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21][4 * z + A++ % 4]) | H >>> -z), v, y]) v = z[1] | 0, y = z[2]; + for (A = 4; A;) ea[--A] += z[A] + } + for (t = ""; 32 > A;) t += (ea[A >> 3] >> 4 * (1 ^ A++) & 15).toString(16); + return t.split("").reverse().join("") + } +}(); + +"tryit-" + h + "-" + f(agent + f(agent + f(agent + h + "x"))); +""" + + payload = {"chas_style": "chat", "chatHistory": json.dumps(messages)} + api_key = js2py.eval_js(token_js) + headers = { + "api-key": api_key, + "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36", + } + async with ClientSession( + headers=headers + ) as session: + async with session.post("https://api.deepai.org/make_me_a_pizza", proxy=proxy, data=payload) as response: + response.raise_for_status() + async for stream in response.content.iter_any(): + if stream: + yield stream.decode() \ No newline at end of file diff --git a/g4f/Provider/DfeHub.py b/g4f/Provider/DfeHub.py new file mode 100644 index 0000000000000000000000000000000000000000..d40e03803130ff4169f66bfe4f9cd2e90239f784 --- /dev/null +++ b/g4f/Provider/DfeHub.py @@ -0,0 +1,77 @@ +from __future__ import annotations + +import json +import re +import time + +import requests + +from ..typing import Any, CreateResult +from .base_provider import BaseProvider + + +class DfeHub(BaseProvider): + url = "https://chat.dfehub.com/" + supports_stream = True + supports_gpt_35_turbo = True + + @staticmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, **kwargs: Any) -> CreateResult: + + headers = { + "authority" : "chat.dfehub.com", + "accept" : "*/*", + "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", + "content-type" : "application/json", + "origin" : "https://chat.dfehub.com", + "referer" : "https://chat.dfehub.com/", + "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', + "sec-ch-ua-mobile" : "?0", + "sec-ch-ua-platform": '"macOS"', + "sec-fetch-dest" : "empty", + "sec-fetch-mode" : "cors", + "sec-fetch-site" : "same-origin", + "user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/114.0.0.0 Safari/537.36", + "x-requested-with" : "XMLHttpRequest", + } + + json_data = { + "messages" : messages, + "model" : "gpt-3.5-turbo", + "temperature" : kwargs.get("temperature", 0.5), + "presence_penalty" : kwargs.get("presence_penalty", 0), + "frequency_penalty" : kwargs.get("frequency_penalty", 0), + "top_p" : kwargs.get("top_p", 1), + "stream" : True + } + + response = requests.post("https://chat.dfehub.com/api/openai/v1/chat/completions", + headers=headers, json=json_data, timeout=3) + + for chunk in response.iter_lines(): + if b"detail" in chunk: + delay = re.findall(r"\d+\.\d+", chunk.decode()) + delay = float(delay[-1]) + time.sleep(delay) + yield from DfeHub.create_completion(model, messages, stream, **kwargs) + if b"content" in chunk: + data = json.loads(chunk.decode().split("data: ")[1]) + yield (data["choices"][0]["delta"]["content"]) + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ("presence_penalty", "int"), + ("frequency_penalty", "int"), + ("top_p", "int"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/g4f/Provider/EasyChat.py b/g4f/Provider/EasyChat.py new file mode 100644 index 0000000000000000000000000000000000000000..946d4a4d560e6e0b6b86443f9038f51516a407bb --- /dev/null +++ b/g4f/Provider/EasyChat.py @@ -0,0 +1,111 @@ +from __future__ import annotations + +import json +import random + +import requests + +from ..typing import Any, CreateResult +from .base_provider import BaseProvider + + +class EasyChat(BaseProvider): + url: str = "https://free.easychat.work" + supports_stream = True + supports_gpt_35_turbo = True + working = True + + @staticmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, **kwargs: Any) -> CreateResult: + + active_servers = [ + "https://chat10.fastgpt.me", + "https://chat9.fastgpt.me", + "https://chat1.fastgpt.me", + "https://chat2.fastgpt.me", + "https://chat3.fastgpt.me", + "https://chat4.fastgpt.me", + "https://gxos1h1ddt.fastgpt.me" + ] + + server = active_servers[kwargs.get("active_server", random.randint(0, 5))] + headers = { + "authority" : f"{server}".replace("https://", ""), + "accept" : "text/event-stream", + "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2", + "content-type" : "application/json", + "origin" : f"{server}", + "referer" : f"{server}/", + "x-requested-with" : "XMLHttpRequest", + 'plugins' : '0', + 'sec-ch-ua' : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"', + 'sec-ch-ua-mobile' : '?0', + 'sec-ch-ua-platform': '"Windows"', + 'sec-fetch-dest' : 'empty', + 'sec-fetch-mode' : 'cors', + 'sec-fetch-site' : 'same-origin', + 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36', + 'usesearch' : 'false', + 'x-requested-with' : 'XMLHttpRequest' + } + + json_data = { + "messages" : messages, + "stream" : stream, + "model" : model, + "temperature" : kwargs.get("temperature", 0.5), + "presence_penalty" : kwargs.get("presence_penalty", 0), + "frequency_penalty" : kwargs.get("frequency_penalty", 0), + "top_p" : kwargs.get("top_p", 1) + } + + session = requests.Session() + # init cookies from server + session.get(f"{server}/") + + response = session.post(f"{server}/api/openai/v1/chat/completions", + headers=headers, json=json_data, 
stream=stream) + + if response.status_code == 200: + + if stream == False: + json_data = response.json() + + if "choices" in json_data: + yield json_data["choices"][0]["message"]["content"] + else: + raise Exception("No response from server") + + else: + + for chunk in response.iter_lines(): + + if b"content" in chunk: + splitData = chunk.decode().split("data:") + + if len(splitData) > 1: + yield json.loads(splitData[1])["choices"][0]["delta"]["content"] + else: + continue + else: + raise Exception(f"Error {response.status_code} from server : {response.reason}") + + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ("presence_penalty", "int"), + ("frequency_penalty", "int"), + ("top_p", "int"), + ("active_server", "int"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/g4f/Provider/Equing.py b/g4f/Provider/Equing.py new file mode 100644 index 0000000000000000000000000000000000000000..0ebb93a513d5f0d0325433bb086d290040d36746 --- /dev/null +++ b/g4f/Provider/Equing.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +import json +from abc import ABC, abstractmethod + +import requests + +from ..typing import Any, CreateResult + + +class Equing(ABC): + url: str = 'https://next.eqing.tech/' + working = True + needs_auth = False + supports_stream = True + supports_gpt_35_turbo = True + supports_gpt_4 = False + + @staticmethod + @abstractmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, **kwargs: Any) -> CreateResult: + + headers = { + 'authority' : 'next.eqing.tech', + 'accept' : 'text/event-stream', + 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', + 'cache-control' : 'no-cache', + 'content-type' : 'application/json', + 'origin' : 'https://next.eqing.tech', + 'plugins' : '0', + 'pragma' : 'no-cache', + 'referer' : 'https://next.eqing.tech/', + 'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"', + 'sec-ch-ua-mobile' : '?0', + 'sec-ch-ua-platform': '"macOS"', + 'sec-fetch-dest' : 'empty', + 'sec-fetch-mode' : 'cors', + 'sec-fetch-site' : 'same-origin', + 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36', + 'usesearch' : 'false', + 'x-requested-with' : 'XMLHttpRequest' + } + + json_data = { + 'messages' : messages, + 'stream' : stream, + 'model' : model, + 'temperature' : kwargs.get('temperature', 0.5), + 'presence_penalty' : kwargs.get('presence_penalty', 0), + 'frequency_penalty' : kwargs.get('frequency_penalty', 0), + 'top_p' : kwargs.get('top_p', 1), + } + + response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions', + headers=headers, json=json_data, stream=stream) + + if not stream: + yield response.json()["choices"][0]["message"]["content"] + return + + for line in response.iter_content(chunk_size=1024): + if line: + if b'content' in line: + line_json = json.loads(line.decode('utf-8').split('data: ')[1]) + token = line_json['choices'][0]['delta'].get('content') + if token: + yield token + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at 
end of file diff --git a/g4f/Provider/FastGpt.py b/g4f/Provider/FastGpt.py new file mode 100644 index 0000000000000000000000000000000000000000..ef47f75215ba933c540c7cfaa575e7a3b244ffc4 --- /dev/null +++ b/g4f/Provider/FastGpt.py @@ -0,0 +1,86 @@ +from __future__ import annotations + +import json +import random +from abc import ABC, abstractmethod + +import requests + +from ..typing import Any, CreateResult + + +class FastGpt(ABC): + url: str = 'https://chat9.fastgpt.me/' + working = False + needs_auth = False + supports_stream = True + supports_gpt_35_turbo = True + supports_gpt_4 = False + + @staticmethod + @abstractmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, **kwargs: Any) -> CreateResult: + + headers = { + 'authority' : 'chat9.fastgpt.me', + 'accept' : 'text/event-stream', + 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', + 'cache-control' : 'no-cache', + 'content-type' : 'application/json', + 'origin' : 'https://chat9.fastgpt.me', + 'plugins' : '0', + 'pragma' : 'no-cache', + 'referer' : 'https://chat9.fastgpt.me/', + 'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"', + 'sec-ch-ua-mobile' : '?0', + 'sec-ch-ua-platform': '"macOS"', + 'sec-fetch-dest' : 'empty', + 'sec-fetch-mode' : 'cors', + 'sec-fetch-site' : 'same-origin', + 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36', + 'usesearch' : 'false', + 'x-requested-with' : 'XMLHttpRequest', + } + + json_data = { + 'messages' : messages, + 'stream' : stream, + 'model' : model, + 'temperature' : kwargs.get('temperature', 0.5), + 'presence_penalty' : kwargs.get('presence_penalty', 0), + 'frequency_penalty' : kwargs.get('frequency_penalty', 0), + 'top_p' : kwargs.get('top_p', 1), + } + + subdomain = random.choice([ + 'jdaen979ew', + 'chat9' + ]) + + response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions', + headers=headers, json=json_data, stream=stream) + + for line in response.iter_lines(): + if line: + try: + if b'content' in line: + line_json = json.loads(line.decode('utf-8').split('data: ')[1]) + token = line_json['choices'][0]['delta'].get('content') + if token: + yield token + except: + continue + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/g4f/Provider/Forefront.py b/g4f/Provider/Forefront.py new file mode 100644 index 0000000000000000000000000000000000000000..8f51fb579ae40c5a8c7609dc481a13bcefa7a366 --- /dev/null +++ b/g4f/Provider/Forefront.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +import json + +import requests + +from ..typing import Any, CreateResult +from .base_provider import BaseProvider + + +class Forefront(BaseProvider): + url = "https://forefront.com" + supports_stream = True + supports_gpt_35_turbo = True + + @staticmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, **kwargs: Any) -> CreateResult: + + json_data = { + "text" : messages[-1]["content"], + "action" : "noauth", + "id" : "", + "parentId" : "", + "workspaceId" : "", + "messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0", + "model" : "gpt-4", + "messages" : messages[:-1] if len(messages) > 1 else 
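DfeHub, EasyChat, Equing and FastGpt above all parse OpenAI-style server-sent events the same way: split each line on the "data: " prefix and read choices[0].delta.content. A slightly more defensive version of that shared pattern, shown only for illustration (not part of the applied patch):

import json

def iter_tokens(lines):
    """Yield content tokens from an iterable of SSE lines (bytes or str)."""
    for raw in lines:
        line = raw.decode("utf-8") if isinstance(raw, bytes) else raw
        line = line.strip()
        if not line.startswith("data: ") or line == "data: [DONE]":
            continue
        payload = json.loads(line[len("data: "):])
        token = payload["choices"][0]["delta"].get("content")
        if token:
            yield token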
[], + "internetMode" : "auto", + } + + response = requests.post("https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat", + json=json_data, stream=True) + + response.raise_for_status() + for token in response.iter_lines(): + if b"delta" in token: + yield json.loads(token.decode().split("data: ")[1])["delta"] diff --git a/g4f/Provider/GetGpt.py b/g4f/Provider/GetGpt.py new file mode 100644 index 0000000000000000000000000000000000000000..74e772b0fef338dd4bf5681e5978fc1e1d6ad2a0 --- /dev/null +++ b/g4f/Provider/GetGpt.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +import json +import os +import uuid + +import requests +from Crypto.Cipher import AES + +from ..typing import Any, CreateResult +from .base_provider import BaseProvider + + +class GetGpt(BaseProvider): + url = 'https://chat.getgpt.world/' + supports_stream = True + working = True + supports_gpt_35_turbo = True + + @staticmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, **kwargs: Any) -> CreateResult: + + headers = { + 'Content-Type' : 'application/json', + 'Referer' : 'https://chat.getgpt.world/', + 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36', + } + + data = json.dumps( + { + 'messages' : messages, + 'frequency_penalty' : kwargs.get('frequency_penalty', 0), + 'max_tokens' : kwargs.get('max_tokens', 4000), + 'model' : 'gpt-3.5-turbo', + 'presence_penalty' : kwargs.get('presence_penalty', 0), + 'temperature' : kwargs.get('temperature', 1), + 'top_p' : kwargs.get('top_p', 1), + 'stream' : True, + 'uuid' : str(uuid.uuid4()) + } + ) + + res = requests.post('https://chat.getgpt.world/api/chat/stream', + headers=headers, json={'signature': _encrypt(data)}, stream=True) + + res.raise_for_status() + for line in res.iter_lines(): + if b'content' in line: + line_json = json.loads(line.decode('utf-8').split('data: ')[1]) + yield (line_json['choices'][0]['delta']['content']) + + @classmethod + @property + def params(cls): + params = [ + ('model', 'str'), + ('messages', 'list[dict[str, str]]'), + ('stream', 'bool'), + ('temperature', 'float'), + ('presence_penalty', 'int'), + ('frequency_penalty', 'int'), + ('top_p', 'int'), + ('max_tokens', 'int'), + ] + param = ', '.join([': '.join(p) for p in params]) + return f'g4f.provider.{cls.__name__} supports: ({param})' + + +def _encrypt(e: str): + t = os.urandom(8).hex().encode('utf-8') + n = os.urandom(8).hex().encode('utf-8') + r = e.encode('utf-8') + + cipher = AES.new(t, AES.MODE_CBC, n) + ciphertext = cipher.encrypt(_pad_data(r)) + + return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8') + + +def _pad_data(data: bytes) -> bytes: + block_size = AES.block_size + padding_size = block_size - len(data) % block_size + padding = bytes([padding_size] * padding_size) + + return data + padding diff --git a/g4f/Provider/H2o.py b/g4f/Provider/H2o.py new file mode 100644 index 0000000000000000000000000000000000000000..30090a5820ddd216fd03b8ddbdf7cdd1e6da8b9c --- /dev/null +++ b/g4f/Provider/H2o.py @@ -0,0 +1,101 @@ +from __future__ import annotations + +import json +import uuid + +from aiohttp import ClientSession + +from ..typing import AsyncGenerator +from .base_provider import AsyncGeneratorProvider, format_prompt + + +class H2o(AsyncGeneratorProvider): + url = "https://gpt-gm.h2o.ai" + working = True + model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1" + + @classmethod + async def create_async_generator( + cls, + model: str, + 
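The _encrypt helper in GetGpt above emits the hex ciphertext followed by the 16-character key and the 16-character IV, with _pad_data applying PKCS#7-style padding. The inverse operation, included here only to document that layout (not part of the applied patch):

from Crypto.Cipher import AES

def _decrypt(signature: str) -> str:
    key, iv = signature[-32:-16], signature[-16:]
    ciphertext = bytes.fromhex(signature[:-32])
    plain = AES.new(key.encode(), AES.MODE_CBC, iv.encode()).decrypt(ciphertext)
    return plain[:-plain[-1]].decode()  # strip the padding added by _pad_data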
messages: list[dict[str, str]], + proxy: str = None, + **kwargs + ) -> AsyncGenerator: + model = model if model else cls.model + headers = {"Referer": "https://gpt-gm.h2o.ai/"} + + async with ClientSession( + headers=headers + ) as session: + data = { + "ethicsModalAccepted": "true", + "shareConversationsWithModelAuthors": "true", + "ethicsModalAcceptedAt": "", + "activeModel": model, + "searchEnabled": "true", + } + async with session.post( + "https://gpt-gm.h2o.ai/settings", + proxy=proxy, + data=data + ) as response: + response.raise_for_status() + + async with session.post( + "https://gpt-gm.h2o.ai/conversation", + proxy=proxy, + json={"model": model}, + ) as response: + response.raise_for_status() + conversationId = (await response.json())["conversationId"] + + data = { + "inputs": format_prompt(messages), + "parameters": { + "temperature": 0.4, + "truncate": 2048, + "max_new_tokens": 1024, + "do_sample": True, + "repetition_penalty": 1.2, + "return_full_text": False, + **kwargs + }, + "stream": True, + "options": { + "id": str(uuid.uuid4()), + "response_id": str(uuid.uuid4()), + "is_retry": False, + "use_cache": False, + "web_search_id": "", + }, + } + async with session.post( + f"https://gpt-gm.h2o.ai/conversation/{conversationId}", + proxy=proxy, + json=data + ) as response: + start = "data:" + async for line in response.content: + line = line.decode("utf-8") + if line and line.startswith(start): + line = json.loads(line[len(start):-1]) + if not line["token"]["special"]: + yield line["token"]["text"] + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ("truncate", "int"), + ("max_new_tokens", "int"), + ("do_sample", "bool"), + ("repetition_penalty", "float"), + ("return_full_text", "bool"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py new file mode 100644 index 0000000000000000000000000000000000000000..11310a69e43cc4e396a93250c6513690f183c6c3 --- /dev/null +++ b/g4f/Provider/HuggingChat.py @@ -0,0 +1,106 @@ +from __future__ import annotations + +import json + +from aiohttp import ClientSession + +from ..typing import AsyncGenerator +from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies + + +class HuggingChat(AsyncGeneratorProvider): + url = "https://huggingface.co/chat/" + needs_auth = True + working = True + model = "OpenAssistant/oasst-sft-6-llama-30b-xor" + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + stream: bool = True, + proxy: str = None, + cookies: dict = None, + **kwargs + ) -> AsyncGenerator: + model = model if model else cls.model + if not cookies: + cookies = get_cookies(".huggingface.co") + if proxy and "://" not in proxy: + proxy = f"http://{proxy}" + + headers = { + 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36', + } + async with ClientSession( + cookies=cookies, + headers=headers + ) as session: + async with session.post("https://huggingface.co/chat/conversation", proxy=proxy, json={"model": model}) as response: + conversation_id = (await response.json())["conversationId"] + + send = { + "inputs": format_prompt(messages), + "parameters": { + "temperature": 0.2, + "truncate": 1000, + "max_new_tokens": 1024, + "stop": [""], + "top_p": 
0.95, + "repetition_penalty": 1.2, + "top_k": 50, + "return_full_text": False, + **kwargs + }, + "stream": stream, + "options": { + "id": "9e9b8bc4-6604-40c6-994e-8eb78fa32e37", + "response_id": "04ce2602-3bea-45e8-8efc-cef00680376a", + "is_retry": False, + "use_cache": False, + "web_search_id": "" + } + } + async with session.post(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy, json=send) as response: + if not stream: + data = await response.json() + if "error" in data: + raise RuntimeError(data["error"]) + elif isinstance(data, list): + yield data[0]["generated_text"] + else: + raise RuntimeError(f"Response: {data}") + else: + start = "data:" + first = True + async for line in response.content: + line = line.decode("utf-8") + if not line: + continue + if line.startswith(start): + line = json.loads(line[len(start):-1]) + if "token" not in line: + raise RuntimeError(f"Response: {line}") + if not line["token"]["special"]: + if first: + yield line["token"]["text"].lstrip() + first = False + else: + yield line["token"]["text"] + + async with session.delete(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy) as response: + response.raise_for_status() + + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("proxy", "str"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py new file mode 100644 index 0000000000000000000000000000000000000000..2cc5ff994c1db65262e56b1bc687f63f50d441b1 --- /dev/null +++ b/g4f/Provider/Liaobots.py @@ -0,0 +1,91 @@ +from __future__ import annotations + +import json +import uuid + +from aiohttp import ClientSession + +from ..typing import AsyncGenerator +from .base_provider import AsyncGeneratorProvider + +models = { + "gpt-4": { + "id": "gpt-4", + "name": "GPT-4", + "maxLength": 24000, + "tokenLimit": 8000, + }, + "gpt-3.5-turbo": { + "id": "gpt-3.5-turbo", + "name": "GPT-3.5", + "maxLength": 12000, + "tokenLimit": 4000, + }, + "gpt-3.5-turbo-16k": { + "id": "gpt-3.5-turbo-16k", + "name": "GPT-3.5-16k", + "maxLength": 48000, + "tokenLimit": 16000, + }, +} + +class Liaobots(AsyncGeneratorProvider): + url = "https://liaobots.com" + working = True + supports_gpt_35_turbo = True + supports_gpt_4 = True + _auth_code = None + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + auth: str = None, + proxy: str = None, + **kwargs + ) -> AsyncGenerator: + model = model if model in models else "gpt-3.5-turbo" + if proxy and "://" not in proxy: + proxy = f"http://{proxy}" + headers = { + "authority": "liaobots.com", + "content-type": "application/json", + "origin": cls.url, + "referer": cls.url + "/", + "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36", + } + async with ClientSession( + headers=headers + ) as session: + auth_code = auth if isinstance(auth, str) else cls._auth_code + if not auth_code: + async with session.post(cls.url + "/api/user", proxy=proxy, json={"authcode": ""}) as response: + response.raise_for_status() + auth_code = cls._auth_code = json.loads(await response.text())["authCode"] + data = { + "conversationId": str(uuid.uuid4()), + "model": models[model], + "messages": messages, + "key": "", + "prompt": "You are ChatGPT, a large language model trained by 
OpenAI. Follow the user's instructions carefully.", + } + async with session.post(cls.url + "/api/chat", proxy=proxy, json=data, headers={"x-auth-code": auth_code}) as response: + response.raise_for_status() + async for stream in response.content.iter_any(): + if stream: + yield stream.decode() + + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("proxy", "str"), + ("auth", "str"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/g4f/Provider/Lockchat.py b/g4f/Provider/Lockchat.py new file mode 100644 index 0000000000000000000000000000000000000000..c15eec8dd99f6a50b7eb02cf8ff14494380f4b9a --- /dev/null +++ b/g4f/Provider/Lockchat.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +import json + +import requests + +from ..typing import Any, CreateResult +from .base_provider import BaseProvider + + +class Lockchat(BaseProvider): + url: str = "http://supertest.lockchat.app" + supports_stream = True + supports_gpt_35_turbo = True + supports_gpt_4 = True + + @staticmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, **kwargs: Any) -> CreateResult: + + temperature = float(kwargs.get("temperature", 0.7)) + payload = { + "temperature": temperature, + "messages" : messages, + "model" : model, + "stream" : True, + } + + headers = { + "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0", + } + response = requests.post("http://supertest.lockchat.app/v1/chat/completions", + json=payload, headers=headers, stream=True) + + response.raise_for_status() + for token in response.iter_lines(): + if b"The model: `gpt-4` does not exist" in token: + print("error, retrying...") + Lockchat.create_completion( + model = model, + messages = messages, + stream = stream, + temperature = temperature, + **kwargs) + + if b"content" in token: + token = json.loads(token.decode("utf-8").split("data: ")[1]) + token = token["choices"][0]["delta"].get("content") + if token: + yield (token) + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/g4f/Provider/Opchatgpts.py b/g4f/Provider/Opchatgpts.py new file mode 100644 index 0000000000000000000000000000000000000000..166323bdd329ce2a66c1ccbe76ed77086a7e19d6 --- /dev/null +++ b/g4f/Provider/Opchatgpts.py @@ -0,0 +1,8 @@ +from __future__ import annotations + +from .ChatgptLogin import ChatgptLogin + + +class Opchatgpts(ChatgptLogin): + url = "https://opchatgpts.net" + working = True \ No newline at end of file diff --git a/g4f/Provider/OpenAssistant.py b/g4f/Provider/OpenAssistant.py new file mode 100644 index 0000000000000000000000000000000000000000..3a931597d6f761c3ec1694f5dd1f58a3b533a6a7 --- /dev/null +++ b/g4f/Provider/OpenAssistant.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +import json + +from aiohttp import ClientSession + +from ..typing import Any, AsyncGenerator +from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies + + +class OpenAssistant(AsyncGeneratorProvider): + url = "https://open-assistant.io/chat" + needs_auth = True + working = True + model = "OA_SFT_Llama_30B_6" + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: 
list[dict[str, str]], + proxy: str = None, + cookies: dict = None, + **kwargs: Any + ) -> AsyncGenerator: + if proxy and "://" not in proxy: + proxy = f"http://{proxy}" + if not cookies: + cookies = get_cookies("open-assistant.io") + + headers = { + 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36', + } + async with ClientSession( + cookies=cookies, + headers=headers + ) as session: + async with session.post("https://open-assistant.io/api/chat", proxy=proxy) as response: + chat_id = (await response.json())["id"] + + data = { + "chat_id": chat_id, + "content": f"[INST]\n{format_prompt(messages)}\n[/INST]", + "parent_id": None + } + async with session.post("https://open-assistant.io/api/chat/prompter_message", proxy=proxy, json=data) as response: + parent_id = (await response.json())["id"] + + data = { + "chat_id": chat_id, + "parent_id": parent_id, + "model_config_name": model if model else cls.model, + "sampling_parameters":{ + "top_k": 50, + "top_p": None, + "typical_p": None, + "temperature": 0.35, + "repetition_penalty": 1.1111111111111112, + "max_new_tokens": 1024, + **kwargs + }, + "plugins":[] + } + async with session.post("https://open-assistant.io/api/chat/assistant_message", proxy=proxy, json=data) as response: + data = await response.json() + if "id" in data: + message_id = data["id"] + elif "message" in data: + raise RuntimeError(data["message"]) + else: + response.raise_for_status() + + params = { + 'chat_id': chat_id, + 'message_id': message_id, + } + async with session.post("https://open-assistant.io/api/chat/events", proxy=proxy, params=params) as response: + start = "data: " + async for line in response.content: + line = line.decode("utf-8") + if line and line.startswith(start): + line = json.loads(line[len(start):]) + if line["event_type"] == "token": + yield line["text"] + + params = { + 'chat_id': chat_id, + } + async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response: + response.raise_for_status() + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("proxy", "str"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/g4f/Provider/OpenaiChat.py b/g4f/Provider/OpenaiChat.py new file mode 100644 index 0000000000000000000000000000000000000000..c93977ecf256b6fcc5c1a84f96382d7eae7441b2 --- /dev/null +++ b/g4f/Provider/OpenaiChat.py @@ -0,0 +1,94 @@ +from __future__ import annotations + +from curl_cffi.requests import AsyncSession +import uuid +import json + +from .base_provider import AsyncProvider, get_cookies, format_prompt +from ..typing import AsyncGenerator + + +class OpenaiChat(AsyncProvider): + url = "https://chat.openai.com" + needs_auth = True + working = True + supports_gpt_35_turbo = True + _access_token = None + + @classmethod + async def create_async( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + access_token: str = None, + cookies: dict = None, + **kwargs: dict + ) -> AsyncGenerator: + proxies = None + if proxy: + if "://" not in proxy: + proxy = f"http://{proxy}" + proxies = { + "http": proxy, + "https": proxy + } + if not access_token: + access_token = await cls.get_access_token(cookies) + headers = { + "Accept": "text/event-stream", + "Authorization": f"Bearer {access_token}", + } + async with AsyncSession(proxies=proxies, 
headers=headers, impersonate="chrome107") as session: + messages = [ + { + "id": str(uuid.uuid4()), + "author": {"role": "user"}, + "content": {"content_type": "text", "parts": [format_prompt(messages)]}, + }, + ] + data = { + "action": "next", + "messages": messages, + "conversation_id": None, + "parent_message_id": str(uuid.uuid4()), + "model": "text-davinci-002-render-sha", + "history_and_training_disabled": True, + } + response = await session.post("https://chat.openai.com/backend-api/conversation", json=data) + response.raise_for_status() + last_message = None + for line in response.content.decode().splitlines(): + if line.startswith("data: "): + line = line[6:] + if line != "[DONE]": + line = json.loads(line) + if "message" in line: + last_message = line["message"]["content"]["parts"][0] + return last_message + + + @classmethod + async def get_access_token(cls, cookies: dict = None, proxies: dict = None): + if not cls._access_token: + cookies = cookies if cookies else get_cookies("chat.openai.com") + async with AsyncSession(proxies=proxies, cookies=cookies, impersonate="chrome107") as session: + response = await session.get("https://chat.openai.com/api/auth/session") + response.raise_for_status() + cls._access_token = response.json()["accessToken"] + return cls._access_token + + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("proxy", "str"), + ("access_token", "str"), + ("cookies", "dict[str, str]") + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/g4f/Provider/Raycast.py b/g4f/Provider/Raycast.py new file mode 100644 index 0000000000000000000000000000000000000000..7ddc8acd70f870bab1db90f3d279c37de4f46234 --- /dev/null +++ b/g4f/Provider/Raycast.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +import json + +import requests + +from ..typing import Any, CreateResult +from .base_provider import BaseProvider + + +class Raycast(BaseProvider): + url = "https://raycast.com" + supports_gpt_35_turbo = True + supports_gpt_4 = True + supports_stream = True + needs_auth = True + working = True + + @staticmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, + **kwargs: Any, + ) -> CreateResult: + auth = kwargs.get('auth') + headers = { + 'Accept': 'application/json', + 'Accept-Language': 'en-US,en;q=0.9', + 'Authorization': f'Bearer {auth}', + 'Content-Type': 'application/json', + 'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0', + } + parsed_messages = [] + for message in messages: + parsed_messages.append({ + 'author': message['role'], + 'content': {'text': message['content']} + }) + data = { + "debug": False, + "locale": "en-CN", + "messages": parsed_messages, + "model": model, + "provider": "openai", + "source": "ai_chat", + "system_instruction": "markdown", + "temperature": 0.5 + } + response = requests.post("https://backend.raycast.com/api/v1/ai/chat_completions", headers=headers, json=data, stream=True) + for token in response.iter_lines(): + if b'data: ' not in token: + continue + completion_chunk = json.loads(token.decode().replace('data: ', '')) + token = completion_chunk['text'] + if token != None: + yield token + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ("top_p", "int"), + ("model", "str"), + 
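A minimal call sketch for the OpenaiChat provider above (not part of the applied patch): it reuses a cached access token or pulls one from the chat.openai.com session endpoint via browser cookies, and passing a token explicitly skips the cookie lookup. The token value below is a placeholder:

import asyncio

from g4f.Provider.OpenaiChat import OpenaiChat

async def demo() -> None:
    answer = await OpenaiChat.create_async(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
        access_token="<your session access token>",  # placeholder, not a real token
    )
    print(answer)

asyncio.run(demo())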
("auth", "str"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/g4f/Provider/Theb.py b/g4f/Provider/Theb.py new file mode 100644 index 0000000000000000000000000000000000000000..72fce3ac6f2b58fbd569153cc2025c7f03d94c12 --- /dev/null +++ b/g4f/Provider/Theb.py @@ -0,0 +1,97 @@ +from __future__ import annotations + +import json +import random + +import requests + +from ..typing import Any, CreateResult +from .base_provider import BaseProvider + + +class Theb(BaseProvider): + url = "https://theb.ai" + working = True + supports_stream = True + supports_gpt_35_turbo = True + needs_auth = True + + @staticmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, **kwargs: Any) -> CreateResult: + + conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages) + conversation += "\nassistant: " + + auth = kwargs.get("auth", { + "bearer_token":"free", + "org_id":"theb", + }) + + bearer_token = auth["bearer_token"] + org_id = auth["org_id"] + + headers = { + 'authority' : 'beta.theb.ai', + 'accept' : 'text/event-stream', + 'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', + 'authorization' : 'Bearer '+bearer_token, + 'content-type' : 'application/json', + 'origin' : 'https://beta.theb.ai', + 'referer' : 'https://beta.theb.ai/home', + 'sec-ch-ua' : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"', + 'sec-ch-ua-mobile' : '?0', + 'sec-ch-ua-platform': '"Windows"', + 'sec-fetch-dest' : 'empty', + 'sec-fetch-mode' : 'cors', + 'sec-fetch-site' : 'same-origin', + 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36', + 'x-ai-model' : 'ee8d4f29cb7047f78cbe84313ed6ace8', + } + + req_rand = random.randint(100000000, 9999999999) + + json_data: dict[str, Any] = { + "text" : conversation, + "category" : "04f58f64a4aa4191a957b47290fee864", + "model" : "ee8d4f29cb7047f78cbe84313ed6ace8", + "model_params": { + "system_prompt" : "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}", + "temperature" : kwargs.get("temperature", 1), + "top_p" : kwargs.get("top_p", 1), + "frequency_penalty" : kwargs.get("frequency_penalty", 0), + "presence_penalty" : kwargs.get("presence_penalty", 0), + "long_term_memory" : "auto" + } + } + + response = requests.post(f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}", + headers=headers, json=json_data, stream=True) + + response.raise_for_status() + content = "" + next_content = "" + for chunk in response.iter_lines(): + if b"content" in chunk: + next_content = content + data = json.loads(chunk.decode().split("data: ")[1]) + content = data["content"] + yield data["content"].replace(next_content, "") + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("auth", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ("presence_penalty", "int"), + ("frequency_penalty", "int"), + ("top_p", "int") + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/g4f/Provider/V50.py b/g4f/Provider/V50.py new file mode 100644 index 0000000000000000000000000000000000000000..81a95ba8db7211de946cce0711b52827145c9dca --- /dev/null +++ 
b/g4f/Provider/V50.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +import uuid + +import requests + +from ..typing import Any, CreateResult +from .base_provider import BaseProvider + + +class V50(BaseProvider): + url = 'https://p5.v50.ltd' + supports_gpt_35_turbo = True + supports_stream = False + needs_auth = False + working = False + + @staticmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, **kwargs: Any) -> CreateResult: + + conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages) + conversation += "\nassistant: " + + payload = { + "prompt" : conversation, + "options" : {}, + "systemMessage" : ".", + "temperature" : kwargs.get("temperature", 0.4), + "top_p" : kwargs.get("top_p", 0.4), + "model" : model, + "user" : str(uuid.uuid4()) + } + + headers = { + 'authority' : 'p5.v50.ltd', + 'accept' : 'application/json, text/plain, */*', + 'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', + 'content-type' : 'application/json', + 'origin' : 'https://p5.v50.ltd', + 'referer' : 'https://p5.v50.ltd/', + 'sec-ch-ua-platform': '"Windows"', + 'sec-fetch-dest' : 'empty', + 'sec-fetch-mode' : 'cors', + 'sec-fetch-site' : 'same-origin', + 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36' + } + response = requests.post("https://p5.v50.ltd/api/chat-process", + json=payload, headers=headers, proxies=kwargs['proxy'] if 'proxy' in kwargs else {}) + + if "https://fk1.v50.ltd" not in response.text: + yield response.text + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ("top_p", "int"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py new file mode 100644 index 0000000000000000000000000000000000000000..0bccaaab2e475d41f06b8475755ad450224d467b --- /dev/null +++ b/g4f/Provider/Vercel.py @@ -0,0 +1,353 @@ +from __future__ import annotations + +import base64, json, uuid, random +from curl_cffi.requests import AsyncSession + +from ..typing import Any, TypedDict +from .base_provider import AsyncProvider + + +class Vercel(AsyncProvider): + url = "https://sdk.vercel.ai" + working = True + supports_gpt_35_turbo = True + model = "replicate:replicate/llama-2-70b-chat" + + @classmethod + async def create_async( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs + ) -> str: + if model in ["gpt-3.5-turbo", "gpt-4"]: + model = "openai:" + model + model = model if model else cls.model + proxies = None + if proxy: + if "://" not in proxy: + proxy = "http://" + proxy + proxies = {"http": proxy, "https": proxy} + headers = { + "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.{rand1}.{rand2} Safari/537.36".format( + rand1=random.randint(0,9999), + rand2=random.randint(0,9999) + ), + "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8", + "Accept-Encoding": "gzip, deflate, br", + "Accept-Language": "en-US,en;q=0.5", + "TE": "trailers", + } + async with AsyncSession(headers=headers, proxies=proxies, impersonate="chrome107") as session: + response = await session.get(cls.url + "/openai.jpeg") + response.raise_for_status() + custom_encoding = 
_get_custom_encoding(response.text) + headers = { + "Content-Type": "application/json", + "Custom-Encoding": custom_encoding, + } + data = _create_payload(model, messages) + response = await session.post(cls.url + "/api/generate", json=data, headers=headers) + response.raise_for_status() + return response.text + + +def _create_payload(model: str, messages: list[dict[str, str]]) -> dict[str, Any]: + if model not in model_info: + raise RuntimeError(f'Model "{model}" are not supported') + default_params = model_info[model]["default_params"] + return { + "messages": messages, + "playgroundId": str(uuid.uuid4()), + "chatIndex": 0, + "model": model + } | default_params + +# based on https://github.com/ading2210/vercel-llm-api +def _get_custom_encoding(text: str) -> str: + data = json.loads(base64.b64decode(text, validate=True)) + script = """ + String.prototype.fontcolor = function() {{ + return `${{this}}` + }} + var globalThis = {{marker: "mark"}}; + ({script})({key}) + """.format( + script=data["c"], key=data["a"] + ) + context = quickjs.Context() # type: ignore + token_data = json.loads(context.eval(script).json()) # type: ignore + token_data[2] = "mark" + token = {"r": token_data, "t": data["t"]} + token_str = json.dumps(token, separators=(",", ":")).encode("utf-16le") + return base64.b64encode(token_str).decode() + + +class ModelInfo(TypedDict): + id: str + default_params: dict[str, Any] + + +model_info: dict[str, ModelInfo] = { + "anthropic:claude-instant-v1": { + "id": "anthropic:claude-instant-v1", + "default_params": { + "temperature": 1, + "maxTokens": 200, + "topP": 1, + "topK": 1, + "presencePenalty": 1, + "frequencyPenalty": 1, + "stopSequences": ["\n\nHuman:"], + }, + }, + "anthropic:claude-v1": { + "id": "anthropic:claude-v1", + "default_params": { + "temperature": 1, + "maxTokens": 200, + "topP": 1, + "topK": 1, + "presencePenalty": 1, + "frequencyPenalty": 1, + "stopSequences": ["\n\nHuman:"], + }, + }, + "anthropic:claude-v2": { + "id": "anthropic:claude-v2", + "default_params": { + "temperature": 1, + "maxTokens": 200, + "topP": 1, + "topK": 1, + "presencePenalty": 1, + "frequencyPenalty": 1, + "stopSequences": ["\n\nHuman:"], + }, + }, + "replicate:a16z-infra/llama7b-v2-chat": { + "id": "replicate:a16z-infra/llama7b-v2-chat", + "default_params": { + "temperature": 0.75, + "maxTokens": 500, + "topP": 1, + "repetitionPenalty": 1, + }, + }, + "replicate:a16z-infra/llama13b-v2-chat": { + "id": "replicate:a16z-infra/llama13b-v2-chat", + "default_params": { + "temperature": 0.75, + "maxTokens": 500, + "topP": 1, + "repetitionPenalty": 1, + }, + }, + "replicate:replicate/llama-2-70b-chat": { + "id": "replicate:replicate/llama-2-70b-chat", + "default_params": { + "temperature": 0.75, + "maxTokens": 1000, + "topP": 1, + "repetitionPenalty": 1, + }, + }, + "huggingface:bigscience/bloom": { + "id": "huggingface:bigscience/bloom", + "default_params": { + "temperature": 0.5, + "maxTokens": 200, + "topP": 0.95, + "topK": 4, + "repetitionPenalty": 1.03, + }, + }, + "huggingface:google/flan-t5-xxl": { + "id": "huggingface:google/flan-t5-xxl", + "default_params": { + "temperature": 0.5, + "maxTokens": 200, + "topP": 0.95, + "topK": 4, + "repetitionPenalty": 1.03, + }, + }, + "huggingface:EleutherAI/gpt-neox-20b": { + "id": "huggingface:EleutherAI/gpt-neox-20b", + "default_params": { + "temperature": 0.5, + "maxTokens": 200, + "topP": 0.95, + "topK": 4, + "repetitionPenalty": 1.03, + "stopSequences": [], + }, + }, + "huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5": { + "id": 
"huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", + "default_params": {"maxTokens": 200, "typicalP": 0.2, "repetitionPenalty": 1}, + }, + "huggingface:OpenAssistant/oasst-sft-1-pythia-12b": { + "id": "huggingface:OpenAssistant/oasst-sft-1-pythia-12b", + "default_params": {"maxTokens": 200, "typicalP": 0.2, "repetitionPenalty": 1}, + }, + "huggingface:bigcode/santacoder": { + "id": "huggingface:bigcode/santacoder", + "default_params": { + "temperature": 0.5, + "maxTokens": 200, + "topP": 0.95, + "topK": 4, + "repetitionPenalty": 1.03, + }, + }, + "cohere:command-light-nightly": { + "id": "cohere:command-light-nightly", + "default_params": { + "temperature": 0.9, + "maxTokens": 200, + "topP": 1, + "topK": 0, + "presencePenalty": 0, + "frequencyPenalty": 0, + "stopSequences": [], + }, + }, + "cohere:command-nightly": { + "id": "cohere:command-nightly", + "default_params": { + "temperature": 0.9, + "maxTokens": 200, + "topP": 1, + "topK": 0, + "presencePenalty": 0, + "frequencyPenalty": 0, + "stopSequences": [], + }, + }, + "openai:gpt-4": { + "id": "openai:gpt-4", + "default_params": { + "temperature": 0.7, + "maxTokens": 500, + "topP": 1, + "presencePenalty": 0, + "frequencyPenalty": 0, + "stopSequences": [], + }, + }, + "openai:gpt-4-0613": { + "id": "openai:gpt-4-0613", + "default_params": { + "temperature": 0.7, + "maxTokens": 500, + "topP": 1, + "presencePenalty": 0, + "frequencyPenalty": 0, + "stopSequences": [], + }, + }, + "openai:code-davinci-002": { + "id": "openai:code-davinci-002", + "default_params": { + "temperature": 0.5, + "maxTokens": 200, + "topP": 1, + "presencePenalty": 0, + "frequencyPenalty": 0, + "stopSequences": [], + }, + }, + "openai:gpt-3.5-turbo": { + "id": "openai:gpt-3.5-turbo", + "default_params": { + "temperature": 0.7, + "maxTokens": 500, + "topP": 1, + "topK": 1, + "presencePenalty": 1, + "frequencyPenalty": 1, + "stopSequences": [], + }, + }, + "openai:gpt-3.5-turbo-16k": { + "id": "openai:gpt-3.5-turbo-16k", + "default_params": { + "temperature": 0.7, + "maxTokens": 500, + "topP": 1, + "topK": 1, + "presencePenalty": 1, + "frequencyPenalty": 1, + "stopSequences": [], + }, + }, + "openai:gpt-3.5-turbo-16k-0613": { + "id": "openai:gpt-3.5-turbo-16k-0613", + "default_params": { + "temperature": 0.7, + "maxTokens": 500, + "topP": 1, + "topK": 1, + "presencePenalty": 1, + "frequencyPenalty": 1, + "stopSequences": [], + }, + }, + "openai:text-ada-001": { + "id": "openai:text-ada-001", + "default_params": { + "temperature": 0.5, + "maxTokens": 200, + "topP": 1, + "presencePenalty": 0, + "frequencyPenalty": 0, + "stopSequences": [], + }, + }, + "openai:text-babbage-001": { + "id": "openai:text-babbage-001", + "default_params": { + "temperature": 0.5, + "maxTokens": 200, + "topP": 1, + "presencePenalty": 0, + "frequencyPenalty": 0, + "stopSequences": [], + }, + }, + "openai:text-curie-001": { + "id": "openai:text-curie-001", + "default_params": { + "temperature": 0.5, + "maxTokens": 200, + "topP": 1, + "presencePenalty": 0, + "frequencyPenalty": 0, + "stopSequences": [], + }, + }, + "openai:text-davinci-002": { + "id": "openai:text-davinci-002", + "default_params": { + "temperature": 0.5, + "maxTokens": 200, + "topP": 1, + "presencePenalty": 0, + "frequencyPenalty": 0, + "stopSequences": [], + }, + }, + "openai:text-davinci-003": { + "id": "openai:text-davinci-003", + "default_params": { + "temperature": 0.5, + "maxTokens": 200, + "topP": 1, + "presencePenalty": 0, + "frequencyPenalty": 0, + "stopSequences": [], + }, + }, +} diff --git 
a/g4f/Provider/Wewordle.py b/g4f/Provider/Wewordle.py new file mode 100644 index 0000000000000000000000000000000000000000..a7bdc722795274270750f2609121c79a311df92e --- /dev/null +++ b/g4f/Provider/Wewordle.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +import random, string, time +from aiohttp import ClientSession + +from .base_provider import AsyncProvider + + +class Wewordle(AsyncProvider): + url = "https://wewordle.org" + working = True + supports_gpt_35_turbo = True + + @classmethod + async def create_async( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs + ) -> str: + + headers = { + "accept" : "*/*", + "pragma" : "no-cache", + "Content-Type" : "application/json", + "Connection" : "keep-alive" + } + + _user_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=16)) + _app_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=31)) + _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime()) + data = { + "user" : _user_id, + "messages" : messages, + "subscriber": { + "originalPurchaseDate" : None, + "originalApplicationVersion" : None, + "allPurchaseDatesMillis" : {}, + "entitlements" : {"active": {}, "all": {}}, + "allPurchaseDates" : {}, + "allExpirationDatesMillis" : {}, + "allExpirationDates" : {}, + "originalAppUserId" : f"$RCAnonymousID:{_app_id}", + "latestExpirationDate" : None, + "requestDate" : _request_date, + "latestExpirationDateMillis" : None, + "nonSubscriptionTransactions" : [], + "originalPurchaseDateMillis" : None, + "managementURL" : None, + "allPurchasedProductIdentifiers": [], + "firstSeen" : _request_date, + "activeSubscriptions" : [], + } + } + + + async with ClientSession( + headers=headers + ) as session: + async with session.post(f"{cls.url}/gptapi/v1/android/turbo", proxy=proxy, json=data) as response: + response.raise_for_status() + content = (await response.json())["message"]["content"] + if content: + return content \ No newline at end of file diff --git a/g4f/Provider/Wuguokai.py b/g4f/Provider/Wuguokai.py new file mode 100644 index 0000000000000000000000000000000000000000..a9614626b7864cfd9e934740633158ac05f99d02 --- /dev/null +++ b/g4f/Provider/Wuguokai.py @@ -0,0 +1,68 @@ +from __future__ import annotations + +import random + +import requests + +from ..typing import Any, CreateResult +from .base_provider import BaseProvider + + +class Wuguokai(BaseProvider): + url = 'https://chat.wuguokai.xyz' + supports_gpt_35_turbo = True + working = True + + @staticmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, + **kwargs: Any, + ) -> CreateResult: + base = '' + for message in messages: + base += '%s: %s\n' % (message['role'], message['content']) + base += 'assistant:' + + headers = { + 'authority': 'ai-api.wuguokai.xyz', + 'accept': 'application/json, text/plain, */*', + 'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', + 'content-type': 'application/json', + 'origin': 'https://chat.wuguokai.xyz', + 'referer': 'https://chat.wuguokai.xyz/', + 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Windows"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'same-site', + 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36' + } + data ={ + "prompt": base, + "options": {}, + "userId": f"#/chat/{random.randint(1,99999999)}", + 
"usingContext": True + } + response = requests.post("https://ai-api20.wuguokai.xyz/api/chat-process", headers=headers, timeout=3, json=data, proxies=kwargs['proxy'] if 'proxy' in kwargs else {}) + _split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试") + if response.status_code == 200: + if len(_split) > 1: + yield _split[1].strip() + else: + yield _split[0].strip() + else: + raise Exception(f"Error: {response.status_code} {response.reason}") + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool") + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py new file mode 100644 index 0000000000000000000000000000000000000000..121d1dbde4811b0cf37710e0bfa3098d9027fd2d --- /dev/null +++ b/g4f/Provider/You.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +import json + +from aiohttp import ClientSession + +from ..typing import AsyncGenerator +from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies + + +class You(AsyncGeneratorProvider): + url = "https://you.com" + working = True + supports_gpt_35_turbo = True + supports_stream = True + + @staticmethod + async def create_async_generator( + model: str, + messages: list[dict[str, str]], + cookies: dict = None, + **kwargs, + ) -> AsyncGenerator: + if not cookies: + cookies = get_cookies("you.com") + headers = { + "Accept": "text/event-stream", + "Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat", + "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0" + } + async with ClientSession(headers=headers, cookies=cookies) as session: + async with session.get( + "https://you.com/api/streamingSearch", + params={"q": format_prompt(messages), "domain": "youchat", "chat": ""}, + ) as response: + start = 'data: {"youChatToken": ' + async for line in response.content: + line = line.decode('utf-8') + if line.startswith(start): + yield json.loads(line[len(start): -2]) \ No newline at end of file diff --git a/g4f/Provider/Yqcloud.py b/g4f/Provider/Yqcloud.py new file mode 100644 index 0000000000000000000000000000000000000000..ac93315c22f5bc5943eba60ac6da125c266c217d --- /dev/null +++ b/g4f/Provider/Yqcloud.py @@ -0,0 +1,48 @@ +from __future__ import annotations + +from aiohttp import ClientSession + +from ..typing import AsyncGenerator +from .base_provider import AsyncGeneratorProvider, format_prompt + + +class Yqcloud(AsyncGeneratorProvider): + url = "https://chat9.yqcloud.top/" + working = True + supports_gpt_35_turbo = True + + @staticmethod + async def create_async_generator( + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs, + ) -> AsyncGenerator: + async with ClientSession( + headers=_create_header() + ) as session: + payload = _create_payload(messages) + async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response: + response.raise_for_status() + async for stream in response.content.iter_any(): + if stream: + yield stream.decode() + + +def _create_header(): + return { + "accept" : "application/json, text/plain, */*", + "content-type" : "application/json", + "origin" : "https://chat9.yqcloud.top", + } + + +def _create_payload(messages: list[dict[str, str]]): + return { + "prompt": format_prompt(messages), + "network": True, + "system": "", + "withoutContext": False, + 
"stream": True, + "userId": "#/chat/1693025544336" + } diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index 9d0442d0d270c5dc410d69ed01b6bf8716ab363c..fa1bdb875c61fd8cd5d0062cd76f39b61fad2b28 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -1,30 +1,69 @@ -from . import Provider -from .Providers import ( - Ails, - You, - Bing, - Yqcloud, - Theb, - Aichat, - Bard, - Vercel, - Forefront, - Lockchat, - Liaobots, - H2o, - ChatgptLogin, - DeepAi, - GetGpt, - AItianhu, - EasyChat, - Acytoo, - DfeHub, - AiService, - BingHuan, - Wewordle, - ChatgptAi, - opchatgpts, - Raycast, -) +from __future__ import annotations +from .Acytoo import Acytoo +from .Aichat import Aichat +from .Ails import Ails +from .AiService import AiService +from .AItianhu import AItianhu +from .Bard import Bard +from .Bing import Bing +from .ChatgptAi import ChatgptAi +from .ChatgptLogin import ChatgptLogin +from .DeepAi import DeepAi +from .DfeHub import DfeHub +from .EasyChat import EasyChat +from .Forefront import Forefront +from .GetGpt import GetGpt +from .H2o import H2o +from .HuggingChat import HuggingChat +from .Liaobots import Liaobots +from .Lockchat import Lockchat +from .Opchatgpts import Opchatgpts +from .OpenaiChat import OpenaiChat +from .OpenAssistant import OpenAssistant +from .Raycast import Raycast +from .Theb import Theb +from .Vercel import Vercel +from .Wewordle import Wewordle +from .You import You +from .Yqcloud import Yqcloud +from .Equing import Equing +from .FastGpt import FastGpt +from .V50 import V50 +from .Wuguokai import Wuguokai -Palm = Bard +from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider + +__all__ = [ + 'BaseProvider', + 'Acytoo', + 'Aichat', + 'Ails', + 'AiService', + 'AItianhu', + 'Bard', + 'Bing', + 'ChatgptAi', + 'ChatgptLogin', + 'DeepAi', + 'DfeHub', + 'EasyChat', + 'Forefront', + 'GetGpt', + 'H2o', + 'HuggingChat', + 'Liaobots', + 'Lockchat', + 'Opchatgpts', + 'Raycast', + 'OpenaiChat', + 'OpenAssistant', + 'Theb', + 'Vercel', + 'Wewordle', + 'You', + 'Yqcloud', + 'Equing', + 'FastGpt', + 'Wuguokai', + 'V50' +] diff --git a/g4f/Provider/__pycache__/AItianhu.cpython-311.pyc b/g4f/Provider/__pycache__/AItianhu.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff42e9bed22dea4cae16dc0173d207c47e6426e6 Binary files /dev/null and b/g4f/Provider/__pycache__/AItianhu.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/AItianhu.cpython-38.pyc b/g4f/Provider/__pycache__/AItianhu.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0debf5316d193bc29d5af1c168536e621d7d683 Binary files /dev/null and b/g4f/Provider/__pycache__/AItianhu.cpython-38.pyc differ diff --git a/g4f/Provider/__pycache__/Acytoo.cpython-311.pyc b/g4f/Provider/__pycache__/Acytoo.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e40d1fe4fc3a9dd468e11860e094400f4c68eacb Binary files /dev/null and b/g4f/Provider/__pycache__/Acytoo.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/Acytoo.cpython-38.pyc b/g4f/Provider/__pycache__/Acytoo.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1e9e831b458d4260caa40e3c84fc3453228e6d1 Binary files /dev/null and b/g4f/Provider/__pycache__/Acytoo.cpython-38.pyc differ diff --git a/g4f/Provider/__pycache__/AiService.cpython-311.pyc b/g4f/Provider/__pycache__/AiService.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..6741798968414c0cb8c019c92223234476c71886 Binary files /dev/null and b/g4f/Provider/__pycache__/AiService.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/AiService.cpython-38.pyc b/g4f/Provider/__pycache__/AiService.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4ecd6612c3914ada614b2de6f86865305c8aa24 Binary files /dev/null and b/g4f/Provider/__pycache__/AiService.cpython-38.pyc differ diff --git a/g4f/Provider/__pycache__/Aichat.cpython-311.pyc b/g4f/Provider/__pycache__/Aichat.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae2526ca365a3b2bf84cacf66113581f50bc116c Binary files /dev/null and b/g4f/Provider/__pycache__/Aichat.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/Aichat.cpython-38.pyc b/g4f/Provider/__pycache__/Aichat.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69810e3af6c6b7b4d0cdb74c98299b54edd8e68f Binary files /dev/null and b/g4f/Provider/__pycache__/Aichat.cpython-38.pyc differ diff --git a/g4f/Provider/__pycache__/Ails.cpython-311.pyc b/g4f/Provider/__pycache__/Ails.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8387cb99295ae494ebb9b8e46d6af3d9bd5c053 Binary files /dev/null and b/g4f/Provider/__pycache__/Ails.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/Ails.cpython-38.pyc b/g4f/Provider/__pycache__/Ails.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e747c4fc5d1136d733b61aef1be9d3b131180cab Binary files /dev/null and b/g4f/Provider/__pycache__/Ails.cpython-38.pyc differ diff --git a/g4f/Provider/__pycache__/Bard.cpython-311.pyc b/g4f/Provider/__pycache__/Bard.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66efa4d1a656d6bc766bf71d645b87916278daaf Binary files /dev/null and b/g4f/Provider/__pycache__/Bard.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/Bard.cpython-38.pyc b/g4f/Provider/__pycache__/Bard.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fcdb6681d6f8a3c8a01ba14c1db0c484c73035f Binary files /dev/null and b/g4f/Provider/__pycache__/Bard.cpython-38.pyc differ diff --git a/g4f/Provider/__pycache__/Bing.cpython-311.pyc b/g4f/Provider/__pycache__/Bing.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dab324d03f146792a0d3f1da8f178ac4a1fb3a7f Binary files /dev/null and b/g4f/Provider/__pycache__/Bing.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/Bing.cpython-38.pyc b/g4f/Provider/__pycache__/Bing.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51a7c8965b5a5c679fcd33b64f23f6831c3521d9 Binary files /dev/null and b/g4f/Provider/__pycache__/Bing.cpython-38.pyc differ diff --git a/g4f/Provider/__pycache__/ChatgptAi.cpython-311.pyc b/g4f/Provider/__pycache__/ChatgptAi.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c25d301dce6d44a5c5ab76fefe654293628b49f6 Binary files /dev/null and b/g4f/Provider/__pycache__/ChatgptAi.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/ChatgptAi.cpython-38.pyc b/g4f/Provider/__pycache__/ChatgptAi.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d28d404c512671988f09706d823280e3258a891 Binary files /dev/null and b/g4f/Provider/__pycache__/ChatgptAi.cpython-38.pyc differ diff --git 
a/g4f/Provider/__pycache__/ChatgptLogin.cpython-311.pyc b/g4f/Provider/__pycache__/ChatgptLogin.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a83079e51af192bb6bf23a1850cdf17b95db8736 Binary files /dev/null and b/g4f/Provider/__pycache__/ChatgptLogin.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/ChatgptLogin.cpython-38.pyc b/g4f/Provider/__pycache__/ChatgptLogin.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5adc1cd6ca0f4bf2b9e67564e2d8b487e3fa776b Binary files /dev/null and b/g4f/Provider/__pycache__/ChatgptLogin.cpython-38.pyc differ diff --git a/g4f/Provider/__pycache__/DeepAi.cpython-311.pyc b/g4f/Provider/__pycache__/DeepAi.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f7debe843e9b1468b84d9c6f82f9d8c8c5e5780 Binary files /dev/null and b/g4f/Provider/__pycache__/DeepAi.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/DeepAi.cpython-38.pyc b/g4f/Provider/__pycache__/DeepAi.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1395a63835aa165b4b1d6c7b722ca5c06a97464c Binary files /dev/null and b/g4f/Provider/__pycache__/DeepAi.cpython-38.pyc differ diff --git a/g4f/Provider/__pycache__/DfeHub.cpython-311.pyc b/g4f/Provider/__pycache__/DfeHub.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2ccabc7363af5193d44a8bb01458824c85c3e03 Binary files /dev/null and b/g4f/Provider/__pycache__/DfeHub.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/EasyChat.cpython-311.pyc b/g4f/Provider/__pycache__/EasyChat.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2869bb7e370c4a5e56283c379255c54536f23c09 Binary files /dev/null and b/g4f/Provider/__pycache__/EasyChat.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/Equing.cpython-311.pyc b/g4f/Provider/__pycache__/Equing.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfd5fe17acf069b5ae00e2aad1ab8b3d56590622 Binary files /dev/null and b/g4f/Provider/__pycache__/Equing.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/FastGpt.cpython-311.pyc b/g4f/Provider/__pycache__/FastGpt.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2752f5f709fd997fb814707f09ae90ce9718599 Binary files /dev/null and b/g4f/Provider/__pycache__/FastGpt.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/Forefront.cpython-311.pyc b/g4f/Provider/__pycache__/Forefront.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b12b9048e1bc3c52a2fa842149aa91a550a2c98 Binary files /dev/null and b/g4f/Provider/__pycache__/Forefront.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/GetGpt.cpython-311.pyc b/g4f/Provider/__pycache__/GetGpt.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf29a4d8084991204023425f1e92aed0821a1633 Binary files /dev/null and b/g4f/Provider/__pycache__/GetGpt.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/H2o.cpython-311.pyc b/g4f/Provider/__pycache__/H2o.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d619e4c3121d52d03775fa0da1aedb7f00a3538 Binary files /dev/null and b/g4f/Provider/__pycache__/H2o.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/HuggingChat.cpython-311.pyc b/g4f/Provider/__pycache__/HuggingChat.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f6e6fdc8d542848734d1da1b97d87e87a2713312 Binary files /dev/null and b/g4f/Provider/__pycache__/HuggingChat.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/Liaobots.cpython-311.pyc b/g4f/Provider/__pycache__/Liaobots.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04b3d909a58c6deeb300d8b4470f238162c80791 Binary files /dev/null and b/g4f/Provider/__pycache__/Liaobots.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/Lockchat.cpython-311.pyc b/g4f/Provider/__pycache__/Lockchat.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d4d19441f2a5c380cd704b7e0067f8df9408a1f Binary files /dev/null and b/g4f/Provider/__pycache__/Lockchat.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/Opchatgpts.cpython-311.pyc b/g4f/Provider/__pycache__/Opchatgpts.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a7ffc34a99ceeeb571c605946c10b1bc96efaaa Binary files /dev/null and b/g4f/Provider/__pycache__/Opchatgpts.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/OpenAssistant.cpython-311.pyc b/g4f/Provider/__pycache__/OpenAssistant.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8ec067fdb519c19e8f8b240bc267e3b05e34003 Binary files /dev/null and b/g4f/Provider/__pycache__/OpenAssistant.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/OpenaiChat.cpython-311.pyc b/g4f/Provider/__pycache__/OpenaiChat.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5761b8a664dbb91abb99e58a0d207da9eed2d1aa Binary files /dev/null and b/g4f/Provider/__pycache__/OpenaiChat.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/Raycast.cpython-311.pyc b/g4f/Provider/__pycache__/Raycast.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb21b17bd7dd8cb892732d31abe5d02257d04d4f Binary files /dev/null and b/g4f/Provider/__pycache__/Raycast.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/Theb.cpython-311.pyc b/g4f/Provider/__pycache__/Theb.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93eebb4dd25ddc0efefbebe310e284a6bbeb95df Binary files /dev/null and b/g4f/Provider/__pycache__/Theb.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/V50.cpython-311.pyc b/g4f/Provider/__pycache__/V50.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72293f10c915a499d1ceb4756040f09bc9f612bd Binary files /dev/null and b/g4f/Provider/__pycache__/V50.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/Vercel.cpython-311.pyc b/g4f/Provider/__pycache__/Vercel.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ccfa5f873d2017b4508b85024a3890b81078ad64 Binary files /dev/null and b/g4f/Provider/__pycache__/Vercel.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/Wewordle.cpython-311.pyc b/g4f/Provider/__pycache__/Wewordle.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..656910c46cac4a6b7a42b9649a7107377fedebd7 Binary files /dev/null and b/g4f/Provider/__pycache__/Wewordle.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/Wuguokai.cpython-311.pyc b/g4f/Provider/__pycache__/Wuguokai.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d191bc57c0dbc4523db8a212617a6c505612659f Binary files /dev/null and 
b/g4f/Provider/__pycache__/Wuguokai.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/You.cpython-311.pyc b/g4f/Provider/__pycache__/You.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b27106e4ab1f178502acd8dbb2bca7d5886e9e3f Binary files /dev/null and b/g4f/Provider/__pycache__/You.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/Yqcloud.cpython-311.pyc b/g4f/Provider/__pycache__/Yqcloud.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1a818e95537fccccc4421d0386614f15c2c08ab Binary files /dev/null and b/g4f/Provider/__pycache__/Yqcloud.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/__init__.cpython-311.pyc b/g4f/Provider/__pycache__/__init__.cpython-311.pyc index e4e6fed3ef5dc1ced1169eda19d0711737ea9ddd..a8f23fcbb95652c1f1a74750e5bfca2d2da519fa 100644 Binary files a/g4f/Provider/__pycache__/__init__.cpython-311.pyc and b/g4f/Provider/__pycache__/__init__.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/__init__.cpython-38.pyc b/g4f/Provider/__pycache__/__init__.cpython-38.pyc index dffbb297aaf7df5bb948211c303c7242b76a4310..4c3fc4aadf3f35549db28fd1669b94eeeb77957d 100644 Binary files a/g4f/Provider/__pycache__/__init__.cpython-38.pyc and b/g4f/Provider/__pycache__/__init__.cpython-38.pyc differ diff --git a/g4f/Provider/__pycache__/base_provider.cpython-311.pyc b/g4f/Provider/__pycache__/base_provider.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac56d1830f31e8454ee88ccefa89d05d510f9c6a Binary files /dev/null and b/g4f/Provider/__pycache__/base_provider.cpython-311.pyc differ diff --git a/g4f/Provider/__pycache__/base_provider.cpython-38.pyc b/g4f/Provider/__pycache__/base_provider.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e3be15ed0db0bbb11d9eb0a6a256a1c39d5b005 Binary files /dev/null and b/g4f/Provider/__pycache__/base_provider.cpython-38.pyc differ diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py new file mode 100644 index 0000000000000000000000000000000000000000..0f499c8ce60bb3fdc027008c6de28b1a4fae7e87 --- /dev/null +++ b/g4f/Provider/base_provider.py @@ -0,0 +1,125 @@ +from __future__ import annotations + +import asyncio +from abc import ABC, abstractmethod + +import browser_cookie3 + +from ..typing import Any, AsyncGenerator, CreateResult, Union + + +class BaseProvider(ABC): + url: str + working = False + needs_auth = False + supports_stream = False + supports_gpt_35_turbo = False + supports_gpt_4 = False + + @staticmethod + @abstractmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, **kwargs: Any) -> CreateResult: + + raise NotImplementedError() + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" + + +_cookies = {} + +def get_cookies(cookie_domain: str) -> dict: + if cookie_domain not in _cookies: + _cookies[cookie_domain] = {} + try: + for cookie in browser_cookie3.load(cookie_domain): + _cookies[cookie_domain][cookie.name] = cookie.value + except: + pass + return _cookies[cookie_domain] + + +def format_prompt(messages: list[dict[str, str]], add_special_tokens=False): + if add_special_tokens or len(messages) > 1: + formatted = "\n".join( + ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for 
message in messages] + ) + return f"{formatted}\nAssistant:" + else: + return messages.pop()["content"] + + + +class AsyncProvider(BaseProvider): + @classmethod + def create_completion( + cls, + model: str, + messages: list[dict[str, str]], + stream: bool = False, **kwargs: Any) -> CreateResult: + + yield asyncio.run(cls.create_async(model, messages, **kwargs)) + + @staticmethod + @abstractmethod + async def create_async( + model: str, + messages: list[dict[str, str]], **kwargs: Any) -> str: + raise NotImplementedError() + + +class AsyncGeneratorProvider(AsyncProvider): + supports_stream = True + + @classmethod + def create_completion( + cls, + model: str, + messages: list[dict[str, str]], + stream: bool = True, + **kwargs + ) -> CreateResult: + yield from run_generator(cls.create_async_generator(model, messages, stream=stream, **kwargs)) + + @classmethod + async def create_async( + cls, + model: str, + messages: list[dict[str, str]], + **kwargs + ) -> str: + chunks = [chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)] + if chunks: + return "".join(chunks) + + @staticmethod + @abstractmethod + def create_async_generator( + model: str, + messages: list[dict[str, str]], + **kwargs + ) -> AsyncGenerator: + raise NotImplementedError() + + +def run_generator(generator: AsyncGenerator[Union[Any, str], Any]): + loop = asyncio.new_event_loop() + gen = generator.__aiter__() + + while True: + try: + yield loop.run_until_complete(gen.__anext__()) + + except StopAsyncIteration: + break diff --git a/g4f/__init__.py b/g4f/__init__.py index 119203566a38364b7c172ebdd135e0b4b99169e0..065acee65e004f1a018e2d93356494c97d184654 100644 --- a/g4f/__init__.py +++ b/g4f/__init__.py @@ -1,42 +1,43 @@ -import sys -from . import Provider -from g4f import models +from __future__ import annotations +from . import models +from .Provider import BaseProvider +from .typing import Any, CreateResult, Union logging = False class ChatCompletion: @staticmethod - def create(model: models.Model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs): - kwargs['auth'] = auth - if provider and provider.working == False: - return f'{provider.__name__} is not working' - - if provider and provider.needs_auth and not auth: - print( - f'ValueError: {provider.__name__} requires authentication (use auth="cookie or token or jwt ..." 
param)', file=sys.stderr) - sys.exit(1) - - try: - if isinstance(model, str): - try: - model = models.ModelUtils.convert[model] - except KeyError: - raise Exception(f'The model: {model} does not exist') - - engine = model.best_provider if not provider else provider - - if not engine.supports_stream and stream == True: - print( - f"ValueError: {engine.__name__} does not support 'stream' argument", file=sys.stderr) - sys.exit(1) - - if logging: print(f'Using {engine.__name__} provider') - - return (engine._create_completion(model.name, messages, stream, **kwargs) - if stream else ''.join(engine._create_completion(model.name, messages, stream, **kwargs))) - except TypeError as e: - print(e) - arg: str = str(e).split("'")[1] - print( - f"ValueError: {engine.__name__} does not support '{arg}' argument", file=sys.stderr) - sys.exit(1) \ No newline at end of file + def create( + model : Union[models.Model, str], + messages : list[dict[str, str]], + provider : Union[type[BaseProvider], None] = None, + stream : bool = False, + auth : Union[str, None] = None, **kwargs: Any) -> Union[CreateResult, str]: + + if isinstance(model, str): + try: + model = models.ModelUtils.convert[model] + except KeyError: + raise Exception(f'The model: {model} does not exist') + + provider = model.best_provider if provider == None else provider + + if not provider.working: + raise Exception(f'{provider.__name__} is not working') + + if provider.needs_auth and not auth: + raise Exception( + f'ValueError: {provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)') + + if provider.needs_auth: + kwargs['auth'] = auth + + if not provider.supports_stream and stream: + raise Exception( + f'ValueError: {provider.__name__} does not support "stream" argument') + + if logging: + print(f'Using {provider.__name__} provider') + + result = provider.create_completion(model.name, messages, stream, **kwargs) + return result if stream else ''.join(result) diff --git a/g4f/__pycache__/__init__.cpython-311.pyc b/g4f/__pycache__/__init__.cpython-311.pyc index 9273d6672506bb8e74e7f24274d05abc7449f43b..029a052200f33b0925167c4668ae18fcb9666400 100644 Binary files a/g4f/__pycache__/__init__.cpython-311.pyc and b/g4f/__pycache__/__init__.cpython-311.pyc differ diff --git a/g4f/__pycache__/__init__.cpython-38.pyc b/g4f/__pycache__/__init__.cpython-38.pyc index 39450f56134a5ac8d4a055144c8884a9d44a6b08..2f8921585ab011728e4ceed512d79ec9a48c8414 100644 Binary files a/g4f/__pycache__/__init__.cpython-38.pyc and b/g4f/__pycache__/__init__.cpython-38.pyc differ diff --git a/g4f/__pycache__/models.cpython-311.pyc b/g4f/__pycache__/models.cpython-311.pyc index 140b5b1ce08022d4678f7785af1bd858f8c45e2b..25fb1acbcd5eb2eb6c4009a7d65d478b071d8f8a 100644 Binary files a/g4f/__pycache__/models.cpython-311.pyc and b/g4f/__pycache__/models.cpython-311.pyc differ diff --git a/g4f/__pycache__/models.cpython-38.pyc b/g4f/__pycache__/models.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ed912043d73702d385fdb4257d3817326996e08 Binary files /dev/null and b/g4f/__pycache__/models.cpython-38.pyc differ diff --git a/g4f/__pycache__/typing.cpython-311.pyc b/g4f/__pycache__/typing.cpython-311.pyc index 9c0200f5aed1d5414e6f6bc59c45d012ee33f1ee..5593a3ab8284ed0f487056c24d206912137243d3 100644 Binary files a/g4f/__pycache__/typing.cpython-311.pyc and b/g4f/__pycache__/typing.cpython-311.pyc differ diff --git a/g4f/__pycache__/typing.cpython-38.pyc b/g4f/__pycache__/typing.cpython-38.pyc index 
f1e00b716e93056e82b0b6adc80ce90e0ad4048d..4dc2b7d3029ed1688704d056c3bfc22d8362148e 100644 Binary files a/g4f/__pycache__/typing.cpython-38.pyc and b/g4f/__pycache__/typing.cpython-38.pyc differ diff --git a/g4f/models.py b/g4f/models.py index 27336244ff156aea35be3b06b8607d96051aeee5..0c5eb961c7e9b48587bef233d997766b0c992c6e 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -1,243 +1,207 @@ -from types import ModuleType -from . import Provider +from __future__ import annotations from dataclasses import dataclass - +from .Provider import Bard, BaseProvider, GetGpt, H2o, Liaobots, Vercel, Equing @dataclass class Model: name: str base_provider: str - best_provider: ModuleType or None + best_provider: type[BaseProvider] -gpt_35_turbo = Model( - name="gpt-3.5-turbo", - base_provider="openai", - best_provider=Provider.GetGpt, +# Config for HuggingChat, OpenAssistant +# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You +default = Model( + name="", + base_provider="huggingface", + best_provider=H2o, ) -gpt_4 = Model( - name="gpt-4", - base_provider="openai", - best_provider=Provider.Bing, -) +# GPT-3.5 / GPT-4 +gpt_35_turbo = Model( + name = 'gpt-3.5-turbo', + base_provider = 'openai', + best_provider = GetGpt) -claude_instant_v1_100k = Model( - name="claude-instant-v1-100k", - base_provider="anthropic", - best_provider=Provider.Vercel, -) +gpt_4 = Model( + name = 'gpt-4', + base_provider = 'openai', + best_provider = Liaobots) -claude_instant_v1 = Model( - name="claude-instant-v1", - base_provider="anthropic", - best_provider=Provider.Vercel, -) +# Bard +palm = Model( + name = 'palm', + base_provider = 'google', + best_provider = Bard) -claude_v1_100k = Model( - name="claude-v1-100k", - base_provider="anthropic", - best_provider=Provider.Vercel, -) +# H2o +falcon_7b = Model( + name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3', + base_provider = 'huggingface', + best_provider = H2o) -claude_v1 = Model( - name="claude-v1", - base_provider="anthropic", - best_provider=Provider.Vercel, -) +falcon_40b = Model( + name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1', + base_provider = 'huggingface', + best_provider = H2o) -alpaca_7b = Model( - name="alpaca-7b", - base_provider="replicate", - best_provider=Provider.Vercel, -) +llama_13b = Model( + name = 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b', + base_provider = 'huggingface', + best_provider = H2o) -stablelm_tuned_alpha_7b = Model( - name="stablelm-tuned-alpha-7b", - base_provider="replicate", - best_provider=Provider.Vercel, -) +# Vercel +claude_instant_v1 = Model( + name = 'anthropic:claude-instant-v1', + base_provider = 'anthropic', + best_provider = Vercel) -bloom = Model( - name="bloom", - base_provider="huggingface", - best_provider=Provider.Vercel, -) +claude_v1 = Model( + name = 'anthropic:claude-v1', + base_provider = 'anthropic', + best_provider = Vercel) -bloomz = Model( - name="bloomz", - base_provider="huggingface", - best_provider=Provider.Vercel, -) +claude_v2 = Model( + name = 'anthropic:claude-v2', + base_provider = 'anthropic', + best_provider = Vercel) -flan_t5_xxl = Model( - name="flan-t5-xxl", - base_provider="huggingface", - best_provider=Provider.Vercel, -) +command_light_nightly = Model( + name = 'cohere:command-light-nightly', + base_provider = 'cohere', + best_provider = Vercel) -flan_ul2 = Model( - name="flan-ul2", - base_provider="huggingface", - best_provider=Provider.Vercel, -) +command_nightly = Model( + name = 'cohere:command-nightly', + base_provider = 'cohere', + best_provider = Vercel) gpt_neox_20b = Model( 
- name="gpt-neox-20b", - base_provider="huggingface", - best_provider=Provider.Vercel, -) + name = 'huggingface:EleutherAI/gpt-neox-20b', + base_provider = 'huggingface', + best_provider = Vercel) + +oasst_sft_1_pythia_12b = Model( + name = 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b', + base_provider = 'huggingface', + best_provider = Vercel) oasst_sft_4_pythia_12b_epoch_35 = Model( - name="oasst-sft-4-pythia-12b-epoch-3.5", - base_provider="huggingface", - best_provider=Provider.Vercel, -) + name = 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', + base_provider = 'huggingface', + best_provider = Vercel) santacoder = Model( - name="santacoder", - base_provider="huggingface", - best_provider=Provider.Vercel, -) - -command_medium_nightly = Model( - name="command-medium-nightly", - base_provider="cohere", - best_provider=Provider.Vercel, -) + name = 'huggingface:bigcode/santacoder', + base_provider = 'huggingface', + best_provider = Vercel) -command_xlarge_nightly = Model( - name="command-xlarge-nightly", - base_provider="cohere", - best_provider=Provider.Vercel, -) +bloom = Model( + name = 'huggingface:bigscience/bloom', + base_provider = 'huggingface', + best_provider = Vercel) -code_cushman_001 = Model( - name="code-cushman-001", - base_provider="openai", - best_provider=Provider.Vercel, -) +flan_t5_xxl = Model( + name = 'huggingface:google/flan-t5-xxl', + base_provider = 'huggingface', + best_provider = Vercel) code_davinci_002 = Model( - name="code-davinci-002", - base_provider="openai", - best_provider=Provider.Vercel, -) + name = 'openai:code-davinci-002', + base_provider = 'openai', + best_provider = Vercel) + +gpt_35_turbo_16k = Model( + name = 'openai:gpt-3.5-turbo-16k', + base_provider = 'openai', + best_provider = Vercel) + +gpt_35_turbo_16k_0613 = Model( + name = 'openai:gpt-3.5-turbo-16k-0613', + base_provider = 'openai', + best_provider = Equing) + +gpt_4_0613 = Model( + name = 'openai:gpt-4-0613', + base_provider = 'openai', + best_provider = Vercel) text_ada_001 = Model( - name="text-ada-001", - base_provider="openai", - best_provider=Provider.Vercel, -) + name = 'openai:text-ada-001', + base_provider = 'openai', + best_provider = Vercel) text_babbage_001 = Model( - name="text-babbage-001", - base_provider="openai", - best_provider=Provider.Vercel, -) + name = 'openai:text-babbage-001', + base_provider = 'openai', + best_provider = Vercel) text_curie_001 = Model( - name="text-curie-001", - base_provider="openai", - best_provider=Provider.Vercel, -) + name = 'openai:text-curie-001', + base_provider = 'openai', + best_provider = Vercel) text_davinci_002 = Model( - name="text-davinci-002", - base_provider="openai", - best_provider=Provider.Vercel, -) + name = 'openai:text-davinci-002', + base_provider = 'openai', + best_provider = Vercel) text_davinci_003 = Model( - name="text-davinci-003", - base_provider="openai", - best_provider=Provider.Vercel, -) - -palm = Model( - name="palm", - base_provider="google", - best_provider=Provider.Bard, -) - -falcon_40b = Model( - name="falcon-40b", - base_provider="huggingface", - best_provider=Provider.H2o, -) - -falcon_7b = Model( - name="falcon-7b", - base_provider="huggingface", - best_provider=Provider.H2o, -) - -llama_13b = Model( - name="llama-13b", - base_provider="huggingface", - best_provider=Provider.H2o, -) - -gpt_35_turbo_16k = Model( - name="gpt-3.5-turbo-16k", - base_provider="openai", - best_provider=Provider.EasyChat, -) - -gpt_35_turbo_0613 = Model( - name="gpt-3.5-turbo-0613", - base_provider="openai", 
- best_provider=Provider.EasyChat, -) - -gpt_35_turbo_16k_0613 = Model( - name="gpt-3.5-turbo-16k-0613", - base_provider="openai", - best_provider=Provider.EasyChat, -) + name = 'openai:text-davinci-003', + base_provider = 'openai', + best_provider = Vercel) -gpt_4_32k = Model(name="gpt-4-32k", base_provider="openai", best_provider=None) +llama13b_v2_chat = Model( + name = 'replicate:a16z-infra/llama13b-v2-chat', + base_provider = 'replicate', + best_provider = Vercel) -gpt_4_0613 = Model(name="gpt-4-0613", base_provider="openai", best_provider=None) +llama7b_v2_chat = Model( + name = 'replicate:a16z-infra/llama7b-v2-chat', + base_provider = 'replicate', + best_provider = Vercel) class ModelUtils: convert: dict[str, Model] = { - "gpt-3.5-turbo": gpt_35_turbo, - "gpt-3.5-turbo-16k": gpt_35_turbo_16k, - "gpt-3.5-turbo-0613": gpt_35_turbo_0613, - "gpt-3.5-turbo-16k-0613": gpt_35_turbo_16k_0613, - "gpt-4": gpt_4, - "gpt-4-32k": gpt_4_32k, - "gpt-4-0613": gpt_4_0613, - "claude-instant-v1-100k": claude_instant_v1_100k, - "claude-v1-100k": claude_v1_100k, - "claude-instant-v1": claude_instant_v1, - "claude-v1": claude_v1, - "alpaca-7b": alpaca_7b, - "stablelm-tuned-alpha-7b": stablelm_tuned_alpha_7b, - "bloom": bloom, - "bloomz": bloomz, - "flan-t5-xxl": flan_t5_xxl, - "flan-ul2": flan_ul2, - "gpt-neox-20b": gpt_neox_20b, - "oasst-sft-4-pythia-12b-epoch-3.5": oasst_sft_4_pythia_12b_epoch_35, - "santacoder": santacoder, - "command-medium-nightly": command_medium_nightly, - "command-xlarge-nightly": command_xlarge_nightly, - "code-cushman-001": code_cushman_001, - "code-davinci-002": code_davinci_002, - "text-ada-001": text_ada_001, - "text-babbage-001": text_babbage_001, - "text-curie-001": text_curie_001, - "text-davinci-002": text_davinci_002, - "text-davinci-003": text_davinci_003, - "palm2": palm, - "palm": palm, - "google": palm, - "google-bard": palm, - "google-palm": palm, - "bard": palm, - "falcon-40b": falcon_40b, - "falcon-7b": falcon_7b, - "llama-13b": llama_13b, - } + # GPT-3.5 / GPT-4 + 'gpt-3.5-turbo' : gpt_35_turbo, + 'gpt-4' : gpt_4, + + # Bard + 'palm2' : palm, + 'palm' : palm, + 'google' : palm, + 'google-bard' : palm, + 'google-palm' : palm, + 'bard' : palm, + + # H2o + 'falcon-40b' : falcon_40b, + 'falcon-7b' : falcon_7b, + 'llama-13b' : llama_13b, + + # Vercel + 'claude-instant-v1' : claude_instant_v1, + 'claude-v1' : claude_v1, + 'claude-v2' : claude_v2, + 'command-nightly' : command_nightly, + 'gpt-neox-20b' : gpt_neox_20b, + 'santacoder' : santacoder, + 'bloom' : bloom, + 'flan-t5-xxl' : flan_t5_xxl, + 'code-davinci-002' : code_davinci_002, + 'gpt-3.5-turbo-16k' : gpt_35_turbo_16k, + 'gpt-4-0613' : gpt_4_0613, + 'text-ada-001' : text_ada_001, + 'text-babbage-001' : text_babbage_001, + 'text-curie-001' : text_curie_001, + 'text-davinci-002' : text_davinci_002, + 'text-davinci-003' : text_davinci_003, + 'llama13b-v2-chat' : llama13b_v2_chat, + 'llama7b-v2-chat' : llama7b_v2_chat, + + 'oasst-sft-1-pythia-12b' : oasst_sft_1_pythia_12b, + 'oasst-sft-4-pythia-12b-epoch-3.5' : oasst_sft_4_pythia_12b_epoch_35, + 'command-light-nightly' : command_light_nightly, + 'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613, + } \ No newline at end of file diff --git a/g4f/typing.py b/g4f/typing.py index e41a567ae49dd26d2ace2a3732b0e8f0bbbaa4b0..0238603750f94b8f584cf5600a47ac35d8c81bc2 100644 --- a/g4f/typing.py +++ b/g4f/typing.py @@ -1,3 +1,14 @@ -from typing import Dict, NewType, Union, Optional, List, get_type_hints +from typing import Any, AsyncGenerator, Generator, NewType, Tuple, 
TypedDict, Union -sha256 = NewType('sha_256_hash', str) \ No newline at end of file +SHA256 = NewType('sha_256_hash', str) +CreateResult = Generator[str, None, None] + +__all__ = [ + 'Any', + 'AsyncGenerator', + 'Generator', + 'Tuple', + 'TypedDict', + 'SHA256', + 'CreateResult', +] \ No newline at end of file
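
Usage sketch (not part of the patch): the diff above replaces the old module-based providers with class-based ones and rewrites ChatCompletion.create to validate the provider, handle auth, and join the provider's generator when stream is False. The snippet below is a minimal, hedged example of calling that new entry point; it assumes the package is importable as g4f after this change, and the choice of the You provider, the model name, and the prompts are illustrative only.

import g4f
from g4f.Provider import You  # class-based provider added in this diff; working=True, supports_stream=True

# Non-streaming call: create() resolves "gpt-3.5-turbo" via ModelUtils.convert,
# runs the provider's async generator to completion, and joins the chunks.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    provider=You,
)
print(response)

# Streaming call: only valid for providers with supports_stream=True,
# otherwise create() raises; each yielded chunk is printed as it arrives.
for token in g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Write a haiku about the sea"}],
    provider=You,
    stream=True,
):
    print(token, end="", flush=True)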