diff --git a/g4f/.v1/requirements.txt b/g4f/.v1/requirements.txt
index 3a1f815b17ce3ea1a3475c192e2586d65c573164..4cbabf17a49e4b1d5d2c4a4d70458e631a016392 100644
--- a/g4f/.v1/requirements.txt
+++ b/g4f/.v1/requirements.txt
@@ -5,11 +5,13 @@ pypasser
names
colorama
curl_cffi
+aiohttp
+flask
+flask_cors
streamlit
selenium
fake-useragent
twocaptcha
-https://github.com/AI-Yash/st-chat/archive/refs/pull/24/head.zip
pydantic
pymailtm
Levenshtein
@@ -18,4 +20,6 @@ mailgw_temporary_email
pycryptodome
random-password-generator
numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability
-tornado>=6.3.2 # not directly required, pinned by Snyk to avoid a vulnerability
\ No newline at end of file
+tornado>=6.3.2 # not directly required, pinned by Snyk to avoid a vulnerability
+PyExecJS
+browser_cookie3
\ No newline at end of file
diff --git a/g4f/Provider/Provider.py b/g4f/Provider/Provider.py
index 12c23333f87185e5fa0ae8f368540c816ab079f8..f123becd8aeda0fd9c6a84a6e548a9d2c67eb349 100644
--- a/g4f/Provider/Provider.py
+++ b/g4f/Provider/Provider.py
@@ -5,6 +5,7 @@ url = None
model = None
supports_stream = False
needs_auth = False
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
return
diff --git a/g4f/Provider/Providers/AItianhu.py b/g4f/Provider/Providers/AItianhu.py
new file mode 100644
index 0000000000000000000000000000000000000000..0bdaa09a5770205a3e8e0afbeef57bb89bd67e2e
--- /dev/null
+++ b/g4f/Provider/Providers/AItianhu.py
@@ -0,0 +1,38 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+import json
+
+url = "https://www.aitianhu.com/api/chat-process"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+working = True
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ base = ''
+ for message in messages:
+ base += '%s: %s\n' % (message['role'], message['content'])
+ base += 'assistant:'
+
+ headers = {
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+ }
+ data = {
+ "prompt": base,
+ "options": {},
+ "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+ "temperature": kwargs.get("temperature", 0.8),
+ "top_p": kwargs.get("top_p", 1)
+ }
+ response = requests.post(url, headers=headers, json=data)
+ if response.status_code == 200:
+ lines = response.text.strip().split('\n')
+ res = json.loads(lines[-1])
+ yield res['text']
+ else:
+ print(f"Error Occurred::{response.status_code}")
+ return None
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Acytoo.py b/g4f/Provider/Providers/Acytoo.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f40eac21ed8d9ee32cea92ff797661c87ce90d0
--- /dev/null
+++ b/g4f/Provider/Providers/Acytoo.py
@@ -0,0 +1,42 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+import json
+
+url = "https://chat.acytoo.com/api/completions"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+working = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ base = ''
+ for message in messages:
+ base += '%s: %s\n' % (message['role'], message['content'])
+ base += 'assistant:'
+
+ headers = {
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+ }
+ data = {
+ "key": "",
+ "model": "gpt-3.5-turbo",
+ "messages": [
+ {
+ "role": "user",
+ "content": base,
+ "createdAt": 1688518523500
+ }
+ ],
+ "temperature": 1,
+ "password": ""
+ }
+
+ response = requests.post(url, headers=headers, data=json.dumps(data))
+ if response.status_code == 200:
+ yield response.text
+ else:
+ print(f"Error Occurred::{response.status_code}")
+ return None
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/AiService.py b/g4f/Provider/Providers/AiService.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f9d9c478f5f30bd3bb32034a3231806c4e15c5f
--- /dev/null
+++ b/g4f/Provider/Providers/AiService.py
@@ -0,0 +1,41 @@
+import os,sys
+import requests
+from ...typing import get_type_hints
+
+url = "https://aiservice.vercel.app/api/chat/answer"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+working = True
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ base = ''
+ for message in messages:
+ base += '%s: %s\n' % (message['role'], message['content'])
+ base += 'assistant:'
+
+ headers = {
+ "accept": "*/*",
+ "content-type": "text/plain;charset=UTF-8",
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "Referer": "https://aiservice.vercel.app/chat",
+ }
+ data = {
+ "input": base
+ }
+ response = requests.post(url, headers=headers, json=data)
+ if response.status_code == 200:
+ _json = response.json()
+ yield _json['data']
+ else:
+ print(f"Error Occurred::{response.status_code}")
+ return None
+
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join(
+ [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Aichat.py b/g4f/Provider/Providers/Aichat.py
index e4fde8c309f9ba0666c5edce35d777fa8fcf8d41..919486f2f419445fbf7d7df150e2927a0525eaea 100644
--- a/g4f/Provider/Providers/Aichat.py
+++ b/g4f/Provider/Providers/Aichat.py
@@ -5,6 +5,7 @@ url = 'https://chat-gpt.org/chat'
model = ['gpt-3.5-turbo']
supports_stream = False
needs_auth = False
+working = True
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
base = ''
diff --git a/g4f/Provider/Providers/Ails.py b/g4f/Provider/Providers/Ails.py
index 1a14b2e9aec50328b7b21d5980bd67c5eaee2b3a..60d3603ea6d897796ed39f870f915d263e792fe8 100644
--- a/g4f/Provider/Providers/Ails.py
+++ b/g4f/Provider/Providers/Ails.py
@@ -13,6 +13,8 @@ url: str = 'https://ai.ls'
model: str = 'gpt-3.5-turbo'
supports_stream = True
needs_auth = False
+working = True
+
class Utils:
def hash(json_data: Dict[str, str]) -> sha256:
@@ -45,7 +47,7 @@ def _create_completion(model: str, messages: list, temperature: float = 0.6, str
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'authorization': 'Bearer free',
'client-id': str(uuid.uuid4()),
- 'client-v': '0.1.217',
+ 'client-v': '0.1.249',
'content-type': 'application/json',
'origin': 'https://ai.ls',
'referer': 'https://ai.ls/',
@@ -73,7 +75,7 @@ def _create_completion(model: str, messages: list, temperature: float = 0.6, str
json_data = json.dumps(separators=(',', ':'), obj={
'model': 'gpt-3.5-turbo',
- 'temperature': 0.6,
+ 'temperature': temperature,
'stream': True,
'messages': messages} | sig)
diff --git a/g4f/Provider/Providers/Bard.py b/g4f/Provider/Providers/Bard.py
index 4c37c4b719430031fce41ce49946f0e6ac93d155..0d007a10ecc376949688726e2c633060e6e3b6e2 100644
--- a/g4f/Provider/Providers/Bard.py
+++ b/g4f/Provider/Providers/Bard.py
@@ -5,6 +5,8 @@ url = 'https://bard.google.com'
model = ['Palm2']
supports_stream = False
needs_auth = True
+working = True
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
diff --git a/g4f/Provider/Providers/Bing.py b/g4f/Provider/Providers/Bing.py
index 1d33cda5a8eb0e05e1074dc11713cda86ba777f4..94c1e21ac48225218aa5d4df73e08245f1e63a56 100644
--- a/g4f/Provider/Providers/Bing.py
+++ b/g4f/Provider/Providers/Bing.py
@@ -16,6 +16,7 @@ url = 'https://bing.com/chat'
model = ['gpt-4']
supports_stream = True
needs_auth = False
+working = True
ssl_context = ssl.create_default_context()
ssl_context.load_verify_locations(certifi.where())
diff --git a/g4f/Provider/Providers/BingHuan.py b/g4f/Provider/Providers/BingHuan.py
new file mode 100644
index 0000000000000000000000000000000000000000..64b67e4b108e81fa7c3d328da10a0cfb8128a64f
--- /dev/null
+++ b/g4f/Provider/Providers/BingHuan.py
@@ -0,0 +1,28 @@
+import os,sys
+import json
+import subprocess
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://b.ai-huan.xyz'
+model = ['gpt-3.5-turbo', 'gpt-4']
+supports_stream = True
+needs_auth = False
+working = False
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ path = os.path.dirname(os.path.realpath(__file__))
+ config = json.dumps({
+ 'messages': messages,
+ 'model': model}, separators=(',', ':'))
+ cmd = ['python', f'{path}/helpers/binghuan.py', config]
+
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+ for line in iter(p.stdout.readline, b''):
+ yield line.decode('cp1252')
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join(
+ [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/ChatgptAi.py b/g4f/Provider/Providers/ChatgptAi.py
index 00d4cf6f6bfb6435de9978900829662b26f12047..1f9ead0e558759a37af6c172db4fbee1595db8fb 100644
--- a/g4f/Provider/Providers/ChatgptAi.py
+++ b/g4f/Provider/Providers/ChatgptAi.py
@@ -6,6 +6,8 @@ url = 'https://chatgpt.ai/gpt-4/'
model = ['gpt-4']
supports_stream = False
needs_auth = False
+working = True
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
chat = ''
@@ -13,8 +15,7 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
chat += '%s: %s\n' % (message['role'], message['content'])
chat += 'assistant: '
- response = requests.get('https://chatgpt.ai/gpt-4/')
-
+ response = requests.get('https://chatgpt.ai/')
nonce, post_id, _, bot_id = re.findall(r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width', response.text)[0]
headers = {
diff --git a/g4f/Provider/Providers/ChatgptLogin.py b/g4f/Provider/Providers/ChatgptLogin.py
index 9551d15dd5121c4b42f80d0ba547a10f0868563b..0fdbab8e8354d774a956525344537084fca5aaea 100644
--- a/g4f/Provider/Providers/ChatgptLogin.py
+++ b/g4f/Provider/Providers/ChatgptLogin.py
@@ -8,7 +8,7 @@ url = 'https://chatgptlogin.ac'
model = ['gpt-3.5-turbo']
supports_stream = False
needs_auth = False
-
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
def get_nonce():
@@ -75,7 +75,7 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
'userName': '
User:
',
'aiName': 'AI:
',
'model': 'gpt-3.5-turbo',
- 'temperature': 0.8,
+ 'temperature': kwargs.get('temperature', 0.8),
'maxTokens': 1024,
'maxResults': 1,
'apiKey': '',
diff --git a/g4f/Provider/Providers/DeepAi.py b/g4f/Provider/Providers/DeepAi.py
index 02b08120ec8ef50c91c9237047a4f36c822a7bfc..b34dd60d21dd60695962b09bd98e0e14d4f6b66c 100644
--- a/g4f/Provider/Providers/DeepAi.py
+++ b/g4f/Provider/Providers/DeepAi.py
@@ -10,6 +10,8 @@ url = 'https://deepai.org'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
+working = True
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
def md5(text: str) -> str:
diff --git a/g4f/Provider/Providers/DfeHub.py b/g4f/Provider/Providers/DfeHub.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3ff804569e91373d72935d2fb53136313ab26c4
--- /dev/null
+++ b/g4f/Provider/Providers/DfeHub.py
@@ -0,0 +1,56 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+import json
+import re
+import time
+
+url = "https://chat.dfehub.com/api/chat"
+model = ['gpt-3.5-turbo']
+supports_stream = True
+needs_auth = False
+working = True
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ headers = {
+ 'authority': 'chat.dfehub.com',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'content-type': 'application/json',
+ 'origin': 'https://chat.dfehub.com',
+ 'referer': 'https://chat.dfehub.com/',
+ 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+ 'x-requested-with': 'XMLHttpRequest',
+ }
+
+ json_data = {
+ 'messages': messages,
+ 'model': 'gpt-3.5-turbo',
+ 'temperature': kwargs.get('temperature', 0.5),
+ 'presence_penalty': kwargs.get('presence_penalty', 0),
+ 'frequency_penalty': kwargs.get('frequency_penalty', 0),
+ 'top_p': kwargs.get('top_p', 1),
+ "stream": True,
+ }
+ response = requests.post('https://chat.dfehub.com/api/openai/v1/chat/completions',
+ headers=headers, json=json_data)
+
+ for chunk in response.iter_lines():
+ if b'detail' in chunk:
+ delay = re.findall(r"\d+\.\d+", chunk.decode())
+ delay = float(delay[-1])
+ print(f"Provider.DfeHub::Rate Limit Reached::Waiting {delay} seconds")
+ time.sleep(delay)
+ yield from _create_completion(model, messages, stream, **kwargs)
+ if b'content' in chunk:
+ data = json.loads(chunk.decode().split('data: ')[1])
+ yield (data['choices'][0]['delta']['content'])
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Providers/EasyChat.py b/g4f/Provider/Providers/EasyChat.py
new file mode 100644
index 0000000000000000000000000000000000000000..909428faddc36760b08a3dd02ddf25d13ed4fb23
--- /dev/null
+++ b/g4f/Provider/Providers/EasyChat.py
@@ -0,0 +1,52 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+import json
+
+url = "https://free.easychat.work"
+model = ['gpt-3.5-turbo']
+supports_stream = True
+needs_auth = False
+working = True
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ headers = {
+ 'authority': 'free.easychat.work',
+ 'accept': 'text/event-stream',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'content-type': 'application/json',
+ 'endpoint': '',
+ 'origin': 'https://free.easychat.work',
+ 'plugins': '0',
+ 'referer': 'https://free.easychat.work/',
+ 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+ 'usesearch': 'false',
+ 'x-requested-with': 'XMLHttpRequest',
+ }
+
+ json_data = {
+ 'messages': messages,
+ 'stream': True,
+ 'model': model,
+ 'temperature': kwargs.get('temperature', 0.5),
+ 'presence_penalty': kwargs.get('presence_penalty', 0),
+ 'frequency_penalty': kwargs.get('frequency_penalty', 0),
+ 'top_p': kwargs.get('top_p', 1),
+ }
+
+ response = requests.post('https://free.easychat.work/api/openai/v1/chat/completions',
+ headers=headers, json=json_data)
+
+ for chunk in response.iter_lines():
+ if b'content' in chunk:
+ data = json.loads(chunk.decode().split('data: ')[1])
+ yield (data['choices'][0]['delta']['content'])
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Forefront.py b/g4f/Provider/Providers/Forefront.py
index e7e89831cc4ec6dc37ea094d9828a7582e981ff1..70ea6725ecef8f3e1acfc1f8f60c4acf06bb71b8 100644
--- a/g4f/Provider/Providers/Forefront.py
+++ b/g4f/Provider/Providers/Forefront.py
@@ -7,6 +7,8 @@ url = 'https://forefront.com'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
+working = False
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
json_data = {
diff --git a/g4f/Provider/Providers/GetGpt.py b/g4f/Provider/Providers/GetGpt.py
index 56a121f6ee5f430da7beda3b65abdea64a87c36b..bafc0ce88123ca09f55e7e76ed786d11f1c9ba9b 100644
--- a/g4f/Provider/Providers/GetGpt.py
+++ b/g4f/Provider/Providers/GetGpt.py
@@ -9,6 +9,8 @@ url = 'https://chat.getgpt.world/'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
+working = True
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
def encrypt(e):
diff --git a/g4f/Provider/Providers/H2o.py b/g4f/Provider/Providers/H2o.py
index eabf94e2dc1e6167f746a820e34c335f2aa8578e..93e0d63be5ba2f8e334c9ebca4ee04c31eeaf6e0 100644
--- a/g4f/Provider/Providers/H2o.py
+++ b/g4f/Provider/Providers/H2o.py
@@ -10,6 +10,7 @@ url = 'https://gpt-gm.h2o.ai'
model = ['falcon-40b', 'falcon-7b', 'llama-13b']
supports_stream = True
needs_auth = False
+working = True
models = {
'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
@@ -18,89 +19,76 @@ models = {
}
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- conversation = 'instruction: this is a conversation beween, a user and an AI assistant, respond to the latest message, referring to the conversation if needed\n'
+
+ conversation = ''
for message in messages:
conversation += '%s: %s\n' % (message['role'], message['content'])
- conversation += 'assistant:'
- client = Session()
- client.headers = {
- 'authority': 'gpt-gm.h2o.ai',
- 'origin': 'https://gpt-gm.h2o.ai',
- 'referer': 'https://gpt-gm.h2o.ai/',
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'document',
- 'sec-fetch-mode': 'navigate',
- 'sec-fetch-site': 'same-origin',
- 'sec-fetch-user': '?1',
- 'upgrade-insecure-requests': '1',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
- }
-
- client.get('https://gpt-gm.h2o.ai/')
- response = client.post('https://gpt-gm.h2o.ai/settings', data={
- 'ethicsModalAccepted': 'true',
- 'shareConversationsWithModelAuthors': 'true',
- 'ethicsModalAcceptedAt': '',
- 'activeModel': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
- 'searchEnabled': 'true',
- })
+ conversation += 'assistant: '
+ session = requests.Session()
+ response = session.get("https://gpt-gm.h2o.ai/")
headers = {
- 'authority': 'gpt-gm.h2o.ai',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'origin': 'https://gpt-gm.h2o.ai',
- 'referer': 'https://gpt-gm.h2o.ai/',
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
+ "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
+ "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Upgrade-Insecure-Requests": "1",
+ "Sec-Fetch-Dest": "document",
+ "Sec-Fetch-Mode": "navigate",
+ "Sec-Fetch-Site": "same-origin",
+ "Sec-Fetch-User": "?1",
+ "Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"
}
-
- json_data = {
- 'model': models[model]
+ data = {
+ "ethicsModalAccepted": "true",
+ "shareConversationsWithModelAuthors": "true",
+ "ethicsModalAcceptedAt": "",
+ "activeModel": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
+ "searchEnabled": "true"
}
+ response = session.post("https://gpt-gm.h2o.ai/settings", headers=headers, data=data)
- response = client.post('https://gpt-gm.h2o.ai/conversation',
- headers=headers, json=json_data)
- conversationId = response.json()['conversationId']
-
-
- completion = client.post(f'https://gpt-gm.h2o.ai/conversation/{conversationId}', stream=True, json = {
- 'inputs': conversation,
- 'parameters': {
- 'temperature': kwargs.get('temperature', 0.4),
- 'truncate': kwargs.get('truncate', 2048),
- 'max_new_tokens': kwargs.get('max_new_tokens', 1024),
- 'do_sample': kwargs.get('do_sample', True),
- 'repetition_penalty': kwargs.get('repetition_penalty', 1.2),
- 'return_full_text': kwargs.get('return_full_text', False)
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
+ "Accept": "*/*",
+ "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
+ "Content-Type": "application/json",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Referer": "https://gpt-gm.h2o.ai/"
+ }
+ data = {
+ "model": models[model]
+ }
+
+ conversation_id = session.post("https://gpt-gm.h2o.ai/conversation", headers=headers, json=data)
+ data = {
+ "inputs": conversation,
+ "parameters": {
+ "temperature": kwargs.get('temperature', 0.4),
+ "truncate": kwargs.get('truncate', 2048),
+ "max_new_tokens": kwargs.get('max_new_tokens', 1024),
+ "do_sample": kwargs.get('do_sample', True),
+ "repetition_penalty": kwargs.get('repetition_penalty', 1.2),
+ "return_full_text": kwargs.get('return_full_text', False)
},
- 'stream': True,
- 'options': {
- 'id': kwargs.get('id', str(uuid4())),
- 'response_id': kwargs.get('response_id', str(uuid4())),
- 'is_retry': False,
- 'use_cache': False,
- 'web_search_id': ''
+ "stream": True,
+ "options": {
+ "id": kwargs.get('id', str(uuid4())),
+ "response_id": kwargs.get('response_id', str(uuid4())),
+ "is_retry": False,
+ "use_cache": False,
+ "web_search_id": ""
}
- })
+ }
+
+ response = session.post(f"https://gpt-gm.h2o.ai/conversation/{conversation_id.json()['conversationId']}", headers=headers, json=data)
+ generated_text = response.text.replace("\n", "").split("data:")
+ generated_text = json.loads(generated_text[-1])
+
+ return generated_text["generated_text"]
- for line in completion.iter_lines():
- if b'data' in line:
- line = loads(line.decode('utf-8').replace('data:', ''))
- token = line['token']['text']
-
- if token == '<|endoftext|>':
- break
- else:
- yield (token)
-
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Liaobots.py b/g4f/Provider/Providers/Liaobots.py
index 76b13c31924b443423c8376f1b0082e2f900a0b7..75746c03baa570334c8306ddf7f08962556e71e6 100644
--- a/g4f/Provider/Providers/Liaobots.py
+++ b/g4f/Provider/Providers/Liaobots.py
@@ -5,6 +5,7 @@ url = 'https://liaobots.com'
model = ['gpt-3.5-turbo', 'gpt-4']
supports_stream = True
needs_auth = True
+working = False
models = {
'gpt-4': {
diff --git a/g4f/Provider/Providers/Lockchat.py b/g4f/Provider/Providers/Lockchat.py
index d97bc67b55a66ac96ef2071fe1fd4bf6ae6220da..dd1edb84b5ee458ae3d706bd97f8ab281799354b 100644
--- a/g4f/Provider/Providers/Lockchat.py
+++ b/g4f/Provider/Providers/Lockchat.py
@@ -2,15 +2,16 @@ import requests
import os
import json
from ...typing import sha256, Dict, get_type_hints
-url = 'http://super.lockchat.app'
+url = 'http://supertest.lockchat.app'
model = ['gpt-4', 'gpt-3.5-turbo']
supports_stream = True
needs_auth = False
+working = False
def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
payload = {
- "temperature": 0.7,
+ "temperature": temperature,
"messages": messages,
"model": model,
"stream": True,
@@ -18,7 +19,7 @@ def _create_completion(model: str, messages: list, stream: bool, temperature: fl
headers = {
"user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
}
- response = requests.post("http://super.lockchat.app/v1/chat/completions?auth=FnMNPlwZEnGFqvEc9470Vw==",
+ response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
json=payload, headers=headers, stream=True)
for token in response.iter_lines():
if b'The model: `gpt-4` does not exist' in token:
diff --git a/g4f/Provider/Providers/Theb.py b/g4f/Provider/Providers/Theb.py
index aa43ebc55d74ffaa722fe008424fce97c622a323..a78fb51f83f89c6067130db1ca59eed8aa64ae8b 100644
--- a/g4f/Provider/Providers/Theb.py
+++ b/g4f/Provider/Providers/Theb.py
@@ -9,6 +9,7 @@ url = 'https://theb.ai'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
@@ -20,7 +21,7 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
cmd = ['python3', f'{path}/helpers/theb.py', config]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
+
for line in iter(p.stdout.readline, b''):
yield line.decode('utf-8')
diff --git a/g4f/Provider/Providers/Vercel.py b/g4f/Provider/Providers/Vercel.py
index e5df9cf017e4c1a265f5c9d5e48eb5c10a56e60a..03d9be174ded204153edad5c1542e4b4db8a8b84 100644
--- a/g4f/Provider/Providers/Vercel.py
+++ b/g4f/Provider/Providers/Vercel.py
@@ -11,6 +11,7 @@ from ...typing import sha256, Dict, get_type_hints
url = 'https://play.vercel.ai'
supports_stream = True
needs_auth = False
+working = False
models = {
'claude-instant-v1': 'anthropic:claude-instant-v1',
@@ -41,122 +42,19 @@ vercel_models = {'anthropic:claude-instant-v1': {'id': 'anthropic:claude-instant
'id': 'huggingface:bigcode/santacoder', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigCode', 'instructions': 'The model was trained on GitHub code. As such it is not an instruction model and commands like "Write a function that computes the square root." do not work well. You should phrase commands like they occur in source code such as comments (e.g. # the following function computes the sqrt) or write a function signature and docstring and let the model complete the function body.', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'santacoder'}, 'cohere:command-medium-nightly': {'id': 'cohere:command-medium-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-medium-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'cohere:command-xlarge-nightly': {'id': 'cohere:command-xlarge-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-xlarge-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:gpt-4': {'id': 'openai:gpt-4', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 
'OpenAI', 'name': 'gpt-4', 'minBillingTier': 'pro', 'parameters': {'temperature': {'value': 0.7, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:code-cushman-001': {'id': 'openai:code-cushman-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-cushman-001'}, 'openai:code-davinci-002': {'id': 'openai:code-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-davinci-002'}, 'openai:gpt-3.5-turbo': {'id': 'openai:gpt-3.5-turbo', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.7, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-3.5-turbo'}, 'openai:text-ada-001': {'id': 'openai:text-ada-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-ada-001', 'parameters': 
{'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-babbage-001': {'id': 'openai:text-babbage-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-babbage-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-curie-001': {'id': 'openai:text-curie-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-curie-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-002': {'id': 'openai:text-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-002', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-003': {'id': 'openai:text-davinci-003', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-003', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': 
[50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}}
-# based on https://github.com/ading2210/vercel-llm-api // modified
-class Client:
- def __init__(self):
- self.session = requests.Session()
- self.headers = {
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110 Safari/537.36',
- 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
- 'Accept-Encoding': 'gzip, deflate, br',
- 'Accept-Language': 'en-US,en;q=0.5',
- 'Te': 'trailers',
- 'Upgrade-Insecure-Requests': '1'
- }
- self.session.headers.update(self.headers)
-
- def get_token(self):
- b64 = self.session.get('https://sdk.vercel.ai/openai.jpeg').text
- data = json.loads(base64.b64decode(b64))
-
- code = 'const globalThis = {data: `sentinel`}; function token() {return (%s)(%s)}' % (
- data['c'], data['a'])
-
- token_string = json.dumps(separators=(',', ':'),
- obj={'r': execjs.compile(code).call('token'), 't': data['t']})
-
- return base64.b64encode(token_string.encode()).decode()
-
- def get_default_params(self, model_id):
- return {key: param['value'] for key, param in vercel_models[model_id]['parameters'].items()}
-
- def generate(self, model_id: str, prompt: str, params: dict = {}):
- if not ':' in model_id:
- model_id = models[model_id]
-
- defaults = self.get_default_params(model_id)
-
- payload = defaults | params | {
- 'prompt': prompt,
- 'model': model_id,
- }
-
- headers = self.headers | {
- 'Accept-Encoding': 'gzip, deflate, br',
- 'Custom-Encoding': self.get_token(),
- 'Host': 'sdk.vercel.ai',
- 'Origin': 'https://sdk.vercel.ai',
- 'Referrer': 'https://sdk.vercel.ai',
- 'Sec-Fetch-Dest': 'empty',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-origin',
- }
-
- chunks_queue = queue.Queue()
- error = None
- response = None
-
- def callback(data):
- chunks_queue.put(data.decode())
-
- def request_thread():
- nonlocal response, error
- for _ in range(3):
- try:
- response = self.session.post('https://sdk.vercel.ai/api/generate',
- json=payload, headers=headers, content_callback=callback)
- response.raise_for_status()
-
- except Exception as e:
- if _ == 2:
- error = e
-
- else:
- continue
-
- thread = threading.Thread(target=request_thread, daemon=True)
- thread.start()
-
- text = ''
- index = 0
- while True:
- try:
- chunk = chunks_queue.get(block=True, timeout=0.1)
-
- except queue.Empty:
- if error:
- raise error
-
- elif response:
- break
-
- else:
- continue
-
- text += chunk
- lines = text.split('\n')
-
- if len(lines) - 1 > index:
- new = lines[index:-1]
- for word in new:
- yield json.loads(word)
- index = len(lines) - 1
-
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- yield 'Vercel is currently not working.'
return
+ # conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
- conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
-
- for message in messages:
- conversation += '%s: %s\n' % (message['role'], message['content'])
+ # for message in messages:
+ # conversation += '%s: %s\n' % (message['role'], message['content'])
- conversation += 'assistant: '
+ # conversation += 'assistant: '
- completion = Client().generate(model, conversation)
+ # completion = Client().generate(model, conversation)
- for token in completion:
- yield token
+ # for token in completion:
+ # yield token
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Wewordle.py b/g4f/Provider/Providers/Wewordle.py
new file mode 100644
index 0000000000000000000000000000000000000000..116ebb85e075ef9e53ca7b4cf26f1e193cac0382
--- /dev/null
+++ b/g4f/Provider/Providers/Wewordle.py
@@ -0,0 +1,73 @@
+import os,sys
+import requests
+import json
+import random
+import time
+import string
+from ...typing import sha256, Dict, get_type_hints
+
+url = "https://wewordle.org/gptapi/v1/android/turbo"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+working = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ base = ''
+ for message in messages:
+ base += '%s: %s\n' % (message['role'], message['content'])
+ base += 'assistant:'
+ # randomize user id and app id
+ _user_id = ''.join(random.choices(f'{string.ascii_lowercase}{string.digits}', k=16))
+ _app_id = ''.join(random.choices(f'{string.ascii_lowercase}{string.digits}', k=31))
+    # build the current date in UTC ISO-8601 format
+ _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
+ headers = {
+ 'accept': '*/*',
+ 'pragma': 'no-cache',
+ 'Content-Type': 'application/json',
+ 'Connection':'keep-alive'
+ # user agent android client
+ # 'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 10; SM-G975F Build/QP1A.190711.020)',
+
+ }
+ data = {
+ "user": _user_id,
+ "messages": [
+ {"role": "user", "content": base}
+ ],
+ "subscriber": {
+ "originalPurchaseDate": None,
+ "originalApplicationVersion": None,
+ "allPurchaseDatesMillis": {},
+ "entitlements": {
+ "active": {},
+ "all": {}
+ },
+ "allPurchaseDates": {},
+ "allExpirationDatesMillis": {},
+ "allExpirationDates": {},
+ "originalAppUserId": f"$RCAnonymousID:{_app_id}",
+ "latestExpirationDate": None,
+ "requestDate": _request_date,
+ "latestExpirationDateMillis": None,
+ "nonSubscriptionTransactions": [],
+ "originalPurchaseDateMillis": None,
+ "managementURL": None,
+ "allPurchasedProductIdentifiers": [],
+ "firstSeen": _request_date,
+ "activeSubscriptions": []
+ }
+ }
+ response = requests.post(url, headers=headers, data=json.dumps(data))
+ if response.status_code == 200:
+ _json = response.json()
+ if 'message' in _json:
+ yield _json['message']['content']
+ else:
+ print(f"Error Occurred::{response.status_code}")
+ return None
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join(
+ [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/You.py b/g4f/Provider/Providers/You.py
index 02a2774ce62bae33612a73272d584dc2acaf3eb0..3c32111815c6f111fac8b63051d7a52fcbd986ae 100644
--- a/g4f/Provider/Providers/You.py
+++ b/g4f/Provider/Providers/You.py
@@ -9,6 +9,7 @@ url = 'https://you.com'
model = 'gpt-3.5-turbo'
supports_stream = True
needs_auth = False
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
diff --git a/g4f/Provider/Providers/Yqcloud.py b/g4f/Provider/Providers/Yqcloud.py
index 488951dd5572df2a05db00387da4c6f44c7b6759..fae446826f15d2a5aaa2f1307f666bb564ad06b2 100644
--- a/g4f/Provider/Providers/Yqcloud.py
+++ b/g4f/Provider/Providers/Yqcloud.py
@@ -9,6 +9,7 @@ model = [
]
supports_stream = True
needs_auth = False
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
diff --git a/g4f/Provider/Providers/__init__.py b/g4f/Provider/Providers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/g4f/Provider/Providers/__pycache__/AItianhu.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/AItianhu.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..259efefb4ea2fa3e027ad2c5d312ed4d7a4870a8
Binary files /dev/null and b/g4f/Provider/Providers/__pycache__/AItianhu.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Acytoo.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/Acytoo.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4df4908271e66d8952b3163a428ee54ea0126f2c
Binary files /dev/null and b/g4f/Provider/Providers/__pycache__/Acytoo.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/AiService.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/AiService.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..52d9a455877197ddb9f7b4a4c5b79066da66b04b
Binary files /dev/null and b/g4f/Provider/Providers/__pycache__/AiService.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Aichat.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/Aichat.cpython-311.pyc
index e7b1ee4a3f8489819a212964fee6c08ff4a7b1a5..c38744ae71c3351139f4fe3542e96c7168ca6ff1 100644
Binary files a/g4f/Provider/Providers/__pycache__/Aichat.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/Aichat.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Aichat.cpython-38.pyc b/g4f/Provider/Providers/__pycache__/Aichat.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4f5027050e108ad1e391a7baabe07f13fc542d87
Binary files /dev/null and b/g4f/Provider/Providers/__pycache__/Aichat.cpython-38.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Ails.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/Ails.cpython-311.pyc
index 9d36a458b6fa820a7c8160a6fd8a4951851541d1..3ca61a81b40cca88f7479339b736ac17cd89ad11 100644
Binary files a/g4f/Provider/Providers/__pycache__/Ails.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/Ails.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Ails.cpython-38.pyc b/g4f/Provider/Providers/__pycache__/Ails.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c33af766acedffb8188bd34aab41cd4559359b49
Binary files /dev/null and b/g4f/Provider/Providers/__pycache__/Ails.cpython-38.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Bard.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/Bard.cpython-311.pyc
index 28fe7520a4051e4a7494349fc383e9b6f9c7a54f..890bb31c41e5a6c656d2fca322cd8b0e90b93d77 100644
Binary files a/g4f/Provider/Providers/__pycache__/Bard.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/Bard.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Bard.cpython-38.pyc b/g4f/Provider/Providers/__pycache__/Bard.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fcff5fb83949f0dab6c08dd63c3e8d1836ddc943
Binary files /dev/null and b/g4f/Provider/Providers/__pycache__/Bard.cpython-38.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Bing.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/Bing.cpython-311.pyc
index bad386939e35a11ca5d24d736ac9c244a7aa9e42..dbac5a23d429b2b7fe621d11270ae426d3342467 100644
Binary files a/g4f/Provider/Providers/__pycache__/Bing.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/Bing.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Bing.cpython-38.pyc b/g4f/Provider/Providers/__pycache__/Bing.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd69b149e52c6972029da079a14f4c492ffe0e07
Binary files /dev/null and b/g4f/Provider/Providers/__pycache__/Bing.cpython-38.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/BingHuan.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/BingHuan.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e2cb69a663a93aa6a6cc58a1c879699e2f8853a1
Binary files /dev/null and b/g4f/Provider/Providers/__pycache__/BingHuan.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/ChatgptAi.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/ChatgptAi.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..575c432fd61298e52301b315c981fa1f3765a8b6
Binary files /dev/null and b/g4f/Provider/Providers/__pycache__/ChatgptAi.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/ChatgptLogin.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/ChatgptLogin.cpython-311.pyc
index 3289e21ab0e5ff878dd81c749fa0925bfb3d193f..fb77255f278b93719f0e80a85d85dbef20e9253b 100644
Binary files a/g4f/Provider/Providers/__pycache__/ChatgptLogin.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/ChatgptLogin.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/DeepAi.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/DeepAi.cpython-311.pyc
index c5951d18adb1f3ac52740f42e89978b913dcdb35..acde15a665eedde305dc31d2cdde04cd70f1e71f 100644
Binary files a/g4f/Provider/Providers/__pycache__/DeepAi.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/DeepAi.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/DfeHub.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/DfeHub.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7dd362e33bb2ed34db4a75be45fa57994d22966a
Binary files /dev/null and b/g4f/Provider/Providers/__pycache__/DfeHub.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/EasyChat.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/EasyChat.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6f0afd739feeb37907a5cf14e2803218cd232c21
Binary files /dev/null and b/g4f/Provider/Providers/__pycache__/EasyChat.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Forefront.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/Forefront.cpython-311.pyc
index 45e42c53a7628e0510a504950969c0944478e13b..1c69c6897324cc8772b9e398f0b8e03736db3b77 100644
Binary files a/g4f/Provider/Providers/__pycache__/Forefront.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/Forefront.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/GetGpt.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/GetGpt.cpython-311.pyc
index 79f2afe2b12e19dda6c1de9e77c5e55889c33f0f..0787012cd0b6bf8f430c3ca5fd100aabe2333570 100644
Binary files a/g4f/Provider/Providers/__pycache__/GetGpt.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/GetGpt.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/H2o.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/H2o.cpython-311.pyc
index 3fc4d6b22de5e9054e21582edc426d9621c1b657..851d9b67b50e77fda9d44c17ef2bf73c1dfa4c4d 100644
Binary files a/g4f/Provider/Providers/__pycache__/H2o.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/H2o.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Liaobots.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/Liaobots.cpython-311.pyc
index 66e07a66f9eb24710e745150c37ba09d85707080..14716465b52fad981490fb533018cb4a743c1489 100644
Binary files a/g4f/Provider/Providers/__pycache__/Liaobots.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/Liaobots.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Lockchat.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/Lockchat.cpython-311.pyc
index 0b47b4e83268d94f78d2db511ed29d5cca2ec2f9..f454183a04cf8f125804054f9dc22aeabc5dc2d5 100644
Binary files a/g4f/Provider/Providers/__pycache__/Lockchat.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/Lockchat.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Theb.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/Theb.cpython-311.pyc
index 4f072ffe258c67a40b629d7cfe2f9d643c3dafd4..22c8ad81bde4e4ea8d14f9dc5fad767a2754d76f 100644
Binary files a/g4f/Provider/Providers/__pycache__/Theb.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/Theb.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Theb.cpython-38.pyc b/g4f/Provider/Providers/__pycache__/Theb.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..92ab307ea7d376277394a7dc411105bbae0f51e7
Binary files /dev/null and b/g4f/Provider/Providers/__pycache__/Theb.cpython-38.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Vercel.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/Vercel.cpython-311.pyc
index bc8ec7fbb82b86c2136e40c1fccfdecb1ff7fa55..9b0fb3b67cb36795d206b1e4a3ae26be482b1c9e 100644
Binary files a/g4f/Provider/Providers/__pycache__/Vercel.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/Vercel.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Wewordle.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/Wewordle.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5ad5050a165e8d5e54b0cd3dfd4d95bd229cb322
Binary files /dev/null and b/g4f/Provider/Providers/__pycache__/Wewordle.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/You.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/You.cpython-311.pyc
index 51be1f855940a9d4d6a1af938740f4865fd88f64..6d5fe165ecb2767bb50fb000236eee0e91297838 100644
Binary files a/g4f/Provider/Providers/__pycache__/You.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/You.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/You.cpython-38.pyc b/g4f/Provider/Providers/__pycache__/You.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..56c5e0492ba0dfb1bebe2ac73e647a98c158c01d
Binary files /dev/null and b/g4f/Provider/Providers/__pycache__/You.cpython-38.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Yqcloud.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/Yqcloud.cpython-311.pyc
index ea98a778526a40b605edffedb421a880921c15d8..41fcb7ad3cade0e047ff6e22a299f53db5bddb99 100644
Binary files a/g4f/Provider/Providers/__pycache__/Yqcloud.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/Yqcloud.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/Yqcloud.cpython-38.pyc b/g4f/Provider/Providers/__pycache__/Yqcloud.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..55ffa56f35f9045d67efe2a85ba0fdad8cbdae09
Binary files /dev/null and b/g4f/Provider/Providers/__pycache__/Yqcloud.cpython-38.pyc differ
diff --git a/g4f/Provider/Providers/__pycache__/__init__.cpython-311.pyc b/g4f/Provider/Providers/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..151040940cf41299610cbfc070b8e9759bf5e61d
Binary files /dev/null and b/g4f/Provider/Providers/__pycache__/__init__.cpython-311.pyc differ
diff --git a/g4f/Provider/Providers/helpers/binghuan.py b/g4f/Provider/Providers/helpers/binghuan.py
new file mode 100644
index 0000000000000000000000000000000000000000..203bbe45747a997760a3995a2d311ae5d9f1e716
--- /dev/null
+++ b/g4f/Provider/Providers/helpers/binghuan.py
@@ -0,0 +1,221 @@
+# Original Code From : https://gitler.moe/g4f/gpt4free
+# https://gitler.moe/g4f/gpt4free/src/branch/main/g4f/Provider/Providers/helpers/bing.py
+import sys
+import ssl
+import uuid
+import json
+import time
+import random
+import asyncio
+import certifi
+# import requests
+from curl_cffi import requests
+import websockets
+import browser_cookie3
+
+config = json.loads(sys.argv[1])
+
+ssl_context = ssl.create_default_context()
+ssl_context.load_verify_locations(certifi.where())
+
+
+
+conversationstyles = {
+ 'gpt-4': [ #'precise'
+ "nlu_direct_response_filter",
+ "deepleo",
+ "disable_emoji_spoken_text",
+ "responsible_ai_policy_235",
+ "enablemm",
+ "h3precise",
+ "rcsprtsalwlst",
+ "dv3sugg",
+ "autosave",
+ "clgalileo",
+ "gencontentv3"
+ ],
+ 'balanced': [
+ "nlu_direct_response_filter",
+ "deepleo",
+ "disable_emoji_spoken_text",
+ "responsible_ai_policy_235",
+ "enablemm",
+ "harmonyv3",
+ "rcsprtsalwlst",
+ "dv3sugg",
+ "autosave"
+ ],
+ 'gpt-3.5-turbo': [ #'precise'
+ "nlu_direct_response_filter",
+ "deepleo",
+ "disable_emoji_spoken_text",
+ "responsible_ai_policy_235",
+ "enablemm",
+ "h3imaginative",
+ "rcsprtsalwlst",
+ "dv3sugg",
+ "autosave",
+ "gencontentv3"
+ ]
+}
+
+def format(msg: dict) -> str:
+ return json.dumps(msg) + '\x1e'
+
+def get_token():
+ return
+
+ try:
+ cookies = {c.name: c.value for c in browser_cookie3.edge(domain_name='bing.com')}
+ return cookies['_U']
+ except:
+ print('Error: could not find bing _U cookie in edge browser.')
+ exit(1)
+
+class AsyncCompletion:
+ async def create(
+ prompt : str = None,
+ optionSets : list = None,
+ token : str = None): # No auth required anymore
+
+ create = None
+ for _ in range(5):
+ try:
+ create = requests.get('https://b.ai-huan.xyz/turing/conversation/create',
+ headers = {
+ 'host': 'b.ai-huan.xyz',
+ 'accept-encoding': 'gzip, deflate, br',
+ 'connection': 'keep-alive',
+ 'authority': 'b.ai-huan.xyz',
+ 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'max-age=0',
+ 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+ 'sec-ch-ua-arch': '"x86"',
+ 'sec-ch-ua-bitness': '"64"',
+ 'sec-ch-ua-full-version': '"110.0.1587.69"',
+ 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-model': '""',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-ch-ua-platform-version': '"15.0.0"',
+ 'sec-fetch-dest': 'document',
+ 'sec-fetch-mode': 'navigate',
+ 'sec-fetch-site': 'none',
+ 'sec-fetch-user': '?1',
+ 'upgrade-insecure-requests': '1',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
+ 'x-edge-shopping-flag': '1',
+ 'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
+ }
+ )
+
+ conversationId = create.json()['conversationId']
+ clientId = create.json()['clientId']
+ conversationSignature = create.json()['conversationSignature']
+
+ except Exception as e:
+ time.sleep(0.5)
+ continue
+
+ if create == None: raise Exception('Failed to create conversation.')
+
+ wss: websockets.WebSocketClientProtocol or None = None
+
+ wss = await websockets.connect('wss://sydney.vcanbb.chat/sydney/ChatHub', max_size = None, ssl = ssl_context,
+ extra_headers = {
+ 'accept': 'application/json',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'content-type': 'application/json',
+ 'sec-ch-ua': '"Not_A Brand";v="99", Microsoft Edge";v="110", "Chromium";v="110"',
+ 'sec-ch-ua-arch': '"x86"',
+ 'sec-ch-ua-bitness': '"64"',
+ 'sec-ch-ua-full-version': '"109.0.1518.78"',
+ 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-model': "",
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-ch-ua-platform-version': '"15.0.0"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'x-ms-client-request-id': str(uuid.uuid4()),
+ 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
+ 'Referer': 'https://b.ai-huan.xyz/search?q=Bing+AI&showconv=1&FORM=hpcodx',
+ 'Referrer-Policy': 'origin-when-cross-origin',
+ 'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
+ }
+ )
+
+ await wss.send(format({'protocol': 'json', 'version': 1}))
+ await wss.recv()
+
+ struct = {
+ 'arguments': [
+ {
+ 'source': 'cib',
+ 'optionsSets': optionSets,
+ 'isStartOfSession': True,
+ 'message': {
+ 'author': 'user',
+ 'inputMethod': 'Keyboard',
+ 'text': prompt,
+ 'messageType': 'Chat'
+ },
+ 'conversationSignature': conversationSignature,
+ 'participant': {
+ 'id': clientId
+ },
+ 'conversationId': conversationId
+ }
+ ],
+ 'invocationId': '0',
+ 'target': 'chat',
+ 'type': 4
+ }
+
+ await wss.send(format(struct))
+
+ base_string = ''
+
+ final = False
+ while not final:
+ objects = str(await wss.recv()).split('\x1e')
+ for obj in objects:
+ if obj is None or obj == '':
+ continue
+
+ response = json.loads(obj)
+ #print(response, flush=True, end='')
+ if response.get('type') == 1 and response['arguments'][0].get('messages',):
+ response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text')
+
+ yield (response_text.replace(base_string, ''))
+ base_string = response_text
+
+ elif response.get('type') == 2:
+ final = True
+
+ await wss.close()
+
+# I think Bing really doesn't understand multi-message conversations (based on the prompt template)
+def convert(messages):
+ context = ""
+ for message in messages:
+ context += "[%s](#message)\n%s\n\n" % (message['role'],
+ message['content'])
+ return context
+
+async def run(optionSets, messages):
+ prompt = messages[-1]['content']
+ if(len(messages) > 1):
+ prompt = convert(messages)
+ async for value in AsyncCompletion.create(prompt=prompt, optionSets=optionSets):
+ try:
+ print(value, flush=True, end='')
+ except UnicodeEncodeError as e:
+ # emoji encoding problem
+ print(value.encode('utf-8'), flush=True, end='')
+
+optionSet = conversationstyles[config['model']]
+asyncio.run(run(optionSet, config['messages']))
\ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 269fa17ec0ef0ecf8fee02907187d160e6614419..b64e44f5db3baccdd3fe36bc27000984533d248e 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -14,7 +14,15 @@ from .Providers import (
H2o,
ChatgptLogin,
DeepAi,
- GetGpt
+ GetGpt,
+ AItianhu,
+ EasyChat,
+ Acytoo,
+ DfeHub,
+ AiService,
+ BingHuan,
+ Wewordle,
+ ChatgptAi,
)
Palm = Bard
diff --git a/g4f/Provider/__pycache__/Provider.cpython-311.pyc b/g4f/Provider/__pycache__/Provider.cpython-311.pyc
index ae7af312622820a4f0bb0ae073ed50bc0ab655d9..fcfcbe4fab3e135898c306c56600730b6369471a 100644
Binary files a/g4f/Provider/__pycache__/Provider.cpython-311.pyc and b/g4f/Provider/__pycache__/Provider.cpython-311.pyc differ
diff --git a/g4f/Provider/__pycache__/Provider.cpython-38.pyc b/g4f/Provider/__pycache__/Provider.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8a6b6edb7bc6fb274dc3a35df2323b5c8a50dd7e
Binary files /dev/null and b/g4f/Provider/__pycache__/Provider.cpython-38.pyc differ
diff --git a/g4f/Provider/__pycache__/__init__.cpython-311.pyc b/g4f/Provider/__pycache__/__init__.cpython-311.pyc
index 4852e6d7a62193a8f3a55ac75574a14e89e4fcd7..e4a9975c3143f361ebb29b17d1c5ff95bdec37d4 100644
Binary files a/g4f/Provider/__pycache__/__init__.cpython-311.pyc and b/g4f/Provider/__pycache__/__init__.cpython-311.pyc differ
diff --git a/g4f/Provider/__pycache__/__init__.cpython-38.pyc b/g4f/Provider/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dffbb297aaf7df5bb948211c303c7242b76a4310
Binary files /dev/null and b/g4f/Provider/__pycache__/__init__.cpython-38.pyc differ
diff --git a/g4f/__init__.py b/g4f/__init__.py
index a0b4bac6aa4de9c0449095a3874c2cb9716169d7..09b24b55fcec253df05fa67f166320c62553b22f 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -2,11 +2,14 @@ import sys
from . import Provider
from g4f.models import Model, ModelUtils
+logging = False
class ChatCompletion:
@staticmethod
def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs):
kwargs['auth'] = auth
+ if provider and provider.working == False:
+ return f'{provider.__name__} is not working'
if provider and provider.needs_auth and not auth:
print(
@@ -27,7 +30,7 @@ class ChatCompletion:
f"ValueError: {engine.__name__} does not support 'stream' argument", file=sys.stderr)
sys.exit(1)
- print(f'Using {engine.__name__} provider')
+ if logging: print(f'Using {engine.__name__} provider')
return (engine._create_completion(model.name, messages, stream, **kwargs)
if stream else ''.join(engine._create_completion(model.name, messages, stream, **kwargs)))
diff --git a/g4f/__pycache__/__init__.cpython-311.pyc b/g4f/__pycache__/__init__.cpython-311.pyc
index 034196ed2edfe84c56963f9598f9f3bf3fb1d874..e4d416b8278358a699e1baf96c3882bc8d8d9207 100644
Binary files a/g4f/__pycache__/__init__.cpython-311.pyc and b/g4f/__pycache__/__init__.cpython-311.pyc differ
diff --git a/g4f/__pycache__/__init__.cpython-38.pyc b/g4f/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..39450f56134a5ac8d4a055144c8884a9d44a6b08
Binary files /dev/null and b/g4f/__pycache__/__init__.cpython-38.pyc differ
diff --git a/g4f/__pycache__/models.cpython-311.pyc b/g4f/__pycache__/models.cpython-311.pyc
index 3ca0c77be2d8b00acf140a89dfca8d27bf253143..3b7d25a892e11534593d20c85a2675f4f979344c 100644
Binary files a/g4f/__pycache__/models.cpython-311.pyc and b/g4f/__pycache__/models.cpython-311.pyc differ
diff --git a/g4f/__pycache__/typing.cpython-38.pyc b/g4f/__pycache__/typing.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1e00b716e93056e82b0b6adc80ce90e0ad4048d
Binary files /dev/null and b/g4f/__pycache__/typing.cpython-38.pyc differ
diff --git a/g4f/models.py b/g4f/models.py
index ecf18e6dffe029d6bbd651428094083c15b77283..95be48493fb0788cae4261b51f770a859c106984 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -152,11 +152,42 @@ class Model:
name: str = 'llama-13b'
base_provider: str = 'huggingface'
best_provider: Provider.Provider = Provider.H2o
+
+ class gpt_35_turbo_16k:
+ name: str = 'gpt-3.5-turbo-16k'
+ base_provider: str = 'openai'
+ best_provider: Provider.Provider = Provider.EasyChat
+
+ class gpt_35_turbo_0613:
+ name: str = 'gpt-3.5-turbo-0613'
+ base_provider: str = 'openai'
+ best_provider: Provider.Provider = Provider.EasyChat
+
+ class gpt_35_turbo_16k_0613:
+ name: str = 'gpt-3.5-turbo-16k-0613'
+ base_provider: str = 'openai'
+ best_provider: Provider.Provider = Provider.EasyChat
+
+ class gpt_4_32k:
+ name: str = 'gpt-4-32k'
+ base_provider: str = 'openai'
+ best_provider = None
+
+ class gpt_4_0613:
+ name: str = 'gpt-4-0613'
+ base_provider: str = 'openai'
+ best_provider = None
class ModelUtils:
convert: dict = {
'gpt-3.5-turbo': Model.gpt_35_turbo,
+        'gpt-3.5-turbo-16k': Model.gpt_35_turbo_16k,
+ 'gpt-3.5-turbo-0613': Model.gpt_35_turbo_0613,
+ 'gpt-3.5-turbo-16k-0613': Model.gpt_35_turbo_16k_0613,
+
'gpt-4': Model.gpt_4,
+ 'gpt-4-32k': Model.gpt_4_32k,
+ 'gpt-4-0613': Model.gpt_4_0613,
'claude-instant-v1-100k': Model.claude_instant_v1_100k,
'claude-v1-100k': Model.claude_v1_100k,