starsaround committed • 1b9c6eb
Parent(s): d4e84cf

Upload 204 files

Files changed:
- g4f/Provider/Providers/Ails.py +6 -4
- g4f/Provider/Providers/Bing.py +13 -12
- g4f/Provider/Providers/DeepAi.py +52 -26
- g4f/Provider/Providers/EasyChat.py +22 -15
- g4f/Provider/Providers/Raycast.py +50 -0
- g4f/Provider/Providers/Vercel.py +110 -0
- g4f/Provider/Providers/__pycache__/Ails.cpython-311.pyc +0 -0
- g4f/Provider/Providers/__pycache__/Bing.cpython-311.pyc +0 -0
- g4f/Provider/Providers/__pycache__/DeepAi.cpython-311.pyc +0 -0
- g4f/Provider/Providers/__pycache__/EasyChat.cpython-311.pyc +0 -0
- g4f/Provider/Providers/__pycache__/Raycast.cpython-311.pyc +0 -0
- g4f/Provider/Providers/__pycache__/Vercel.cpython-311.pyc +0 -0
- g4f/Provider/Providers/__pycache__/opchatgpts.cpython-311.pyc +0 -0
- g4f/Provider/Providers/opchatgpts.py +42 -0
- g4f/Provider/__init__.py +2 -0
- g4f/Provider/__pycache__/__init__.cpython-311.pyc +0 -0
- g4f/__init__.py +4 -4
- g4f/__pycache__/__init__.cpython-311.pyc +0 -0
- g4f/__pycache__/models.cpython-311.pyc +0 -0
- g4f/models.py +239 -228
g4f/Provider/Providers/Ails.py
CHANGED
@@ -37,9 +37,11 @@ class Utils:
         n = e % 10
         r = n + 1 if n % 2 == 0 else n
         return str(e - n + r)
+    def getV():
+        crossref = requests.get("https://ai.ls" + requests.get("https://ai.ls/?chat=1").text.split('crossorigin href="')[1].split('"')[0]).text.split('G4="')[1].split('"')[0]
+        return crossref
 
-
-def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False, **kwargs):
+def _create_completion(model: str, messages: list, stream: bool = False, temperature: float = 0.6, **kwargs):
 
     headers = {
         'authority': 'api.caipacity.com',
@@ -47,7 +49,7 @@ def _create_completion(model: str, messages: list, temperature: float = 0.6, str
         'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
         'authorization': 'Bearer free',
         'client-id': str(uuid.uuid4()),
-        'client-v':
+        'client-v': Utils.getV(),
         'content-type': 'application/json',
         'origin': 'https://ai.ls',
         'referer': 'https://ai.ls/',
@@ -90,4 +92,4 @@ def _create_completion(model: str, messages: list, temperature: float = 0.6, str
     yield token
 
 params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
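Note: Utils.getV() replaces the previously hard-coded 'client-v' header by scraping the value live: it loads https://ai.ls/?chat=1, follows the crossorigin asset link found on that page, and extracts the version from the G4="..." marker inside that asset. A hedged sketch of the same two-step scrape, separated from the provider purely for illustration (the URL layout is as assumed by the committed code):

import requests

page = requests.get("https://ai.ls/?chat=1").text
asset_path = page.split('crossorigin href="')[1].split('"')[0]   # relative asset URL found on the chat page
asset = requests.get("https://ai.ls" + asset_path).text
client_v = asset.split('G4="')[1].split('"')[0]                  # version string later sent as the 'client-v' header
print(client_v)
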
g4f/Provider/Providers/Bing.py
CHANGED
@@ -305,19 +305,20 @@ async def stream_generate(prompt: str, mode: optionsSets.optionSet = optionsSets
     await session.close()
 
 
-def run(generator):
-    loop = asyncio.
-
-    while True:
-        try:
-            next_val = loop.run_until_complete(gen.__anext__())
-            yield next_val
-
-        except StopAsyncIteration:
-            break
-
-    #print('Done')
+def run(generator):
+    loop = asyncio.new_event_loop()
+    asyncio.set_event_loop(loop)
+    gen = generator.__aiter__()
+
+    while True:
+        try:
+            next_val = loop.run_until_complete(gen.__anext__())
+            yield next_val
+
+        except StopAsyncIteration:
+            break
+
+    #print('Done')
 
 
 def convert(messages):
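Note: the rewritten run() above is a generic bridge from an async generator to a plain synchronous generator: it creates a private event loop and pulls one item at a time with run_until_complete. A minimal, self-contained sketch of the same technique (the numbers() generator is illustrative only, not part of Bing.py):

import asyncio

def run(generator):
    # drive an async generator from synchronous code on a dedicated loop
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    gen = generator.__aiter__()
    while True:
        try:
            yield loop.run_until_complete(gen.__anext__())
        except StopAsyncIteration:
            break

async def numbers():
    # toy async generator used only to demonstrate the bridge
    for i in range(3):
        await asyncio.sleep(0)
        yield i

print(list(run(numbers())))  # -> [0, 1, 2]
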
g4f/Provider/Providers/DeepAi.py
CHANGED
@@ -1,48 +1,74 @@
-import os
 import json
+import os
-import hashlib
 import requests
+import js2py
 from ...typing import sha256, Dict, get_type_hints
 
 
+url = "https://api.deepai.org/"
 model = ['gpt-3.5-turbo']
 supports_stream = True
 needs_auth = False
 working = True
 
-    return hashlib.md5(text.encode()).hexdigest()[::-1]
-    part2 = md5(user_agent + md5(user_agent + md5(user_agent + part1 + "x")))
-    return f"tryit-{part1}-{part2}"
-    headers = {
-        "user-agent": user_agent
-    }
+token_js = """
+var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
+var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y;
+h = Math.round(1E11 * Math.random()) + "";
+f = function () {
+    for (var p = [], q = 0; 64 > q;) p[q] = 0 | 4294967296 * Math.sin(++q % Math.PI);
+
+    return function (t) {
+        var v, y, H, ea = [v = 1732584193, y = 4023233417, ~v, ~y],
+            Z = [],
+            A = unescape(encodeURI(t)) + "\u0080",
+            z = A.length;
+        t = --z / 4 + 2 | 15;
+        for (Z[--t] = 8 * z; ~z;) Z[z >> 2] |= A.charCodeAt(z) << 8 * z--;
+        for (q = A = 0; q < t; q += 16) {
+            for (z = ea; 64 > A; z = [H = z[3], v + ((H = z[0] + [v & y | ~v & H, H & v | ~H & y, v ^ y ^ H, y ^ (v | ~H)][z = A >> 4] + p[A] + ~~Z[q | [A, 5 * A + 1, 3 * A + 5, 7 * A][z] & 15]) << (z = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21][4 * z + A++ % 4]) | H >>> -z), v, y]) v = z[1] | 0, y = z[2];
+            for (A = 4; A;) ea[--A] += z[A]
+        }
+        for (t = ""; 32 > A;) t += (ea[A >> 3] >> 4 * (1 ^ A++) & 15).toString(16);
+        return t.split("").reverse().join("")
+    }
+}();
+
+"tryit-" + h + "-" + f(agent + f(agent + f(agent + h + "x")));
+"""
+
+uuid4_js = """
+function uuidv4() {
+    for (var a = [], b = 0; 36 > b; b++) a[b] = "0123456789abcdef".substr(Math.floor(16 * Math.random()), 1);
+    a[14] = "4";
+    a[19] = "0123456789abcdef".substr(a[19] & 3 | 8, 1);
+    a[8] = a[13] = a[18] = a[23] = "-";
+    return a.join("")
+}
+uuidv4();"""
+
+def create_session():
+    url = "https://api.deepai.org/save_chat_session"
+
+    payload = {'uuid': js2py.eval_js(uuid4_js), "title":"", "chat_style": "chat", "messages": '[]'}
+    headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"}
+
+    response = requests.request("POST", url, headers=headers, data=payload)
+    return response
+
+def _create_completion(model: str, messages:list, stream: bool = True, **kwargs):
+    create_session()
+    url = "https://api.deepai.org/make_me_a_pizza"
+
+    payload = {'chas_style': "chat", "chatHistory": json.dumps(messages)}
+    api_key = js2py.eval_js(token_js)
+    headers = {"api-key": api_key, "User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"}
+
+    response = requests.request("POST", url, headers=headers, data=payload, stream=True)
+    for chunk in response.iter_content(chunk_size=None):
+        response.raise_for_status()
+        yield chunk.decode()
 
 params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
     '(%s)' % ', '.join(
         [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
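Note: the new DeepAi provider builds its api-key entirely client-side; js2py evaluates the obfuscated token_js snippet, which hashes the user-agent into a "tryit-<random>-<digest>" value that is then sent as the api-key header. A hedged usage sketch through the package entry point (assumes the g4f package from this commit is importable and the DeepAi endpoint is still reachable):

import g4f

# stream a completion through the DeepAi provider; chunks arrive as decoded text
for chunk in g4f.ChatCompletion.create(
        model='gpt-3.5-turbo',
        provider=g4f.Provider.DeepAi,
        messages=[{'role': 'user', 'content': 'Say hello'}],
        stream=True):
    print(chunk, end='', flush=True)
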
g4f/Provider/Providers/EasyChat.py
CHANGED
@@ -7,40 +7,47 @@ model = ['gpt-3.5-turbo']
 supports_stream = True
 needs_auth = False
 working = True
+active_servers = [
+    "https://chat10.fastgpt.me",
+    "https://chat9.fastgpt.me",
+    "https://chat1.fastgpt.me",
+    "https://chat2.fastgpt.me",
+    "https://chat3.fastgpt.me",
+    "https://chat4.fastgpt.me"
+]
 
+# Change server if not work current server
+server = active_servers[0]
 
 def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    req = requests.Session()
+
     headers = {
-        'authority': '
+        'authority': f'{server}'.replace("https://",""),
         'accept': 'text/event-stream',
-        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2',
         'content-type': 'application/json',
-        'plugins': '0',
-        'referer': 'https://free.easychat.work/',
+        'origin': f'{server}',
+        'referer': f'{server}/',
         'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-        'sec-ch-ua-mobile': '?0',
-        'sec-ch-ua-platform': '"macOS"',
-        'sec-fetch-dest': 'empty',
-        'sec-fetch-mode': 'cors',
-        'sec-fetch-site': 'same-origin',
         'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
-        'usesearch': 'false',
         'x-requested-with': 'XMLHttpRequest',
     }
 
     json_data = {
         'messages': messages,
         'stream': True,
-        'model':
+        'model': "gpt-3.5-turbo",
         'temperature': kwargs.get('temperature', 0.5),
         'presence_penalty': kwargs.get('presence_penalty', 0),
         'frequency_penalty': kwargs.get('frequency_penalty', 0),
         'top_p': kwargs.get('top_p', 1),
     }
 
+    # init cookies from server
+    req.get(f'{server}/')
+
+    response = req.post(f'{server}/api/openai/v1/chat/completions',
                         headers=headers, json=json_data)
 
     for chunk in response.iter_lines():
@@ -49,4 +56,4 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
     yield (data['choices'][0]['delta']['content'])
 
 params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
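Note: EasyChat now pins server = active_servers[0], and the comment only suggests switching mirrors by hand when that host stops responding. A small illustrative sketch of an automatic fallback (the pick_server helper is hypothetical, not part of the committed file):

import requests

def pick_server(servers, timeout=5):
    # return the first mirror that answers; raise if none are reachable
    for candidate in servers:
        try:
            if requests.get(f'{candidate}/', timeout=timeout).ok:
                return candidate
        except requests.RequestException:
            continue
    raise RuntimeError('no EasyChat mirror reachable')

# server = pick_server(active_servers)   # instead of hard-coding active_servers[0]
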
g4f/Provider/Providers/Raycast.py
ADDED
@@ -0,0 +1,50 @@
+import json
+import os
+
+import requests
+from g4f.typing import get_type_hints
+
+url = "https://backend.raycast.com/api/v1/ai/chat_completions"
+model = ['gpt-3.5-turbo', 'gpt-4']
+supports_stream = True
+needs_auth = True
+working = True
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    auth = kwargs.get('auth')
+    headers = {
+        'Accept': 'application/json',
+        'Accept-Language': 'en-US,en;q=0.9',
+        'Authorization': f'Bearer {auth}',
+        'Content-Type': 'application/json',
+        'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0',
+    }
+    parsed_messages = []
+    for message in messages:
+        parsed_messages.append({
+            'author': message['role'],
+            'content': {'text': message['content']}
+        })
+    data = {
+        "debug": False,
+        "locale": "en-CN",
+        "messages": parsed_messages,
+        "model": model,
+        "provider": "openai",
+        "source": "ai_chat",
+        "system_instruction": "markdown",
+        "temperature": 0.5
+    }
+    response = requests.post(url, headers=headers, json=data, stream=True)
+    for token in response.iter_lines():
+        if b'data: ' not in token:
+            continue
+        completion_chunk = json.loads(token.decode().replace('data: ', ''))
+        token = completion_chunk['text']
+        if token != None:
+            yield token
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
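Note: Raycast is the only provider added here with needs_auth = True; the bearer token travels through the auth keyword that ChatCompletion.create copies into kwargs. A hedged usage sketch (the token string is a placeholder, not a real credential):

import g4f

response = g4f.ChatCompletion.create(
    model='gpt-4',
    provider=g4f.Provider.Raycast,
    messages=[{'role': 'user', 'content': 'Hello'}],
    stream=True,
    auth='YOUR_RAYCAST_BEARER_TOKEN')  # placeholder value

for token in response:
    print(token, end='')
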
g4f/Provider/Providers/Vercel.py
CHANGED
@@ -42,6 +42,116 @@ vercel_models = {'anthropic:claude-instant-v1': {'id': 'anthropic:claude-instant
'id': 'huggingface:bigcode/santacoder', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigCode', 'instructions': 'The model was trained on GitHub code. As such it is not an instruction model and commands like "Write a function that computes the square root." do not work well. You should phrase commands like they occur in source code such as comments (e.g. # the following function computes the sqrt) or write a function signature and docstring and let the model complete the function body.', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'santacoder'}, 'cohere:command-medium-nightly': {'id': 'cohere:command-medium-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-medium-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'cohere:command-xlarge-nightly': {'id': 'cohere:command-xlarge-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-xlarge-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:gpt-4': {'id': 'openai:gpt-4', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'gpt-4', 'minBillingTier': 'pro', 'parameters': {'temperature': {'value': 0.7, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:code-cushman-001': {'id': 'openai:code-cushman-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-cushman-001'}, 'openai:code-davinci-002': {'id': 'openai:code-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-davinci-002'}, 'openai:gpt-3.5-turbo': {'id': 'openai:gpt-3.5-turbo', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.7, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 
1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-3.5-turbo'}, 'openai:text-ada-001': {'id': 'openai:text-ada-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-ada-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-babbage-001': {'id': 'openai:text-babbage-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-babbage-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-curie-001': {'id': 'openai:text-curie-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-curie-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-002': {'id': 'openai:text-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-002', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-003': {'id': 'openai:text-davinci-003', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-003', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}}
 
 
+# import requests
+# import execjs
+# import ubox
+# import json
+# import re
+
+
+# html = requests.get('https://sdk.vercel.ai/').text
+# paths_regex = r'static\/chunks.+?\.js'
+# separator_regex = r'"\]\)<\/script><script>self\.__next_f\.push\(\[.,"'
+
+# paths = re.findall(paths_regex, html)
+# for i in range(len(paths)):
+#     paths[i] = re.sub(separator_regex, "", paths[i])
+# paths = list(set(paths))
+# print(paths)
+
+# scripts = []
+# threads = []
+
+# print(f"Downloading and parsing scripts...")
+# def download_thread(path):
+#     script_url = f"{self.base_url}/_next/{path}"
+#     script = self.session.get(script_url).text
+#     scripts.append(script)
+
+# for path in paths:
+#     thread = threading.Thread(target=download_thread, args=(path,), daemon=True)
+#     thread.start()
+#     threads.append(thread)
+
+# for thread in threads:
+#     thread.join()
+
+# for script in scripts:
+#     models_regex = r'let .="\\n\\nHuman:\",r=(.+?),.='
+#     matches = re.findall(models_regex, script)
+
+#     if matches:
+#         models_str = matches[0]
+#         stop_sequences_regex = r'(?<=stopSequences:{value:\[)\D(?<!\])'
+#         models_str = re.sub(stop_sequences_regex, re.escape('"\\n\\nHuman:"'), models_str)
+
+#         context = quickjs.Context()
+#         json_str = context.eval(f"({models_str})").json()
+#         #return json.loads(json_str)
+
+# quit()
+# headers = {
+#     'authority': 'sdk.vercel.ai',
+#     'accept': '*/*',
+#     'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+#     'content-type': 'application/json',
+#     'origin': 'https://sdk.vercel.ai',
+#     'referer': 'https://sdk.vercel.ai/',
+#     'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+#     'sec-ch-ua-mobile': '?0',
+#     'sec-ch-ua-platform': '"macOS"',
+#     'sec-fetch-dest': 'empty',
+#     'sec-fetch-mode': 'cors',
+#     'sec-fetch-site': 'same-origin',
+#     'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
+# }
+
+# response = requests.get('https://sdk.vercel.ai/openai.jpeg', headers=headers)
+
+# data = (json.loads(ubox.b64dec(response.text)))
+
+# script = 'globalThis={data: "sentinel"};a=()=>{return (%s)(%s)}' % (data['c'], data['a'])
+
+# token_data = execjs.compile(script).call('a')
+# print(token_data)
+
+# token = {
+#     'r': token_data,
+#     't': data["t"]
+# }
+
+# botToken = ubox.b64enc(json.dumps(token, separators=(',', ':')))
+# print(botToken)
+
+# import requests
+
+# headers['custom-encoding'] = botToken
+
+# json_data = {
+#     'messages': [
+#         {
+#             'role': 'user',
+#             'content': 'hello',
+#         },
+#     ],
+#     'playgroundId': ubox.uuid4(),
+#     'chatIndex': 0,
+#     'model': 'openai:gpt-3.5-turbo',
+#     'temperature': 0.7,
+#     'maxTokens': 500,
+#     'topK': 1,
+#     'topP': 1,
+#     'frequencyPenalty': 1,
+#     'presencePenalty': 1,
+#     'stopSequences': []
+# }
+
+# response = requests.post('https://sdk.vercel.ai/api/generate',
+#                          headers=headers, json=json_data, stream=True)
+
+# for token in response.iter_content(chunk_size=2046):
+#     print(token)
+
 def _create_completion(model: str, messages: list, stream: bool, **kwargs):
     return
     # conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
g4f/Provider/Providers/__pycache__/Ails.cpython-311.pyc
CHANGED
Binary files a/g4f/Provider/Providers/__pycache__/Ails.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/Ails.cpython-311.pyc differ

g4f/Provider/Providers/__pycache__/Bing.cpython-311.pyc
CHANGED
Binary files a/g4f/Provider/Providers/__pycache__/Bing.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/Bing.cpython-311.pyc differ

g4f/Provider/Providers/__pycache__/DeepAi.cpython-311.pyc
CHANGED
Binary files a/g4f/Provider/Providers/__pycache__/DeepAi.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/DeepAi.cpython-311.pyc differ

g4f/Provider/Providers/__pycache__/EasyChat.cpython-311.pyc
CHANGED
Binary files a/g4f/Provider/Providers/__pycache__/EasyChat.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/EasyChat.cpython-311.pyc differ

g4f/Provider/Providers/__pycache__/Raycast.cpython-311.pyc
ADDED
Binary file (2.77 kB).

g4f/Provider/Providers/__pycache__/Vercel.cpython-311.pyc
CHANGED
Binary files a/g4f/Provider/Providers/__pycache__/Vercel.cpython-311.pyc and b/g4f/Provider/Providers/__pycache__/Vercel.cpython-311.pyc differ

g4f/Provider/Providers/__pycache__/opchatgpts.cpython-311.pyc
ADDED
Binary file (2.67 kB).

g4f/Provider/Providers/opchatgpts.py
ADDED
@@ -0,0 +1,42 @@
+import os
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://opchatgpts.net'
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+working = True
+
+def _create_completion(model: str, messages: list, stream: bool = False, temperature: float = 0.8, max_tokens: int = 1024, system_prompt: str = "Converse as if you were an AI assistant. Be friendly, creative.", **kwargs):
+
+    data = {
+        'env': 'chatbot',
+        'session': 'N/A',
+        'prompt': "\n",
+        'context': system_prompt,
+        'messages': messages,
+        'newMessage': messages[::-1][0]["content"],
+        'userName': '<div class="mwai-name-text">User:</div>',
+        'aiName': '<div class="mwai-name-text">AI:</div>',
+        'model': 'gpt-3.5-turbo',
+        'temperature': temperature,
+        'maxTokens': max_tokens,
+        'maxResults': 1,
+        'apiKey': '',
+        'service': 'openai',
+        'embeddingsIndex': '',
+        'stop': ''
+    }
+
+    response = requests.post('https://opchatgpts.net/wp-json/ai-chatbot/v1/chat', json=data).json()
+
+    if response["success"]:
+
+        return response["reply"] # `yield (response["reply"])` doesn't work
+
+    raise Exception("Request failed: " + response)
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join(
+        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
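Note: opchatgpts declares supports_stream = False and its _create_completion returns the whole reply as one string (the inline comment records that yielding it did not work). A hedged sketch of calling the module directly rather than through the streaming path, for illustration only:

from g4f.Provider.Providers import opchatgpts

reply = opchatgpts._create_completion(
    model='gpt-3.5-turbo',
    messages=[{'role': 'user', 'content': 'One short sentence about Python.'}])
print(reply)
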
g4f/Provider/__init__.py
CHANGED
@@ -23,6 +23,8 @@ from .Providers import (
     BingHuan,
     Wewordle,
     ChatgptAi,
+    opchatgpts,
+    Raycast,
 )
 
 Palm = Bard
g4f/Provider/__pycache__/__init__.cpython-311.pyc
CHANGED
Binary files a/g4f/Provider/__pycache__/__init__.cpython-311.pyc and b/g4f/Provider/__pycache__/__init__.cpython-311.pyc differ

g4f/__init__.py
CHANGED
@@ -1,12 +1,12 @@
 import sys
 from . import Provider
-from g4f
+from g4f import models
 
 logging = False
 
 class ChatCompletion:
     @staticmethod
-    def create(model: Model
+    def create(model: models.Model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs):
         kwargs['auth'] = auth
         if provider and provider.working == False:
             return f'{provider.__name__} is not working'
@@ -19,7 +19,7 @@ class ChatCompletion:
         try:
             if isinstance(model, str):
                 try:
-                    model = ModelUtils.convert[model]
+                    model = models.ModelUtils.convert[model]
                 except KeyError:
                     raise Exception(f'The model: {model} does not exist')
 
@@ -39,4 +39,4 @@ class ChatCompletion:
             arg: str = str(e).split("'")[1]
             print(
                 f"ValueError: {engine.__name__} does not support '{arg}' argument", file=sys.stderr)
-            sys.exit(1)
+            sys.exit(1)
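Note: with this change ChatCompletion.create accepts either a plain model name or one of the new models.Model objects; string names are resolved through models.ModelUtils.convert before a provider is chosen. A hedged sketch of the two equivalent call forms (network access and a working provider are assumed):

import g4f
from g4f import models

messages = [{'role': 'user', 'content': 'Hi'}]

# string form: looked up in models.ModelUtils.convert
print(g4f.ChatCompletion.create(model='gpt-3.5-turbo', messages=messages))

# object form: the resolved entry can also be passed directly
entry = models.ModelUtils.convert['gpt-3.5-turbo']   # -> models.gpt_35_turbo
print(g4f.ChatCompletion.create(model=entry, messages=messages))
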
g4f/__pycache__/__init__.cpython-311.pyc
CHANGED
Binary files a/g4f/__pycache__/__init__.cpython-311.pyc and b/g4f/__pycache__/__init__.cpython-311.pyc differ

g4f/__pycache__/models.cpython-311.pyc
CHANGED
Binary files a/g4f/__pycache__/models.cpython-311.pyc and b/g4f/__pycache__/models.cpython-311.pyc differ

g4f/models.py
CHANGED
@@ -1,232 +1,243 @@
-from
+from types import ModuleType
+from . import Provider
+from dataclasses import dataclass
 
 
+@dataclass
 class Model:
+    name: str
+    base_provider: str
+    best_provider: ModuleType or None
+
+gpt_35_turbo = Model(
+    name="gpt-3.5-turbo",
+    base_provider="openai",
+    best_provider=Provider.GetGpt,
+)
+
+gpt_4 = Model(
+    name="gpt-4",
+    base_provider="openai",
+    best_provider=Provider.Bing,
+)
+
+claude_instant_v1_100k = Model(
+    name="claude-instant-v1-100k",
+    base_provider="anthropic",
+    best_provider=Provider.Vercel,
+)
+
+claude_instant_v1 = Model(
+    name="claude-instant-v1",
+    base_provider="anthropic",
+    best_provider=Provider.Vercel,
+)
+
+claude_v1_100k = Model(
+    name="claude-v1-100k",
+    base_provider="anthropic",
+    best_provider=Provider.Vercel,
+)
+
+claude_v1 = Model(
+    name="claude-v1",
+    base_provider="anthropic",
+    best_provider=Provider.Vercel,
+)
+
+alpaca_7b = Model(
+    name="alpaca-7b",
+    base_provider="replicate",
+    best_provider=Provider.Vercel,
+)
+
+stablelm_tuned_alpha_7b = Model(
+    name="stablelm-tuned-alpha-7b",
+    base_provider="replicate",
+    best_provider=Provider.Vercel,
+)
+
+bloom = Model(
+    name="bloom",
+    base_provider="huggingface",
+    best_provider=Provider.Vercel,
+)
+
+bloomz = Model(
+    name="bloomz",
+    base_provider="huggingface",
+    best_provider=Provider.Vercel,
+)
+
+flan_t5_xxl = Model(
+    name="flan-t5-xxl",
+    base_provider="huggingface",
+    best_provider=Provider.Vercel,
+)
+
+flan_ul2 = Model(
+    name="flan-ul2",
+    base_provider="huggingface",
+    best_provider=Provider.Vercel,
+)
+
+gpt_neox_20b = Model(
+    name="gpt-neox-20b",
+    base_provider="huggingface",
+    best_provider=Provider.Vercel,
+)
+
+oasst_sft_4_pythia_12b_epoch_35 = Model(
+    name="oasst-sft-4-pythia-12b-epoch-3.5",
+    base_provider="huggingface",
+    best_provider=Provider.Vercel,
+)
+
+santacoder = Model(
+    name="santacoder",
+    base_provider="huggingface",
+    best_provider=Provider.Vercel,
+)
+
+command_medium_nightly = Model(
+    name="command-medium-nightly",
+    base_provider="cohere",
+    best_provider=Provider.Vercel,
+)
+
+command_xlarge_nightly = Model(
+    name="command-xlarge-nightly",
+    base_provider="cohere",
+    best_provider=Provider.Vercel,
+)
+
+code_cushman_001 = Model(
+    name="code-cushman-001",
+    base_provider="openai",
+    best_provider=Provider.Vercel,
+)
+
+code_davinci_002 = Model(
+    name="code-davinci-002",
+    base_provider="openai",
+    best_provider=Provider.Vercel,
+)
+
+text_ada_001 = Model(
+    name="text-ada-001",
+    base_provider="openai",
+    best_provider=Provider.Vercel,
+)
+
+text_babbage_001 = Model(
+    name="text-babbage-001",
+    base_provider="openai",
+    best_provider=Provider.Vercel,
+)
+
+text_curie_001 = Model(
+    name="text-curie-001",
+    base_provider="openai",
+    best_provider=Provider.Vercel,
+)
+
+text_davinci_002 = Model(
+    name="text-davinci-002",
+    base_provider="openai",
+    best_provider=Provider.Vercel,
+)
+
+text_davinci_003 = Model(
+    name="text-davinci-003",
+    base_provider="openai",
+    best_provider=Provider.Vercel,
+)
+
+palm = Model(
+    name="palm",
+    base_provider="google",
+    best_provider=Provider.Bard,
+)
+
+falcon_40b = Model(
+    name="falcon-40b",
+    base_provider="huggingface",
+    best_provider=Provider.H2o,
+)
+
+falcon_7b = Model(
+    name="falcon-7b",
+    base_provider="huggingface",
+    best_provider=Provider.H2o,
+)
+
+llama_13b = Model(
+    name="llama-13b",
+    base_provider="huggingface",
+    best_provider=Provider.H2o,
+)
+
+gpt_35_turbo_16k = Model(
+    name="gpt-3.5-turbo-16k",
+    base_provider="openai",
+    best_provider=Provider.EasyChat,
+)
+
+gpt_35_turbo_0613 = Model(
+    name="gpt-3.5-turbo-0613",
+    base_provider="openai",
+    best_provider=Provider.EasyChat,
+)
+
+gpt_35_turbo_16k_0613 = Model(
+    name="gpt-3.5-turbo-16k-0613",
+    base_provider="openai",
+    best_provider=Provider.EasyChat,
+)
+
+gpt_4_32k = Model(name="gpt-4-32k", base_provider="openai", best_provider=None)
+
+gpt_4_0613 = Model(name="gpt-4-0613", base_provider="openai", best_provider=None)
+
+
 class ModelUtils:
-    convert: dict = {
-        'palm2': Model.palm,
-        'palm': Model.palm,
-        'google': Model.palm,
-        'google-bard': Model.palm,
-        'google-palm': Model.palm,
-        'bard': Model.palm,
-
-        'falcon-40b': Model.falcon_40b,
-        'falcon-7b': Model.falcon_7b,
-        'llama-13b': Model.llama_13b,
-    }
+    convert: dict[str, Model] = {
+        "gpt-3.5-turbo": gpt_35_turbo,
+        "gpt-3.5-turbo-16k": gpt_35_turbo_16k,
+        "gpt-3.5-turbo-0613": gpt_35_turbo_0613,
+        "gpt-3.5-turbo-16k-0613": gpt_35_turbo_16k_0613,
+        "gpt-4": gpt_4,
+        "gpt-4-32k": gpt_4_32k,
+        "gpt-4-0613": gpt_4_0613,
+        "claude-instant-v1-100k": claude_instant_v1_100k,
+        "claude-v1-100k": claude_v1_100k,
+        "claude-instant-v1": claude_instant_v1,
+        "claude-v1": claude_v1,
+        "alpaca-7b": alpaca_7b,
+        "stablelm-tuned-alpha-7b": stablelm_tuned_alpha_7b,
+        "bloom": bloom,
+        "bloomz": bloomz,
+        "flan-t5-xxl": flan_t5_xxl,
+        "flan-ul2": flan_ul2,
+        "gpt-neox-20b": gpt_neox_20b,
+        "oasst-sft-4-pythia-12b-epoch-3.5": oasst_sft_4_pythia_12b_epoch_35,
+        "santacoder": santacoder,
+        "command-medium-nightly": command_medium_nightly,
+        "command-xlarge-nightly": command_xlarge_nightly,
+        "code-cushman-001": code_cushman_001,
+        "code-davinci-002": code_davinci_002,
+        "text-ada-001": text_ada_001,
+        "text-babbage-001": text_babbage_001,
+        "text-curie-001": text_curie_001,
+        "text-davinci-002": text_davinci_002,
+        "text-davinci-003": text_davinci_003,
+        "palm2": palm,
+        "palm": palm,
+        "google": palm,
+        "google-bard": palm,
+        "google-palm": palm,
+        "bard": palm,
+        "falcon-40b": falcon_40b,
+        "falcon-7b": falcon_7b,
+        "llama-13b": llama_13b,
+    }