import json
import requests
import openai
import tiktoken
import os
import time
from functools import wraps
import threading
def retry(exception_to_check, tries=3, delay=5, backoff=1):
    """
    Decorator used to automatically retry a failed function. Parameters:

    exception_to_check: The type of exception to catch.
    tries: Maximum number of retry attempts.
    delay: Waiting time between retries, in seconds.
    backoff: Multiplicative factor applied to the waiting time after each retry.
    """
    def deco_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except exception_to_check as e:
                    print(f"{str(e)}, Retrying in {mdelay} seconds...")
                    time.sleep(mdelay)
                    mtries -= 1
                    mdelay *= backoff
            return f(*args, **kwargs)
        return f_retry  # true decorator
    return deco_retry
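
# Usage sketch (illustrative only): the decorated function below is hypothetical and
# not part of this module; it just shows how @retry re-runs a call that raises the
# given exception type, multiplying the wait by `backoff` after each failed attempt.
#
# @retry(requests.exceptions.RequestException, tries=3, delay=2, backoff=2)
# def fetch(url):
#     return requests.get(url, timeout=10).json()
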
def timeout_decorator(timeout):
    class TimeoutException(Exception):
        pass

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            result = [TimeoutException('Function call timed out')]  # Nonlocal mutable variable

            def target():
                try:
                    result[0] = func(*args, **kwargs)
                except Exception as e:
                    result[0] = e

            thread = threading.Thread(target=target)
            thread.start()
            thread.join(timeout)
            if thread.is_alive():
                print(f"Function {func.__name__} timed out, retrying...")
                return wrapper(*args, **kwargs)
            if isinstance(result[0], Exception):
                raise result[0]
            return result[0]
        return wrapper
    return decorator
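
# Usage sketch (illustrative only): a hypothetical function wrapped so that any call
# running longer than 5 seconds is retried in a fresh thread. Note that the timed-out
# worker thread is not killed; it keeps running in the background until it finishes.
#
# @timeout_decorator(5)
# def slow_operation(x):
#     time.sleep(10)
#     return x
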
def send_chat_request(request):
    endpoint = 'http://10.15.82.10:8006/v1/chat/completions'
    model = 'gpt-3.5-turbo'
    # Available models: gpt4, gpt4-32k and gpt-3.5-turbo
    headers = {
        'Content-Type': 'application/json',
    }
    temperature = 0.7
    top_p = 0.95
    frequency_penalty = 0
    presence_penalty = 0
    max_tokens = 8000
    stream = False
    stop = None
    messages = [{"role": "user", "content": request}]
    data = {
        'model': model,
        'messages': messages,
        'temperature': temperature,
        'top_p': top_p,
        'frequency_penalty': frequency_penalty,
        'presence_penalty': presence_penalty,
        'max_tokens': max_tokens,
        'stream': stream,
        'stop': stop,
    }
    response = requests.post(endpoint, headers=headers, data=json.dumps(data))
    if response.status_code == 200:
        data = json.loads(response.text)
        data_res = data['choices'][0]['message']
        return data_res
    else:
        raise Exception(f"Request failed with status code {response.status_code}: {response.text}")
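
# Usage sketch (illustrative only, assuming the internal endpoint above is reachable):
#
# reply = send_chat_request("Summarize the retry decorator above in one sentence.")
# print(reply['content'])  # the return value is the 'message' dict, not plain text
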
def num_tokens_from_string(string: str, encoding_name: str) -> int:
    """Returns the number of tokens in a text string."""
    encoding = tiktoken.get_encoding(encoding_name)
    num_tokens = len(encoding.encode(string))
    print('num_tokens:', num_tokens)
    return num_tokens
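
# Usage sketch (illustrative only): 'cl100k_base' is the encoding used by the
# gpt-3.5-turbo / gpt-4 family, which is why it is passed below in
# send_chat_request_Azure to budget max_tokens against the prompt length.
#
# n = num_tokens_from_string("hello world", 'cl100k_base')
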
@timeout_decorator(45)
def send_chat_request_Azure(query, openai_key, api_base, engine):
    openai.api_type = "azure"
    openai.api_version = "2023-03-15-preview"
    openai.api_base = api_base
    openai.api_key = openai_key
    max_token_num = 8000 - num_tokens_from_string(query, 'cl100k_base')
    openai.api_request_timeout = 1  # Request timeout, in seconds
    response = openai.ChatCompletion.create(
        engine=engine,
        messages=[{"role": "system", "content": "You are a useful AI assistant that helps people solve the problem step by step."},
                  {"role": "user", "content": "" + query}],
        temperature=0.0,
        max_tokens=max_token_num,
        top_p=0.95,
        frequency_penalty=0,
        presence_penalty=0,
        stop=None)
    data_res = response['choices'][0]['message']['content']
    return data_res
# Note: The openai-python library support for Azure OpenAI is in preview.
@retry(Exception, tries=4, delay=20, backoff=2)
@timeout_decorator(45)
def send_official_call(query, openai_key='', api_base='', engine=''):
    start = time.time()
    # Convert to a human-readable time
    start = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))
    print(start)
    openai.api_key = openai_key
    response = openai.ChatCompletion.create(
        # engine="gpt35",
        model="gpt-3.5-turbo",
        messages=[{"role": "system", "content": "You are a useful AI assistant that helps people solve the problem step by step."},
                  {"role": "user", "content": "" + query}],
        # max_tokens=max_token_num,
        temperature=0.1,
        top_p=0.1,
        frequency_penalty=0,
        presence_penalty=0,
        stop=None)
    data_res = response['choices'][0]['message']['content']
    return data_res
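
# Minimal manual test sketch. The API key below is a placeholder (assumption), not a
# real credential; substitute your own key before running this module directly.
if __name__ == "__main__":
    demo_query = "What is 2 + 2? Answer step by step."
    answer = send_official_call(demo_query, openai_key="YOUR_OPENAI_API_KEY")
    print(answer)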