fruitpicker01 committed on
Commit
c72380a
1 Parent(s): d9ef52d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -55
app.py CHANGED
@@ -20,7 +20,6 @@ from transformers import AutoTokenizer, AutoModel
20
  from utils import best_text_choice
21
  import asyncio
22
  import inspect
23
- from openai import AsyncOpenAI
24
 
25
  tokenizer = AutoTokenizer.from_pretrained("ai-forever/ru-en-RoSBERTa")
26
  model = AutoModel.from_pretrained("ai-forever/ru-en-RoSBERTa")
@@ -34,9 +33,6 @@ MISTRAL_API_KEY_4 = os.getenv('MISTRAL_API_KEY_4')
34
  MISTRAL_API_KEY_5 = os.getenv('MISTRAL_API_KEY_5')
35
  token = os.getenv('GITHUB_TOKEN')
36
 
37
- openai_api_key = os.getenv("GPT_KEY")
38
- client = AsyncOpenAI(api_key=openai_api_key)
39
-
40
  # Клиент для генерации сообщений
41
  client_mistral_generate = Mistral(api_key=MISTRAL_API_KEY)
42
 
@@ -202,20 +198,31 @@ def clean_message(message):
202
  # return f"Ошибка при обращении к GigaChat-Pro: {e}"
203
 
204
  async def generate_message_mistral_generate(prompt, max_retries=5):
 
 
 
 
 
 
 
 
 
 
205
  retries = 0
206
  while retries < max_retries:
207
  try:
208
- # Call to OpenAI's GPT-4o-mini model
209
- chat_response = await client.chat.completions.create(
210
- model="gpt-4o-mini",
211
- messages=[{"role": "user", "content": prompt}],
212
- temperature=0.7,
213
- max_tokens=70,
214
- tools=None,
215
- tool_choice=None
 
 
 
216
  )
217
-
218
- # Properly access the message content
219
  cleaned_message = clean_message(chat_response.choices[0].message.content.strip())
220
  return cleaned_message
221
  except Exception as e:
@@ -226,49 +233,10 @@ async def generate_message_mistral_generate(prompt, max_retries=5):
226
  await asyncio.sleep(wait_time)
227
  retries += 1
228
  else:
229
- print(f"Error calling GPT-4o-mini: {e}")
230
  return None
231
- print("Failed to get response from GPT-4o-mini after maximum retries.")
232
  return None
233
-
234
- #def generate_message_mistral_generate(prompt):
235
- # try:
236
- # messages = [SystemMessage(content=prompt)]
237
- # res = chat_pro(messages)
238
- # cleaned_message = clean_message(res.content.strip())
239
- # return cleaned_message
240
- # except Exception as e:
241
- # return f"Ошибка при обращении к GigaChat-Pro: {e}"
242
-
243
- # retries = 0
244
- # while retries < max_retries:
245
- # try:
246
- # chat_response = await client_mistral_generate.chat.complete_async(
247
- # model="mistral-large-latest",
248
- # temperature=1.0,
249
- # min_tokens=81,
250
- # max_tokens=106,
251
- # messages=[
252
- # {
253
- # "role": "user",
254
- # "content": prompt
255
- # },
256
- # ]
257
- # )
258
- # cleaned_message = clean_message(chat_response.choices[0].message.content.strip())
259
- # return cleaned_message
260
- # except Exception as e:
261
- # error_message = str(e)
262
- # if "Status 429" in error_message or "Server disconnected without sending a response" in error_message:
263
- # wait_time = 3
264
- # print(f"Rate limit exceeded or server did not respond. Waiting {wait_time} seconds before retrying...")
265
- # await asyncio.sleep(wait_time)
266
- # retries += 1
267
- # else:
268
- # print(f"Error calling Mistral: {e}")
269
- # return None
270
- # print("Failed to get response from Mistral after maximum retries.")
271
- # return None
272
 
273
  async def generate_message_mistral_check(prompt, max_retries=5):
274
  #def generate_message_mistral_check(prompt):
 
20
  from utils import best_text_choice
21
  import asyncio
22
  import inspect
 
23
 
24
  tokenizer = AutoTokenizer.from_pretrained("ai-forever/ru-en-RoSBERTa")
25
  model = AutoModel.from_pretrained("ai-forever/ru-en-RoSBERTa")
 
33
  MISTRAL_API_KEY_5 = os.getenv('MISTRAL_API_KEY_5')
34
  token = os.getenv('GITHUB_TOKEN')
35
 
 
 
 
36
  # Клиент для генерации сообщений
37
  client_mistral_generate = Mistral(api_key=MISTRAL_API_KEY)
38
 
 
198
  # return f"Ошибка при обращении к GigaChat-Pro: {e}"
199
 
200
  async def generate_message_mistral_generate(prompt, max_retries=5):
201
+
202
+ #def generate_message_mistral_generate(prompt):
203
+ # try:
204
+ # messages = [SystemMessage(content=prompt)]
205
+ # res = chat_pro(messages)
206
+ # cleaned_message = clean_message(res.content.strip())
207
+ # return cleaned_message
208
+ # except Exception as e:
209
+ # return f"Ошибка при обращении к GigaChat-Pro: {e}"
210
+
211
  retries = 0
212
  while retries < max_retries:
213
  try:
214
+ chat_response = await client_mistral_generate.chat.complete_async(
215
+ model="open-mistral-nemo",
216
+ temperature=1.0,
217
+ min_tokens=81,
218
+ max_tokens=106,
219
+ messages=[
220
+ {
221
+ "role": "user",
222
+ "content": prompt
223
+ },
224
+ ]
225
  )
 
 
226
  cleaned_message = clean_message(chat_response.choices[0].message.content.strip())
227
  return cleaned_message
228
  except Exception as e:
 
233
  await asyncio.sleep(wait_time)
234
  retries += 1
235
  else:
236
+ print(f"Error calling Mistral: {e}")
237
  return None
238
+ print("Failed to get response from Mistral after maximum retries.")
239
  return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
240
 
241
  async def generate_message_mistral_check(prompt, max_retries=5):
242
  #def generate_message_mistral_check(prompt):