fruitpicker01 committed • 4478f14
1 Parent(s): 9d1f81b
Update app.py
app.py CHANGED
@@ -216,10 +216,10 @@ async def generate_message_mistral_generate(prompt, max_retries=5):
     try:
         # Send the request to the Qwen model via the Together API
         response = await async_client.chat.completions.create(
-            model="
+            model="Meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
             messages=[{"role": "user", "content": prompt}],
             temperature=0.8,  # Parameters can be adjusted as needed
-            max_tokens=
+            max_tokens=76
         )

         # Extract and clean the response
@@ -287,7 +287,7 @@ async def generate_message_mistral_check(prompt, max_retries=5):
     try:
         # Send the request to the Qwen model via the Together API
         response = await async_client.chat.completions.create(
-            model="
+            model="Meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
             messages=[{"role": "user", "content": prompt}],
         )

@@ -327,7 +327,7 @@ async def generate_message_mistral_check_2(prompt, max_retries=5):
     try:
         # Send the request to the Qwen model via the Together API
        response = await async_client.chat.completions.create(
-            model="
+            model="Meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
             messages=[{"role": "user", "content": prompt}],
         )

@@ -395,7 +395,7 @@ async def generate_message_mistral_check_3(prompt, max_retries=5):
     try:
         # Send the request to the Qwen model via the Together API
         response = await async_client.chat.completions.create(
-            model="
+            model="Meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
             messages=[{"role": "user", "content": prompt}],
         )

@@ -435,7 +435,7 @@ async def generate_message_mistral_check_4(prompt, max_retries=5):
     try:
         # Send the request to the Qwen model via the Together API
         response = await async_client.chat.completions.create(
-            model="
+            model="Meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
             messages=[{"role": "user", "content": prompt}],
         )

@@ -2040,4 +2040,4 @@ with gr.Blocks() as demo:
         outputs=[non_personalized_messages, personalized_messages]
     )

-demo.launch()
+demo.launch()
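
For context, here is a minimal sketch of the scaffolding these hunks assume. Together's chat completions endpoint is OpenAI-compatible, so async_client is presumably an AsyncOpenAI instance pointed at it, and the max_retries parameter in each function's signature implies a retry loop around the create(...) call. Everything beyond the create(...) arguments visible in the diff (the base URL, the TOGETHER_API_KEY variable, the backoff policy, the response handling) is an assumption, not code from this commit.

import asyncio
import os

from openai import AsyncOpenAI

# Assumed setup: Together's API speaks the OpenAI protocol, so an AsyncOpenAI
# client with Together's base URL produces the async_client.chat.completions
# calls seen in the diff.
async_client = AsyncOpenAI(
    api_key=os.environ["TOGETHER_API_KEY"],
    base_url="https://api.together.xyz/v1",
)

async def generate_message(prompt: str, max_retries: int = 5) -> str | None:
    """Hypothetical stand-in for generate_message_mistral_generate."""
    for attempt in range(max_retries):
        try:
            response = await async_client.chat.completions.create(
                model="Meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
                messages=[{"role": "user", "content": prompt}],
                temperature=0.8,
                max_tokens=76,
            )
            # Extract and clean the reply text
            return response.choices[0].message.content.strip()
        except Exception:
            # Linear backoff between attempts; the commit's actual retry
            # policy is not visible in this diff.
            await asyncio.sleep(2 * (attempt + 1))
    return None

Under these assumptions, the commit amounts to swapping the model string at five call sites and capping the generator's replies at 76 tokens; the retry scaffolding itself is untouched.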