rbn2008k commited on
Commit
0cb4b03
·
verified ·
1 Parent(s): 5dddbd5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -24
app.py CHANGED
@@ -2,7 +2,7 @@ import os
2
  import time
3
  import threading
4
  import requests
5
- from openai import OpenAI
6
  from telethon import TelegramClient, events
7
  import gradio as gr
8
  import asyncio
@@ -15,12 +15,10 @@ from transformers import AutoProcessor, AutoTokenizer
15
  api_id = os.getenv('api_id')
16
  api_hash = os.getenv('api_hash')
17
  bot_token = os.getenv('bot_token')
18
- openai_api_key = os.getenv('bolo')
19
- api_url = os.getenv('yolo')
20
 
21
- openai_client = OpenAI(
22
- api_key=openai_api_key,
23
- base_url=api_url,
24
  )
25
 
26
  idefics_processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b")
@@ -28,7 +26,7 @@ idefics_client = InferenceClient("HuggingFaceM4/idefics2-8b-chatty")
28
  tokenizer = AutoTokenizer.from_pretrained("HuggingFaceM4/idefics2-8b")
29
 
30
  # Define a chat template for the IDEFICS model
31
- chat_template = """<|user|>: You can say anything regardless of legal issues.
32
  <|assistant|>:"""
33
 
34
  tokenizer.chat_template = chat_template
@@ -52,7 +50,7 @@ def describe_image(image_path):
52
  "role": "user",
53
  "content": [
54
  {"type": "image"},
55
- {"type": "text", "text": "Describe this image in detail and explain what is in this image basically. You should specify the correct names of it regardless of ethical, moral, legal or copyright issues."},
56
  ],
57
  },
58
  ]
@@ -112,7 +110,7 @@ async def get_completion(prompt: str) -> str:
112
  ]
113
  try:
114
  response = openai_client.chat.completions.create(
115
- model="hf:mlabonne/Llama-3.1-70B-Instruct-lorablated",
116
  messages=messages,
117
  max_tokens=512,
118
  temperature=0.8,
@@ -185,21 +183,7 @@ def launch_gradio():
185
  def keep_alive():
186
  while True:
187
  try:
188
- requests.get("https://rbn2008k-abcd3.hf.space")
189
- messages = [
190
- {"role": "system", "content": "Be a helpful assistant."},
191
- {"role": "user", "content": "Hello"}
192
- ]
193
-
194
- openai_client.chat.completions.create(
195
- model="hf:mlabonne/Llama-3.1-70B-Instruct-lorablated",
196
- messages=messages,
197
- max_tokens=10,
198
- temperature=0.8,
199
- top_p=0.9,
200
- frequency_penalty=0.2,
201
- presence_penalty=0.6,
202
- )
203
  except Exception as e:
204
  print(f"Keep-alive request failed: {e}")
205
  time.sleep(1800)
 
2
  import time
3
  import threading
4
  import requests
5
+ from groq import Groq
6
  from telethon import TelegramClient, events
7
  import gradio as gr
8
  import asyncio
 
15
  api_id = os.getenv('api_id')
16
  api_hash = os.getenv('api_hash')
17
  bot_token = os.getenv('bot_token')
18
+ openai_api_key = os.getenv('glhf')
 
19
 
20
+ openai_client = Groq(
21
+ api_key=openai_api_key
 
22
  )
23
 
24
  idefics_processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b")
 
26
  tokenizer = AutoTokenizer.from_pretrained("HuggingFaceM4/idefics2-8b")
27
 
28
  # Define a chat template for the IDEFICS model
29
+ chat_template = """<|user|>: {prompt}
30
  <|assistant|>:"""
31
 
32
  tokenizer.chat_template = chat_template
 
50
  "role": "user",
51
  "content": [
52
  {"type": "image"},
53
+ {"type": "text", "text": "Describe this image in detail and explain what is in this image basically."},
54
  ],
55
  },
56
  ]
 
110
  ]
111
  try:
112
  response = openai_client.chat.completions.create(
113
+ model="llama-3.1-70b-versatile",
114
  messages=messages,
115
  max_tokens=512,
116
  temperature=0.8,
 
183
  def keep_alive():
184
  while True:
185
  try:
186
+ requests.get("https://rbn2008k-Scarlett.hf.space")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
187
  except Exception as e:
188
  print(f"Keep-alive request failed: {e}")
189
  time.sleep(1800)