Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
@@ -9,12 +9,10 @@ import asyncio
|
|
9 |
from PIL import Image
|
10 |
import base64
|
11 |
from io import BytesIO
|
12 |
-
from huggingface_hub import InferenceClient
|
13 |
-
from transformers import AutoProcessor, AutoTokenizer
|
14 |
|
15 |
def load_system_prompt():
|
16 |
-
|
17 |
-
|
18 |
|
19 |
system_prompt = load_system_prompt()
|
20 |
|
@@ -25,178 +23,173 @@ openai_api_key = os.getenv('glhf')
|
|
25 |
yolo = os.getenv('yolo')
|
26 |
|
27 |
openai_client = OpenAI(
|
28 |
-
|
29 |
-
|
30 |
)
|
31 |
|
32 |
-
idefics_processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b")
|
33 |
-
idefics_client = InferenceClient("HuggingFaceM4/idefics2-8b-chatty")
|
34 |
-
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceM4/idefics2-8b")
|
35 |
-
|
36 |
-
chat_template = """<|user|>: Describe this image at its finest, mentioning the exact names of the objects present in it.
|
37 |
-
<|assistant|>:"""
|
38 |
-
|
39 |
-
tokenizer.chat_template = chat_template
|
40 |
-
|
41 |
def encode_local_image(image):
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
def describe_image(image_path):
|
49 |
-
image_string = encode_local_image(image_path)
|
50 |
-
messages = [
|
51 |
-
{
|
52 |
-
"role": "user",
|
53 |
-
"content": [
|
54 |
-
{"type": "image"},
|
55 |
-
{"type": "text", "text": "Describe this image in detail and explain what is in this image basically."},
|
56 |
-
],
|
57 |
-
},
|
58 |
-
]
|
59 |
-
prompt_with_template = idefics_processor.apply_chat_template(
|
60 |
-
messages, add_generation_prompt=True
|
61 |
-
)
|
62 |
-
prompt_with_images = prompt_with_template.replace("<image>", " ").format(image_string)
|
63 |
-
payload = {
|
64 |
-
"inputs": prompt_with_images,
|
65 |
-
"parameters": {
|
66 |
-
"return_full_text": False,
|
67 |
-
"max_new_tokens": 2048,
|
68 |
-
},
|
69 |
-
}
|
70 |
-
response = idefics_client.post(json=payload).decode()
|
71 |
-
return response
|
72 |
|
73 |
client = TelegramClient('bot', api_id, api_hash).start(bot_token=bot_token)
|
74 |
|
75 |
class CircularBuffer:
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
|
|
101 |
user_histories = {}
|
102 |
|
103 |
def get_user_history(user_id):
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
136 |
|
137 |
@client.on(events.NewMessage(pattern='/start'))
|
138 |
async def start(event):
|
139 |
-
|
140 |
|
141 |
@client.on(events.NewMessage(pattern='/help'))
|
142 |
async def help(event):
|
143 |
-
|
144 |
|
145 |
@client.on(events.NewMessage(pattern='/reset'))
|
146 |
async def reset(event):
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
|
151 |
@client.on(events.NewMessage)
|
152 |
async def handle_message(event):
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
|
159 |
-
|
160 |
-
|
|
|
161 |
|
162 |
-
|
163 |
-
|
164 |
-
|
165 |
-
|
166 |
-
|
167 |
-
user_message = event.raw_text
|
168 |
|
169 |
-
|
170 |
-
|
171 |
|
172 |
-
|
173 |
-
|
174 |
-
|
175 |
|
176 |
def launch_gradio():
|
177 |
-
|
178 |
-
|
179 |
-
|
180 |
-
|
181 |
-
|
182 |
-
|
183 |
-
|
184 |
-
|
185 |
-
|
186 |
-
|
187 |
-
|
188 |
-
|
189 |
|
190 |
def keep_alive():
|
191 |
-
|
192 |
-
|
193 |
-
|
194 |
-
|
195 |
-
|
196 |
-
|
197 |
-
|
198 |
|
199 |
if __name__ == "__main__":
|
200 |
-
|
201 |
-
|
202 |
-
|
|
|
9 |
from PIL import Image
|
10 |
import base64
|
11 |
from io import BytesIO
|
|
|
|
|
12 |
|
13 |
def load_system_prompt():
    """Return the chatbot system prompt stored in prompt.txt.

    Raises FileNotFoundError if prompt.txt is missing from the working dir.
    """
    # Explicit encoding so the prompt decodes identically on every platform.
    with open('prompt.txt', 'r', encoding='utf-8') as file:
        return file.read()
|
16 |
|
17 |
system_prompt = load_system_prompt()
|
18 |
|
|
|
23 |
yolo = os.getenv('yolo')
|
24 |
|
25 |
# OpenAI-compatible client pointed at Cloudflare Workers AI
# (`yolo` holds the Cloudflare account id — TODO confirm).
openai_client = OpenAI(
    base_url=f"https://api.cloudflare.com/client/v4/accounts/{yolo}/ai/v1",
    api_key=openai_api_key,
)
|
29 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
30 |
def encode_local_image(image):
    """Encode an image path/stream as a base64 JPEG data URL.

    Accepts anything PIL.Image.open accepts (path or file-like object).
    """
    pil_image = Image.open(image)
    # JPEG has no alpha channel: RGBA/P/LA images would make save() raise
    # OSError, so normalize to RGB first.
    if pil_image.mode != "RGB":
        pil_image = pil_image.convert("RGB")
    buffer = BytesIO()
    pil_image.save(buffer, format="JPEG")
    base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return f"data:image/jpeg;base64,{base64_image}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
36 |
|
37 |
# Start the Telegram client in bot mode; .start() connects and authorizes.
client = TelegramClient('bot', api_id, api_hash).start(bot_token=bot_token)
|
38 |
|
39 |
class CircularBuffer:
    """Fixed-capacity chat log keeping the most recent `size` messages.

    Messages are stored as OpenAI-style {'role': ..., 'content': ...} dicts.
    """

    def __init__(self, size: int):
        self.size = size
        self.buffer = [None] * size
        self.start = 0   # index of the oldest stored message
        self.end = 0     # index where the next message is written
        # Explicit element count: the original start/end-only scheme could
        # not tell "full" from "empty" and silently capped capacity at size-1.
        self.count = 0

    def add(self, role: str, content: str):
        """Append one message, evicting the oldest when full."""
        self.buffer[self.end] = {'role': role, 'content': content}
        self.end = (self.end + 1) % self.size
        if self.count == self.size:
            self.start = (self.start + 1) % self.size
        else:
            self.count += 1

    def get_history(self):
        """Return stored messages oldest-first as a list."""
        return [self.buffer[(self.start + i) % self.size]
                for i in range(self.count)]

    def reset(self):
        """Discard all stored messages."""
        self.buffer = [None] * self.size
        self.start = 0
        self.end = 0
        self.count = 0
|
64 |
+
|
65 |
+
# Only store history for 3 users at a time; get_user_history evicts the
# oldest entry when a fourth user appears. Keys are Telegram sender ids.
user_histories = {}
|
67 |
|
68 |
def get_user_history(user_id):
    """Return the CircularBuffer for `user_id`, creating it on first use.

    At most three users are tracked at once; creating a buffer for a fourth
    user evicts the longest-tracked one.
    """
    history = user_histories.get(user_id)
    if history is None:
        if len(user_histories) >= 3:
            # Dicts preserve insertion order, so the first key is the
            # user that was added earliest.
            evicted = next(iter(user_histories))
            del user_histories[evicted]
        history = user_histories[user_id] = CircularBuffer(99)
    return history
|
76 |
+
|
77 |
+
async def fetch_telegram_history(user_id):
    """Seed a user's history buffer from their recent Telegram dialog.

    Fetches up to the last 50 messages of the conversation with `user_id`.
    """
    messages = await client.get_messages(user_id, limit=50)
    user_history = get_user_history(user_id)
    # Telethon returns messages newest-first; reverse so the buffer is
    # filled in chronological order.
    for message in reversed(messages):
        # Media-only messages have no text; skip them rather than storing
        # None in the history sent to the model.
        if message.text:
            role = 'user' if message.sender_id == user_id else 'assistant'
            user_history.add(role, message.text)
|
83 |
+
|
84 |
+
async def get_completion(prompt: str, user_id, image_base64=None) -> str:
    """Run one chat turn against the LLM and return the assistant's reply.

    The user's rolling history is sent along with the new prompt; on first
    contact it is seeded from recent Telegram messages. `image_base64`
    optionally attaches a picture as an image_url content part. On API
    failure the error text itself is returned (and logged).
    """
    user_history = get_user_history(user_id)

    # No stored context yet for this user: backfill from Telegram.
    if not user_history.get_history():
        await fetch_telegram_history(user_id)

    # Assemble the new user turn: text first, then the optional image part.
    user_message = [{"type": "text", "text": prompt}]
    if image_base64:
        user_message.append({
            "type": "image_url",
            "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"},
        })

    messages = [
        {"role": "system", "content": system_prompt},
        *user_history.get_history(),
        {"role": "user", "content": user_message},
    ]

    try:
        stream = openai_client.chat.completions.create(
            model="@cf/meta/llama-3.2-11b-vision-instruct",
            messages=messages,
            stream=True,
            temperature=0.8,
            top_p=0.9,
            max_tokens=4096,
            frequency_penalty=0.2,
            presence_penalty=0.6
        )
        # Collect streamed deltas and join once at the end.
        pieces = []
        for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta is not None:
                pieces.append(delta)
        message = ''.join(pieces)
    except Exception as e:
        # Surface the failure to the user instead of crashing the handler.
        message = f"Error: {str(e)}"
        print(e)

    # Record this exchange (text only — images are not replayed) in history.
    user_history.add("user", prompt)
    user_history.add("assistant", message)
    return message
|
129 |
|
130 |
@client.on(events.NewMessage(pattern='/start'))
async def start(event):
    """Greet the user when they send /start."""
    await event.respond("Hello! I am your boo.")
|
133 |
|
134 |
@client.on(events.NewMessage(pattern='/help'))
async def help(event):  # noqa: A001 - name mirrors the /help command
    """List the bot's available commands."""
    await event.respond(
        "Here is how I can help you:\n"
        "/start - Start the bot\n"
        "/help - Get help\n"
        "/reset - Reset chat history"
    )
|
137 |
|
138 |
@client.on(events.NewMessage(pattern='/reset'))
async def reset(event):
    """Clear the sender's stored chat history on /reset."""
    get_user_history(event.sender_id).reset()
    await event.respond("History has been reset.")
|
143 |
|
144 |
@client.on(events.NewMessage)
async def handle_message(event):
    """Answer every non-command message, describing an attached photo if any."""
    # Commands are handled by their dedicated handlers above.
    if event.raw_text.startswith(('/start', '/help', '/reset')):
        return
    try:
        # Show a "typing..." indicator for a moment before replying.
        async with client.action(event.chat_id, 'typing'):
            await asyncio.sleep(3)

        sender = event.sender_id
        # Ensure this user has a history buffer (creates/evicts as needed).
        get_user_history(sender)
        text = event.raw_text

        # Encode an attached photo, if present, as a base64 JPEG data URL.
        image_base64 = None
        if event.photo:
            downloaded = await event.download_media(file=BytesIO())
            image_base64 = encode_local_image(downloaded)

        reply = await get_completion(text, sender, image_base64)
        await event.respond(reply)

    except Exception as e:
        print(f"An error occurred: {e}")
        await event.respond("Whoopsie 🤭")
|
168 |
|
169 |
def launch_gradio():
    """Serve a small Gradio landing page that points visitors to the bot."""
    welcome_message = """
    # Meet Scarlett!
    - Scarlett is a girl residing in Telegram
    - She can chat with you without any censorship.
    - Head over to https://t.me/Scarlett_Rogue_isNotBot
    """
    with gr.Blocks() as demo:
        gr.Markdown(f"""
        # Follow me: https://example.com
        {welcome_message}
        """)
    demo.launch(show_api=False)
|
182 |
|
183 |
def keep_alive():
    """Ping the Space every 3 minutes so the host doesn't idle it out.

    Runs forever; intended to be launched on a daemon-style worker thread.
    """
    while True:
        try:
            requests.get("https://rbn2008k-Scarlett.hf.space")
            print("Ping success")
        except Exception as e:
            print(f"Keep-alive request failed: {e}")
        # Wait regardless of success or failure before the next ping.
        time.sleep(180)
|
191 |
|
192 |
if __name__ == "__main__":
    # Gradio UI and the keep-alive pinger each run on a background thread;
    # the Telegram client owns the main thread until disconnect.
    threading.Thread(target=launch_gradio).start()
    threading.Thread(target=keep_alive).start()
    client.run_until_disconnected()
|