Update app.py
app.py CHANGED
@@ -1,189 +1,224 @@
import os
import time
import threading
import requests
from telethon import TelegramClient, events
-
- import
- from
from pymongo import MongoClient

- #
- mongo_uri = os.getenv('MONGO_URI')
- mongo_client = MongoClient(mongo_uri)
- db = mongo_client['Scarlett']
- user_histories_collection = db['chats']
-
def load_system_prompt():
    with open('prompt.txt', 'r') as file:
        return file.read()

system_prompt = load_system_prompt()

api_id = os.getenv('api_id')
api_hash = os.getenv('api_hash')
bot_token = os.getenv('bot_token')
-
-

-

-
-
-
-

-     def add(self, role: str, content: str):
-         user_history = user_histories_collection.find_one({'user_id': self.user_id})
-         if user_history:
-             messages = user_history['history']
-         else:
-             messages = []
-
-         messages.append({'role': role, 'content': content})
-         if len(messages) > self.size:
-             messages = messages[-self.size:]
-
-         user_histories_collection.update_one(
-             {'user_id': self.user_id},
-             {'$set': {'history': messages}},
-             upsert=True
-         )
-
-     def get_history(self):
-         user_history = user_histories_collection.find_one({'user_id': self.user_id})
-         if user_history:
-             return user_history['history']
-         return []
-
-     def reset(self):
-         user_histories_collection.delete_one({'user_id': self.user_id})
-
- # Only store history for 3 users
- user_histories = {}
-
- def get_user_history(user_id):
-     if user_id not in user_histories:
-         user_histories[user_id] = MongoDBHistory(user_id)
-     return user_histories[user_id]
-
- def fetch_image_as_u8(image_binary):
-     return list(image_binary)
-
- async def get_completion(prompt: str, user_id, image_u8=None) -> str:
-     user_history = get_user_history(user_id)
-
-     # Prepare the message content
    messages = [
-         {"role": "
-         *user_history.get_history(),
-         {"role": "user", "content": prompt}
    ]

-
-
-
-         "messages": messages,
-         "temperature": 0.5,  # Adjust creativity
-         "top_p": 0.9,  # Nucleus sampling parameter
-         "max_tokens": 512,  # Maximum token limit
-         "frequency_penalty": 1.6,  # Penalize repeated tokens
-         "presence_penalty": 1.9  # Encourage new topics
-     }
-
-     # If there is an image, include it as an array of unsigned 8-bit integers (u8)
-     if image_u8:
-         print(image_u8)
-         data["image"] = image_u8  # Send as array of integers
-
-     headers = {
-         "Authorization": f"Bearer {cloudflare_api_key}"
-     }
-
-     # Send request to Cloudflare API
-     request = requests.post(
-         url=cloudflare_base_url,
-         json=data,
-         headers=headers
-     )
-     response = request.json()
-     print(response)
-
-     if response['success'] == True:
-         message = response["result"]["response"]
-     else:
-         message = f"Error: {response['errors'][0]['message']}"

-
-
-
-
-
-
-
-

@client.on(events.NewMessage(pattern='/start'))
async def start(event):
-     await event.respond("Hello!")

@client.on(events.NewMessage(pattern='/help'))
async def help(event):
-     await event.respond("Here is how I can help you:\n/start - Start\n/help -

@client.on(events.NewMessage(pattern='/reset'))
async def reset(event):
-
-
    await event.respond("History has been reset.")

@client.on(events.NewMessage)
async def handle_message(event):
-     if event.raw_text.startswith('/start') or event.raw_text.startswith('/help') or event.raw_text.startswith('/reset'):
-         return
    try:
-
-         await asyncio.sleep(3)

-         user_id = event.sender_id
-         user_message = event.raw_text
-
-         # Check if the message contains an image
-         image_u8 = None
        if event.photo:
-
-

-
-

    except Exception as e:
        print(f"An error occurred: {e}")
        await event.respond("Whoopsie 🤭")

-
-     welcome_message = """
-     # Meet Scarlett!
-     - Scarlett is a girl residing in Telegram
-     - She can chat with you without any censorship.
-     - Head over to https://t.me/Scarlett_Rogue_isNotBot
-     """
-     with gr.Blocks() as demo:
-         gr.Markdown(f"""
-         # Follow me: https://example.com
-         {welcome_message}
-         """)
-     demo.launch(show_api=False)
-
def keep_alive():
    while True:
        try:
-
        except Exception as e:
            print(f"Keep-alive request failed: {e}")
-         time.sleep(

if __name__ == "__main__":
-     threading.Thread(target=launch_gradio).start()
    threading.Thread(target=keep_alive).start()
    client.run_until_disconnected()
import os
import time
import threading
+ import base64
+ from io import BytesIO
+ from collections import OrderedDict
+ from datetime import datetime
+
import requests
+ from openai import OpenAI
from telethon import TelegramClient, events
+ from PIL import Image
+ from huggingface_hub import InferenceClient
+ from transformers import AutoProcessor, AutoTokenizer
+ import pymongo
from pymongo import MongoClient

+ # Load system prompt from a file
def load_system_prompt():
    with open('prompt.txt', 'r') as file:
        return file.read()

system_prompt = load_system_prompt()

+ # Environment variables
api_id = os.getenv('api_id')
api_hash = os.getenv('api_hash')
bot_token = os.getenv('bot_token')
+ openai_api_key = os.getenv('glhf')
+ ping_key = os.getenv('bolo')
+ api_url = os.getenv('yolo')
+ model = os.getenv('model')
+ model1 = os.getenv('model1')
+ model2 = os.getenv('model2')
+ mongoURI = os.getenv('MONGO_URI')
+
+ # Initialize OpenAI and MongoDB clients
+ openai_client = OpenAI(api_key=openai_api_key, base_url=api_url)
+ mongo_client = MongoClient(mongoURI)
+ db = mongo_client['Scarlett']
+ chat_collection = db['chats']
+
+ # Initialize Hugging Face models for image processing
+ idefics_processor = AutoProcessor.from_pretrained(model1)
+ idefics_client = InferenceClient(model2)
+ tokenizer = AutoTokenizer.from_pretrained(model1)
+
+ # Local cache for up to 5 users
+ local_chat_history = OrderedDict()
+ MAX_LOCAL_USERS = 5
+
+ # Retrieve chat history from MongoDB
+ def get_history_from_mongo(user_id):
+     result = chat_collection.find_one({"user_id": user_id})
+     return result.get("messages", []) if result else []
+
+ # Store message in MongoDB (limit to last 99 messages)
+ def store_message_in_mongo(user_id, role, content):
+     chat_collection.update_one(
+         {"user_id": user_id},
+         {
+             "$push": {
+                 "messages": {
+                     "$each": [{"role": role, "content": content}],
+                     "$slice": -99
+                 }
+             }
+         },
+         upsert=True
+     )
+
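The $push update above combines $each with $slice: -99, so MongoDB itself trims each user's document to the newest 99 messages on every write. A plain-Python sketch of the same trimming rule (the message contents below are made up for illustration):

    history = [{"role": "user", "content": f"message {i}"} for i in range(120)]
    history = (history + [{"role": "assistant", "content": "newest reply"}])[-99:]  # append, then keep the newest 99
    assert len(history) == 99 and history[-1]["content"] == "newest reply"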
+ # Get chat history from local cache or MongoDB
+ def get_chat_history(user_id):
+     if user_id in local_chat_history:
+         local_chat_history.move_to_end(user_id)  # Mark as most recently used
+         return local_chat_history[user_id]
+
+     # Load from MongoDB if not in local cache
+     history = get_history_from_mongo(user_id)
+     local_chat_history[user_id] = history
+
+     if len(local_chat_history) > MAX_LOCAL_USERS:
+         local_chat_history.popitem(last=False)  # Remove LRU user
+
+     return history

+ # Update chat history (both local and MongoDB)
+ def update_chat_history(user_id, role, content):
+     if user_id not in local_chat_history:
+         local_chat_history[user_id] = get_history_from_mongo(user_id)

+     local_chat_history[user_id].append({"role": role, "content": content})
+     local_chat_history.move_to_end(user_id)
+
+     if len(local_chat_history) > MAX_LOCAL_USERS:
+         local_chat_history.popitem(last=False)
+
+     store_message_in_mongo(user_id, role, content)
+
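get_chat_history and update_chat_history keep at most MAX_LOCAL_USERS entries in memory by treating the OrderedDict as an LRU cache: move_to_end marks a user as recently used and popitem(last=False) evicts the oldest one. A minimal, self-contained sketch of that pattern (the user ids are made up):

    from collections import OrderedDict

    cache = OrderedDict()
    for uid in (1, 2, 3, 4, 5, 6):
        cache[uid] = []                # stand-in for that user's history
        cache.move_to_end(uid)         # mark as most recently used
        if len(cache) > 5:
            cache.popitem(last=False)  # evict the least recently used user
    assert list(cache) == [2, 3, 4, 5, 6]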
+ # Encode image to base64
+ def encode_local_image(image):
+     pil_image = Image.open(image)
+     buffer = BytesIO()
+     pil_image.save(buffer, format="JPEG")
+     return f"data:image/jpeg;base64,{base64.b64encode(buffer.getvalue()).decode('utf-8')}"
+
+ # Describe image using the model
+ def describe_image(image_path):
+     image_string = encode_local_image(image_path)

    messages = [
+         {"role": "user", "content": [{"type": "image"}, {"type": "text", "text": os.getenv('USER_PROMPT')}]}
    ]

+     prompt_with_template = idefics_processor.apply_chat_template(
+         messages, add_generation_prompt=True, chat_template=os.getenv('CHAT_TEMPLATE')
+     )

+     prompt_with_images = prompt_with_template.replace("<image>", f"![]({image_string})")  # replace the <image> token with the base64 data URI
+
+     payload = {
+         "inputs": prompt_with_images,
+         "parameters": {"return_full_text": False, "max_new_tokens": 2048},
+     }
+
+     response = idefics_client.post(json=payload).decode()
+     return response
+
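encode_local_image turns the downloaded photo into a base64 data URI, which describe_image then embeds in the prompt sent to the image model. A self-contained sketch of that encoding step, using an in-memory test image instead of a Telegram download:

    import base64
    from io import BytesIO
    from PIL import Image

    img = Image.new("RGB", (8, 8), "red")  # stand-in for the downloaded photo
    buffer = BytesIO()
    img.save(buffer, format="JPEG")
    data_uri = f"data:image/jpeg;base64,{base64.b64encode(buffer.getvalue()).decode('utf-8')}"
    print(data_uri[:30])  # data:image/jpeg;base64,...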
+ # Telegram bot client
+ client = TelegramClient('bot', api_id, api_hash).start(bot_token=bot_token)
+
+ # Async function to get OpenAI completion
+ async def get_completion(event, user_id, prompt):
+     async with client.action(event.chat_id, 'typing'):
+         history = get_chat_history(user_id)
+         messages = [
+             {"role": "system", "content": system_prompt},
+             *history,
+             {"role": "user", "content": prompt},
+         ]

+         try:
+             response = openai_client.chat.completions.create(
+                 model=model,
+                 messages=messages,
+                 max_tokens=512,
+                 temperature=0.5,
+                 top_p=1.0,
+                 frequency_penalty=0.9,
+                 presence_penalty=0.9,
+             )
+             message = response.choices[0].message.content
+         except Exception as e:
+             message = f"Error: {str(e)}"
+             print(e)
+
+         update_chat_history(user_id, "user", prompt)  # Update history
+         update_chat_history(user_id, "assistant", message)  # Update assistant's response
+         return message
+
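get_completion sends the system prompt, the cached history, and the new user message to an OpenAI-compatible endpoint. A stripped-down sketch of that call in isolation; the API key, base URL, and model name below are placeholders, not the values this Space reads from its environment:

    from openai import OpenAI

    llm = OpenAI(api_key="YOUR_API_KEY", base_url="https://example.com/v1")  # placeholders
    completion = llm.chat.completions.create(
        model="your-model-name",  # placeholder
        messages=[
            {"role": "system", "content": "You are a friendly Telegram assistant."},
            {"role": "user", "content": "Hello"},
        ],
        max_tokens=64,
    )
    print(completion.choices[0].message.content)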
+ # Telegram bot events
@client.on(events.NewMessage(pattern='/start'))
async def start(event):
+     await event.respond("Hello! I am your chat assistant.")

@client.on(events.NewMessage(pattern='/help'))
async def help(event):
+     await event.respond("Here is how I can help you:\n/start - Start the bot\n/help - Get help\n/reset - Reset chat history")

@client.on(events.NewMessage(pattern='/reset'))
async def reset(event):
+     user_id = event.chat_id
+     chat_collection.delete_one({"user_id": user_id})  # Reset MongoDB chat history for the user
+     if user_id in local_chat_history:
+         del local_chat_history[user_id]  # Remove from local cache if present
    await event.respond("History has been reset.")

@client.on(events.NewMessage)
async def handle_message(event):
    try:
+         user_id = event.chat_id  # Use chat_id to distinguish between users

        if event.photo:
+             photo = await event.download_media()
+             image_description = describe_image(photo)
+             user_message = f"{event.raw_text}\n\nContent of the image: {image_description}"
+         else:
+             user_message = event.raw_text

+         if user_message.startswith('/start') or user_message.startswith('/help') or user_message.startswith('/reset'):
+             return

+         response = await get_completion(event, user_id, user_message)
+         await event.respond(response)
    except Exception as e:
        print(f"An error occurred: {e}")
        await event.respond("Whoopsie 🤭")

+ # Keep-alive function to keep the bot running
def keep_alive():
+     ping_client = OpenAI(api_key=ping_key, base_url=api_url)
    while True:
        try:
+             messages = [
+                 {"role": "system", "content": "Be a helpful assistant."},
+                 {"role": "user", "content": "Hello"}
+             ]
+             request = ping_client.chat.completions.create(
+                 model=model,
+                 messages=messages,
+                 max_tokens=10,
+                 temperature=0.6,
+                 top_p=0.9,
+                 frequency_penalty=0.2,
+                 presence_penalty=0.6,
+             )
+             print(request)
        except Exception as e:
            print(f"Keep-alive request failed: {e}")
+         time.sleep(1800)  # Ping every 30 minutes

if __name__ == "__main__":
    threading.Thread(target=keep_alive).start()
    client.run_until_disconnected()
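The updated app.py is configured entirely through environment variables (Space secrets). A quick checklist sketch using only the names that appear in the diff above; the values are whatever you set in the Space settings:

    import os

    required = [
        "api_id", "api_hash", "bot_token",   # Telegram credentials
        "glhf", "bolo", "yolo",              # API keys and OpenAI-compatible base URL
        "model", "model1", "model2",         # chat model and image-model ids
        "MONGO_URI",                         # MongoDB connection string
        "USER_PROMPT", "CHAT_TEMPLATE",      # image-description prompt and chat template
    ]
    for name in required:
        print(f"{name}: {'set' if os.getenv(name) else 'MISSING'}")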