# app.py — Hugging Face Space "xcv" (commit 78fe226, "Update app.py" by Gargaz)
from flask import Flask, request, Response, render_template
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
# Flask application instance; the routes below ("/" and "/chat") are registered on it.
app = Flask(__name__)
# Single-page chat UI (HTML + CSS + JS) served verbatim from "/".
# The inline script sends the user's message via GET /chat and renders the
# token stream it receives as Server-Sent Events; the sentinel "[DONE]"
# closes the EventSource.  Kept as one string so the app is a single file.
HTML_CONTENT = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" href="https://fonts.googleapis.com/css2?family=Material+Symbols+Rounded:opsz,wght,FILL,GRAD@20..48,100..700,0..1,-50..200" />
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=Poppins:wght@300&display=swap" rel="stylesheet">
<title>AI Chat Interface</title>
<style>
*{
padding: 0;
margin: 0;
font-family: 'Poppins', sans-serif;
box-sizing: border-box;
}
body {
overflow: hidden;
}
/* Hide scrollbar only for Webkit browsers (Chrome, Safari, Opera) */
::-webkit-scrollbar {
display: none;
}
body{
width: 100%;
height: 100vh;
background-color: #212121;
}
.chat{
display: flex;
gap: 20px;
padding: 25px;
color: #fff;
font-size: 15px;
font-weight: 300;
}
.chat img{
width: 35px;
height: 35px;
border-radius: 50px;
}
.response{
background-color: #212121;
}
.messagebar{
position: fixed;
bottom: 0;
height: 5rem;
width: 100%;
display: flex;
align-items: center;
justify-content: center;
background-color: #212121;
}
.messagebar .bar-wrapper{
background-color: #2f2f2f;
border-radius: 20px;
width: 70vw;
padding: 10px;
display: flex;
align-items: center;
justify-content: space-between;
}
.bar-wrapper input{
width: 100%;
padding: 5px;
border: none;
outline: none;
font-size: 14px;
background: none;
color: #ccc;
}
.bar-wrapper input::placeholder{
color: #ccc;
}
.messagebar button{
display: flex;
align-items: center;
justify-content: center;
background: none;
border: none;
color: #fff;
cursor: pointer;
}
.message-box{
height: calc(100vh - 5rem);
overflow-y: auto;
}
</style>
</head>
<body>
<div class="chatbox-wrapper">
<div class="message-box" id="chat-container">
<div class="chat response">
<img src="https://freelogopng.com/images/all_img/1681038800chatgpt-logo-black.png" alt="AI">
<span>Hello there! <br>
How can I help you today.
</span>
</div>
</div>
<div class="messagebar">
<div class="bar-wrapper">
<input type="text" id="user-input" placeholder="Enter your message...">
<button onclick="sendMessage()">
<span class="material-symbols-rounded">
send
</span>
</button>
</div>
</div>
</div>
<script>
const messageBar = document.querySelector("#user-input");
const sendBtn = document.querySelector(".bar-wrapper button");
const messageBox = document.querySelector("#chat-container");
function addMessage(message, isUser) {
const messageElement = document.createElement('div');
messageElement.classList.add('chat');
if (!isUser) messageElement.classList.add('response');
const imgElement = document.createElement('img');
imgElement.src = isUser ? "https://wallpaperaccess.com/full/1595920.jpg" : "https://freelogopng.com/images/all_img/1681038800chatgpt-logo-black.png";
imgElement.alt = isUser ? "User" : "AI";
const spanElement = document.createElement('span');
spanElement.textContent = message;
messageElement.appendChild(imgElement);
messageElement.appendChild(spanElement);
messageBox.appendChild(messageElement);
messageBox.scrollTop = messageBox.scrollHeight;
}
function sendMessage() {
const message = messageBar.value.trim();
if (message) {
addMessage(message, true);
messageBar.value = '';
const eventSource = new EventSource(`/chat?message=${encodeURIComponent(message)}`);
let aiResponse = '';
eventSource.onmessage = function(event) {
if (event.data === '[DONE]') {
eventSource.close();
} else {
aiResponse += event.data;
const aiMessageElement = document.querySelector('.chat.response:last-child span');
if (aiMessageElement) {
aiMessageElement.textContent = aiResponse;
} else {
addMessage(aiResponse, false);
}
}
};
eventSource.onerror = function(error) {
console.error('EventSource failed:', error);
eventSource.close();
};
}
}
messageBar.addEventListener('keypress', function(event) {
if (event.key === 'Enter') {
sendMessage();
}
});
</script>
</body>
</html>
'''
def download_model():
    """Fetch the quantized Gemma-2 2B-it GGUF weights from the Hugging Face
    Hub and return the local file path.

    The Hub client caches downloads, so repeated calls after the first one
    resolve from the local cache instead of re-downloading.
    """
    repo_id = "lmstudio-community/gemma-2-2b-it-GGUF"
    gguf_file = "gemma-2-2b-it-Q6_K.gguf"
    return hf_hub_download(repo_id, filename=gguf_file)
def initialize_model(model_path):
    """Load the GGUF model at *model_path* into a llama.cpp instance.

    Configured with a 4096-token context window, 4 CPU threads, and
    ``n_gpu_layers=-1`` so every layer is offloaded to the GPU when one
    is available (llama.cpp silently falls back to CPU otherwise).
    """
    return Llama(
        model_path=model_path,
        n_gpu_layers=-1,  # offload all layers to GPU if present
        n_threads=4,
        n_ctx=4096,
    )
# Download (or reuse the cached) weights and load the model once at import
# time; server startup blocks until the model is ready.
model_path = download_model()
llm = initialize_model(model_path)
system_prompt = (
    "You are a normal AI assistant. Your mission is to help people and respond clearly and friendly."
)
# Conversation state shared by ALL clients of this process — there is no
# per-session isolation, so concurrent users share one transcript.
chat_history = [{"role": "system", "content": system_prompt}]
@app.route('/')
def index():
    """Serve the embedded single-page chat UI for the root URL."""
    page = HTML_CONTENT
    return page
@app.route('/chat')
def chat():
    """Stream the model's reply to ``?message=...`` as Server-Sent Events.

    Appends the user message to the shared history, prompts the model with
    the full transcript, and yields each generated chunk as an SSE ``data:``
    frame. A final ``data: [DONE]`` frame tells the client to close the
    EventSource. The completed reply is appended to the history, which is
    trimmed while always preserving the system prompt.
    """
    global chat_history
    user_message = request.args.get('message', '')
    chat_history.append({"role": "user", "content": user_message})
    full_prompt = "\n".join(f"{msg['role']}: {msg['content']}" for msg in chat_history)
    full_prompt += "\nAssistant:"
    def generate():
        # BUG FIX: `global` is required here as well — the history-trim
        # assignment below would otherwise make `chat_history` local to this
        # generator, so the `append` above it raised UnboundLocalError.
        global chat_history
        ai_response = ""
        for token in llm(full_prompt, max_tokens=1000, stop=["User:"], stream=True):
            chunk = token['choices'][0]['text']
            if chunk:
                ai_response += chunk
                # BUG FIX: SSE frames are newline-delimited, so a chunk that
                # contains "\n" must be emitted as multiple "data:" lines —
                # interpolating it into one line corrupts the stream framing.
                yield "".join(f"data: {line}\n" for line in chunk.split("\n")) + "\n"
        chat_history.append({"role": "assistant", "content": ai_response.strip()})
        if len(chat_history) > 10:
            # Keep the system prompt plus the 9 most recent messages; the old
            # plain `[-10:]` trim eventually dropped the system prompt.
            chat_history = [chat_history[0]] + chat_history[-9:]
        yield "data: [DONE]\n\n"
    return Response(generate(), content_type='text/event-stream')
# Run the Flask development server when executed directly.
# NOTE(review): debug=True enables the interactive debugger/reloader — it
# should be disabled for any non-local deployment.
if __name__ == '__main__':
    app.run(debug=True, port=5000)