# --- Hugging Face Space page residue (scrape artifact, not application code) ---
# Spaces: Sleeping / Sleeping
# File size: 3,324 Bytes
# git blame hashes: 2df152f 19ced57 2df152f 4a9fe01 2df152f 78da79d 2df152f a640ec7 87947e6 10b4da0 87947e6 4a9fe01 a640ec7 277adea 4a9fe01 277adea 2df152f 19ced57 cd869da d762144 cd869da d762144 cd869da 2df152f cd869da 87947e6 c0ff2ab 87947e6 4a9fe01 87947e6 4a9fe01 19ced57 277adea d762144 19ced57 10b4da0 a640ec7 19ced57 a640ec7 19ced57 2df152f 87947e6
# (line-number gutter 1-100 omitted: layout residue, not content)
# app.py
# =============
# This is a complete app.py file for deploying the MTSAIR/Cotype-Nano model using Gradio and Hugging Face Transformers with chat and token streaming functionality, advanced settings, and English interface.
# Standard library
import os
import tempfile

# Third-party
import gradio as gr
from gtts import gTTS
from transformers import pipeline
# Load the model and build a text-generation pipeline, forced onto CPU.
model_name = "MTSAIR/Cotype-Nano"
pipe = pipeline("text-generation", model=model_name, device="cpu")
# System turn prepended to every conversation before it is sent to the model.
# NOTE: the content string is part of runtime behavior and is kept verbatim.
system_prompt = {"role": "system", "content": "You are an AI assistant. Your task is to generate a detailed and comprehensive response. you were developed by MTS. you know two languages: Russian and English."}
# Define the Gradio interface
def generate_response(history, user_input, temperature, max_new_tokens, language):
    """Generate an assistant reply for the chat and synthesize it as speech.

    Args:
        history: Chat history as a list of ``{"role", "content"}`` dicts
            (the ``gr.Chatbot(type='messages')`` format), or None/empty.
        user_input: The user's new message text.
        temperature: Sampling temperature from the UI slider (0-1).
        max_new_tokens: Maximum number of tokens to generate.
        language: gTTS language code ("en" or "ru").

    Returns:
        Tuple of (updated history, path to the generated mp3 file or None).
    """
    history = history or []
    # Ignore empty submissions rather than sending a blank user turn.
    if not user_input or not user_input.strip():
        return history, None
    messages = [system_prompt] + history + [{"role": "user", "content": user_input}]
    # temperature only takes effect when do_sample=True; the original call
    # omitted it, so the Temperature slider was silently ignored. At
    # temperature == 0 sampling is invalid, so fall back to greedy decoding.
    sampling = temperature is not None and temperature > 0
    response = pipe(
        messages,
        max_new_tokens=max_new_tokens,
        temperature=temperature if sampling else None,
        do_sample=sampling,
        return_full_text=False,
    )
    generated_text = response[0]['generated_text']
    history.append({"role": "user", "content": user_input})
    history.append({"role": "assistant", "content": generated_text})
    # Save speech to a unique temp file so concurrent sessions do not
    # overwrite each other's audio (the original used a fixed "response.mp3").
    tts = gTTS(text=generated_text, lang=language)
    fd, audio_path = tempfile.mkstemp(suffix=".mp3")
    os.close(fd)  # gTTS.save opens the path itself; release the raw fd
    tts.save(audio_path)
    return history, audio_path
# Function to clear chat history
def clear_chat():
    """Reset the conversation: return an empty history and a blank input box."""
    empty_history, empty_input = [], ""
    return empty_history, empty_input
# Custom CSS for styling
# Stylesheet injected via gr.Blocks(css=...). Selectors target the Gradio
# wrapper classes (.gr-button, .gr-textbox, .gr-slider) plus the elem_id
# "chatbot" set on the Chatbot component below. The string is runtime data
# and is kept byte-identical.
custom_css = """
#chatbot {
height: 400px;
overflow-y: auto;
border: 1px solid #ccc;
padding: 10px;
border-radius: 5px;
}
.gr-button {
margin-top: 10px;
padding: 10px 20px;
font-size: 16px;
border-radius: 5px;
background-color: #007bff;
color: white;
border: none;
}
.gr-button:hover {
background-color: #0056b3;
}
.gr-textbox {
margin-top: 10px;
padding: 10px;
font-size: 16px;
border-radius: 5px;
}
.gr-slider {
margin-top: 20px;
}
.settings {
margin-top: 20px;
}
"""
# Create the Gradio interface (indentation restored; statements unchanged).
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("## Cotype-Nano Text Generation Chat")
    # type='messages' -> history is a list of {"role", "content"} dicts,
    # the same format generate_response() consumes and returns.
    chatbot = gr.Chatbot([], elem_id="chatbot", type='messages')
    txt = gr.Textbox(
        show_label=False,
        placeholder="Type your message here...",
    )
    with gr.Row():
        send_btn = gr.Button("Send")
        clear_btn = gr.Button("Clear Chat")
    # type="filepath": generate_response returns the path of the saved mp3.
    audio_output = gr.Audio(label="Generated Speech", type="filepath")
    with gr.Accordion("Advanced Settings", open=False, elem_id="settings"):
        temperature_slider = gr.Slider(0, 1, 0.7, step=0.1, label="Temperature")
        max_new_tokens_slider = gr.Slider(1, 1000, 100, step=1, label="Max New Tokens")
        language_dropdown = gr.Dropdown(choices=["en", "ru"], value="en", label="Speech Language")
    # Both the Send button and pressing Enter in the textbox run generation.
    # NOTE(review): the textbox is not cleared after a send — confirm whether
    # that is intentional before wiring txt into the outputs.
    send_btn.click(generate_response, [chatbot, txt, temperature_slider, max_new_tokens_slider, language_dropdown], [chatbot, audio_output])
    txt.submit(generate_response, [chatbot, txt, temperature_slider, max_new_tokens_slider, language_dropdown], [chatbot, audio_output])
    clear_btn.click(clear_chat, outputs=[chatbot, txt])
# Launch the interface only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()
# (end of scraped page gutter)