# Genaiapp / app.py
import gradio as gr
from diffusers import StableDiffusionPipeline
import torch
from transformers import MarianMTModel, MarianTokenizer, AutoModelForCausalLM, AutoTokenizer
# Load the pre-trained Stable Diffusion model for text-to-image
model_id = "CompVis/stable-diffusion-v1-4"
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = StableDiffusionPipeline.from_pretrained(model_id).to(device)  # Use the GPU if available, otherwise fall back to CPU
# Preload the default MarianMT translation model (English -> German);
# translate_text below loads other language pairs on demand
translation_model_name = "Helsinki-NLP/opus-mt-en-de"
tokenizer = MarianTokenizer.from_pretrained(translation_model_name)
translation_model = MarianMTModel.from_pretrained(translation_model_name)
# Load the chatbot model (DialoGPT small model)
chatbot_model_name = "microsoft/DialoGPT-small"
chatbot_tokenizer = AutoTokenizer.from_pretrained(chatbot_model_name)
chatbot_model = AutoModelForCausalLM.from_pretrained(chatbot_model_name)
# Text-to-image function
def generate_image(prompt):
    image = pipe(prompt).images[0]
    return image
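# Optional: pipe() also accepts generation knobs such as num_inference_steps and
# guidance_scale; the call above relies on the pipeline defaults. A hypothetical
# tweak (not part of the original app) would look like:
#   image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5).images[0]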
# Text translation function
def translate_text(text, src_lang, tgt_lang):
    # Load the appropriate translation model for the selected language pair
    translation_model_name = f"Helsinki-NLP/opus-mt-{src_lang}-{tgt_lang}"
    tokenizer = MarianTokenizer.from_pretrained(translation_model_name)
    translation_model = MarianMTModel.from_pretrained(translation_model_name)
    # Tokenize the input text
    inputs = tokenizer(text, return_tensors="pt", padding=True)
    # Generate the translation
    translated = translation_model.generate(**inputs)
    # Decode and return the translated text
    translated_text = tokenizer.decode(translated[0], skip_special_tokens=True)
    return translated_text
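# Optional sketch: translate_text reloads a MarianMT checkpoint from the Hub on
# every call, which is slow. A cached loader like the one below (the helper name
# load_translation_model is an assumption, not part of the original app) could be
# swapped in where the function builds its tokenizer and model.
from functools import lru_cache

@lru_cache(maxsize=8)
def load_translation_model(src_lang, tgt_lang):
    # Keep one (tokenizer, model) pair per language direction in memory.
    name = f"Helsinki-NLP/opus-mt-{src_lang}-{tgt_lang}"
    return MarianTokenizer.from_pretrained(name), MarianMTModel.from_pretrained(name)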
# Chatbot function (using DialoGPT)
chat_history = []  # Stores the conversation as a list of token-id tensors

def chatbot_response(user_input):
    global chat_history
    # Handle questions like "explain what's ai" or "what is ai" with a canned answer
    if "explain what's ai" in user_input.lower() or "what is ai" in user_input.lower():
        response = ("AI, or Artificial Intelligence, is a branch of computer science that aims to create "
                    "machines capable of intelligent behavior. This includes tasks like learning, "
                    "problem-solving, and decision-making, traditionally performed by humans.")
    else:
        # Tokenize the user input and append the end-of-sequence token
        new_input_ids = chatbot_tokenizer.encode(user_input + chatbot_tokenizer.eos_token, return_tensors="pt")
        # Prepend the stored conversation history to the new user input
        bot_input_ids = torch.cat(chat_history + [new_input_ids], dim=-1) if chat_history else new_input_ids
        # Generate a response from the model
        chat_history_ids = chatbot_model.generate(bot_input_ids, max_length=1000, pad_token_id=chatbot_tokenizer.eos_token_id)
        # Decode only the newly generated tokens (everything after the input)
        response = chatbot_tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
        # Update the chat history with both the user turn and the bot reply
        chat_history.append(new_input_ids)
        chat_history.append(chat_history_ids[:, bot_input_ids.shape[-1]:])
    return response
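# Note: chat_history grows with every turn, and max_length=1000 above counts the
# prompt plus the reply, so a long session eventually leaves no room for new tokens.
# A simple guard (an assumption, not in the original code) would keep only the most
# recent turns, e.g.:
#   if len(chat_history) > 6:
#       chat_history[:] = chat_history[-6:]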
# Create Gradio interface
with gr.Blocks() as interface:
    gr.Markdown("# **The AI Playground** 🤖✨")

    with gr.Tab("Text-to-Image"):
        gr.Markdown("## **Text-to-Image Wizard** 🪄")
        with gr.Row():
            text_input = gr.Textbox(label="Imagine...", lines=2, placeholder="A whimsical robot dancing in a rainbow field...")
            image_output = gr.Image(label="Behold!")
        generate_button = gr.Button("Summon Image")
        generate_button.click(fn=generate_image, inputs=text_input, outputs=image_output)

    with gr.Tab("Text Translator"):
        gr.Markdown("## **Global Translator** 🌍")
        with gr.Row():
            text_input_translate = gr.Textbox(label="Say something...", placeholder="Type here...")
            src_lang = gr.Dropdown(label="From", choices=["en", "de", "fr", "es"], value="en")
            tgt_lang = gr.Dropdown(label="To", choices=["en", "de", "fr", "es"], value="de")
        translated_text = gr.Textbox(label="Translation...", lines=2)
        translate_button = gr.Button("Translate!")
        translate_button.click(fn=translate_text, inputs=[text_input_translate, src_lang, tgt_lang], outputs=translated_text)

    with gr.Tab("Chatbot"):
        gr.Markdown("## **Chat with the AI** 💬")
        with gr.Row():
            chatbot_input = gr.Textbox(label="Your Message", placeholder="Let's chat! Ask anything...")
            chatbot_output = gr.Textbox(label="AI's Wisdom", lines=5)
        chat_button = gr.Button("Send ✨")
        chat_button.click(fn=chatbot_response, inputs=chatbot_input, outputs=chatbot_output)
# Launch the Gradio app
interface.launch(share=True)