GuruCharan committed on
Commit
6b96a86
·
verified ·
1 Parent(s): 5fe8310

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +104 -0
app.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import gradio as gr
3
+ from diffusers import StableDiffusionPipeline
4
+ import torch
5
+ from transformers import MarianMTModel, MarianTokenizer, AutoModelForCausalLM, AutoTokenizer
6
+
# ---------------------------------------------------------------------------
# One-time model setup (runs at import time; downloads weights on first run).
# ---------------------------------------------------------------------------

# Stable Diffusion v1.4 pipeline for the text-to-image tab.
model_id = "CompVis/stable-diffusion-v1-4"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
# Pick the device at runtime: the original unconditional .to("cuda") raises
# on CPU-only machines, so fall back to CPU when CUDA is unavailable.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = pipe.to(device)

# Default MarianMT translation model (English -> German); translate_text()
# loads other language pairs on demand.
translation_model_name = "Helsinki-NLP/opus-mt-en-de"  # English to German model
tokenizer = MarianTokenizer.from_pretrained(translation_model_name)
translation_model = MarianMTModel.from_pretrained(translation_model_name)

# DialoGPT-small conversational model for the chatbot tab.
chatbot_model_name = "microsoft/DialoGPT-small"
chatbot_tokenizer = AutoTokenizer.from_pretrained(chatbot_model_name)
chatbot_model = AutoModelForCausalLM.from_pretrained(chatbot_model_name)
# Text-to-image handler for the "Summon Image" button.
def generate_image(prompt):
    """Run the global Stable Diffusion pipeline on *prompt* and return the
    first generated PIL image."""
    result = pipe(prompt)
    return result.images[0]
# Cache of (src, tgt) -> (tokenizer, model) so repeated translations do not
# re-load (and on first use, re-download) the checkpoint on every call.
_translation_models = {}

def translate_text(text, src_lang, tgt_lang):
    """Translate *text* from *src_lang* to *tgt_lang* with a MarianMT model.

    Parameters:
        text: source-language string; an empty string is returned unchanged.
        src_lang / tgt_lang: ISO language codes (e.g. "en", "de") used to
            select the "Helsinki-NLP/opus-mt-{src}-{tgt}" checkpoint.

    Returns:
        The translated string.
    """
    if not text:
        return ""
    if src_lang == tgt_lang:
        # No "opus-mt-xx-xx" identity checkpoint exists; the original code
        # would crash here. Translating to the same language is a no-op.
        return text

    pair = (src_lang, tgt_lang)
    if pair not in _translation_models:
        name = f"Helsinki-NLP/opus-mt-{src_lang}-{tgt_lang}"
        _translation_models[pair] = (
            MarianTokenizer.from_pretrained(name),
            MarianMTModel.from_pretrained(name),
        )
    tok, model = _translation_models[pair]

    # Tokenize, generate the translation, and decode back to a string.
    inputs = tok(text, return_tensors="pt", padding=True)
    generated = model.generate(**inputs)
    return tok.decode(generated[0], skip_special_tokens=True)
# Rolling DialoGPT conversation context: a list of 2-D token-id tensors.
chat_history = []

def chatbot_response(user_input):
    """Return a chatbot reply to *user_input* using DialoGPT.

    Common "what is AI" questions get a canned answer; everything else is
    generated by the model with the accumulated conversation as context.
    Updates the module-level ``chat_history`` as a side effect.
    """
    global chat_history

    # Canned answer for questions like "explain what's ai" / "what is ai".
    if "explain what's ai" in user_input.lower() or "what is ai" in user_input.lower():
        response = "AI, or Artificial Intelligence, is a branch of computer science that aims to create machines capable of intelligent behavior. This includes tasks like learning, problem-solving, and decision-making, traditionally performed by humans."
    else:
        # Tokenize the user turn, terminated by the EOS token as DialoGPT expects.
        new_input_ids = chatbot_tokenizer.encode(user_input + chatbot_tokenizer.eos_token, return_tensors="pt")

        # Join prior turns with the new input. The original code called
        # torch.tensor() on a list of tensors, which raises once history is
        # non-empty; torch.cat over the list of 2-D id tensors is correct.
        if chat_history:
            bot_input_ids = torch.cat(chat_history + [new_input_ids], dim=-1)
        else:
            bot_input_ids = new_input_ids

        # Generate a continuation of the whole conversation.
        chat_history_ids = chatbot_model.generate(bot_input_ids, max_length=1000, pad_token_id=chatbot_tokenizer.eos_token_id)

        # The reply is the newly generated suffix after the prompt tokens.
        response = chatbot_tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)

        # Keep the full conversation (user turn + bot reply) so the model sees
        # both sides next call; the original stored only the user turns.
        chat_history = [chat_history_ids]

    return response
# --- Gradio UI: three tabs wired to the handler functions above ---
with gr.Blocks() as interface:
    gr.Markdown("# **The AI Playground** πŸ€–βœ¨")

    # Tab 1: prompt -> Stable Diffusion image.
    with gr.Tab("Text-to-Image"):
        gr.Markdown("## **Text-to-Image Wizard** πŸͺ„")
        with gr.Row():
            prompt_box = gr.Textbox(label="Imagine...", lines=2, placeholder="A whimsical robot dancing in a rainbow field...")
            image_panel = gr.Image(label="Behold!")
        summon_btn = gr.Button("Summon Image")
        summon_btn.click(fn=generate_image, inputs=prompt_box, outputs=image_panel)

    # Tab 2: MarianMT translation between the supported language codes.
    with gr.Tab("Text Translator"):
        gr.Markdown("## **Global Translator** 🌍")
        with gr.Row():
            source_box = gr.Textbox(label="Say something...", placeholder="Type here...")
            from_lang = gr.Dropdown(label="From", choices=["en", "de", "fr", "es"], value="en")
            to_lang = gr.Dropdown(label="To", choices=["en", "de", "fr", "es"], value="de")
        result_box = gr.Textbox(label="Translation...", lines=2)
        translate_btn = gr.Button("Translate!")
        translate_btn.click(fn=translate_text, inputs=[source_box, from_lang, to_lang], outputs=result_box)

    # Tab 3: DialoGPT chatbot.
    with gr.Tab("Chatbot"):
        gr.Markdown("## **Chat with the AI** πŸ’¬")
        with gr.Row():
            message_box = gr.Textbox(label="Your Message", placeholder="Let's chat! Ask anything...")
            reply_box = gr.Textbox(label="AI's Wisdom", lines=5)
        send_btn = gr.Button("Send ✨")
        send_btn.click(fn=chatbot_response, inputs=message_box, outputs=reply_box)


# Launch the Gradio app with a public share link.
interface.launch(share=True)