# gm-chat / app.py — Tomoniai's Gemini-PRO chat Space (Hugging Face)
# Required packages: Google Generative AI SDK, Pillow for image decoding,
# and Gradio (plus the multimodal-chatbot custom component) for the UI.
import google.generativeai as genai
import os
import PIL.Image
import gradio as gr
from gradio_multimodalchatbot import MultimodalChatbot
from gradio.data_classes import FileData
# Credentials are read from the environment: the Gemini API key plus the
# username/password pair used for HTTP basic auth at launch time.
GG_API_KEY = os.environ.get('GG_API_KEY')
oaiusr = os.environ.get("OAI_USR")
oaipwd = os.environ.get("OAI_PWD")
genai.configure(api_key=GG_API_KEY)
model = genai.GenerativeModel('gemini-pro')  # text-only chat model
modelvis = genai.GenerativeModel('gemini-pro-vision')  # text + image model
def gemini(input, file, chatbot=None):
    """Run one chat turn against Gemini and append it to the chat history.

    Args:
        input: The user's text prompt for this turn.
        file: An uploaded image file (object with a ``.name`` path) or None.
        chatbot: Existing MultimodalChatbot history — a list of
            ``[user_msg, bot_msg]`` pairs whose entries expose ``.text``.
            Defaults to a fresh empty history.

    Returns:
        A 3-tuple ``(chatbot, "", None)``: the updated history, an empty
        string to clear the textbox, and None to reset the upload button.

    Raises:
        gr.Error: If the Gemini API call (or image decoding) fails, so the
            error surfaces in the Gradio UI.
    """
    # Avoid the mutable-default-argument trap: never share history across calls.
    if chatbot is None:
        chatbot = []

    # Replay prior turns so the text model keeps conversation context,
    # then append the current user prompt exactly once.
    messages = []
    for user, bot in chatbot:
        messages.extend([
            {'role': 'user', 'parts': [user.text]},
            {'role': 'model', 'parts': [bot.text]},
        ])
    messages.append({'role': 'user', 'parts': [input]})

    try:
        if file is not None:
            # Image turn: use the vision model with a single-turn prompt
            # (prompt text + decoded image). The context manager guarantees
            # the image file handle is closed even on API failure.
            with PIL.Image.open(file.name) as img:
                response = modelvis.generate_content(
                    [{'role': 'user', 'parts': [input, img]}]
                )
            reply = response.text
            messages.append({'role': 'model', 'parts': [reply]})
            # Echo the uploaded image back in the user's chat bubble.
            user_msg = {"text": input,
                        "files": [{"file": FileData(path=file.name)}]}
        else:
            # Text-only turn: the full running conversation goes to gemini-pro.
            response = model.generate_content(messages)
            reply = response.text
            user_msg = {"text": input, "files": []}

        bot_msg = {"text": reply, "files": []}
        chatbot.append([user_msg, bot_msg])
    except Exception as e:
        # Surface the failure in the Gradio UI as a modal error.
        print(f"An error occurred: {e}")
        raise gr.Error(str(e))

    return chatbot, "", None
# Define the Gradio Blocks interface.
with gr.Blocks() as demo:
    # Centered page header.
    gr.HTML("<center><h1>Tomoniai's Gemini-PRO Chat</h1></center>")
    # Chat history display; holds the [user_msg, bot_msg] pairs produced by gemini().
    multi = MultimodalChatbot(value=[], height=500, avatar_images=["./user.png", "./botg.png"], show_label=False, bubble_full_width=False, show_copy_button=True, likeable=True)
    with gr.Row():
        # Textbox for user input; larger scale so it dominates the row.
        tb = gr.Textbox(show_label=False, scale=4)
        # Upload button restricted to image files.
        up = gr.UploadButton("Upload Image", file_types=["image"], scale=1)
    # On submit: gemini() updates the chat, clears the textbox, resets the upload.
    tb.submit(gemini, [tb, up, multi], [multi, tb, up])
    # Chained then() calls cycle the upload button's label as user feedback.
    # NOTE(review): the steps run back-to-back, so the intermediate labels may
    # flash by almost instantly — confirm this is the intended UX.
    up.upload(lambda: gr.UploadButton("Uploading Image..."), [], up) \
        .then(lambda: gr.UploadButton("Image Uploaded"), [], up) \
        .then(lambda: gr.UploadButton("Upload Image"), [], up)
# Launch with HTTP basic auth taken from the OAI_USR / OAI_PWD env vars.
demo.queue().launch(auth=(oaiusr, oaipwd),show_api=False)