INIFanalitica
committed · a345bb6
Parent(s): d5fe3be
Create app.py
app.py ADDED
@@ -0,0 +1,102 @@
import streamlit as st
from PIL import Image
import textwrap
import google.generativeai as genai

# Helper to render model output as block-quoted Markdown
def to_markdown(text):
    text = text.replace('•', ' *')
    return textwrap.indent(text, '> ', predicate=lambda _: True)

# Generate content with the Gemini API (the vision model requires an image)
def generate_gemini_content(prompt, model_name='gemini-pro-vision', image=None):
    model = genai.GenerativeModel(model_name)
    if not image:
        st.warning("Please upload an image to use the gemini-pro-vision model.")
        return None

    response = model.generate_content([prompt, image])
    return response

# Streamlit app
def main():
    st.set_page_config(page_title="Gemini Chatbot", page_icon="🤖")
    st.title("Gemini Chatbot")
    st.sidebar.title("Gemini configuration")

    # Configure the Gemini API key (replace with your own key)
    genai.configure(api_key='YOUR_GEMINI_API_KEY')

    # Select the Gemini model
    select_model = st.sidebar.selectbox("Select a model", ["gemini-pro", "gemini-pro-vision"])

    # Keep the chat session in st.session_state so its history survives
    # Streamlit reruns; recreate it only when the selected model changes.
    if ("chat" not in st.session_state
            or st.session_state.get("chat_model") != select_model):
        st.session_state["chat"] = genai.GenerativeModel(select_model).start_chat(history=[])
        st.session_state["chat_model"] = select_model
    chat = st.session_state["chat"]

    # Get a streamed response from the Gemini chat session
    def get_response(prompt):
        response = chat.send_message(prompt, stream=True)
        return response

    # Chat history
    if "messages" not in st.session_state:
        st.session_state["messages"] = []

    messages = st.session_state["messages"]

    # Render the stored history (explicit key access rather than
    # dict.values() unpacking, which depends on insertion order)
    for message in messages:
        role, parts = message["role"], message["parts"]
        if role.lower() == "user":
            st.markdown(f"You: {parts[0]}")
        elif role.lower() == "model":
            st.markdown(f"Assistant: {to_markdown(parts[0])}")

    # User input
    user_input = st.text_area("You:")

    # Optional image input when 'gemini-pro-vision' is selected
    image_file = None
    if select_model == 'gemini-pro-vision':
        image_file = st.file_uploader("Upload an image (if applicable):", type=["jpg", "jpeg", "png"])

    # Display the image if one was provided
    if image_file:
        st.image(image_file, caption="Uploaded image", use_column_width=True)

    # Send a message or generate content, depending on the selected model
    if st.button("Send / Generate content"):
        if user_input:
            messages.append({"role": "user", "parts": [user_input]})
            if select_model == 'gemini-pro-vision':
                # gemini-pro-vision selected: single prompt + image call
                if not image_file:
                    st.warning("Please provide an image for the gemini-pro-vision model.")
                else:
                    image = Image.open(image_file)
                    response = generate_gemini_content(user_input, model_name=select_model, image=image)
                    if response:
                        if response.candidates:
                            parts = response.candidates[0].content.parts
                            generated_text = parts[0].text if parts else "No content was generated."
                            st.markdown(f"Assistant: {to_markdown(generated_text)}")
                            messages.append({"role": "model", "parts": [generated_text]})
                        else:
                            st.warning("No candidates were found in the response.")
            else:
                # Other Gemini model selected: stream the chat response
                response = get_response(user_input)

                # Accumulate the stream, then render the reply only once
                res_text = ""
                for chunk in response:
                    res_text += chunk.text
                st.markdown(f"Assistant: {to_markdown(res_text)}")
                messages.append({"role": "model", "parts": [res_text]})

            # Persist the updated history in the Streamlit session
            st.session_state["messages"] = messages

if __name__ == "__main__":
    main()
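One caveat worth flagging about this commit: the API key is hardcoded as a placeholder directly in app.py, which is easy to leak once the Space repository is public. A minimal sketch of the usual alternative, assuming a repository secret named GOOGLE_API_KEY (the name is an assumption; Hugging Face Spaces exposes repository secrets to the app as environment variables):

import os

import streamlit as st
import google.generativeai as genai

# GOOGLE_API_KEY is a hypothetical secret name; on Hugging Face Spaces,
# repository secrets are injected as environment variables at runtime.
api_key = os.environ.get("GOOGLE_API_KEY")
if not api_key:
    st.error("GOOGLE_API_KEY is not set; add it as a Space secret.")
    st.stop()  # halt this script run instead of calling the API without a key

genai.configure(api_key=api_key)

With that in place, the same app should also run locally via streamlit run app.py, given the streamlit, google-generativeai, and Pillow packages installed and the variable exported in the shell (or supplied through Streamlit's secrets.toml).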