# INIFanalitica's picture
# Update app.py
# 9ef7b37 verified
# raw
# history blame
# 5.33 kB
import os
import textwrap

import google.generativeai as genai
import streamlit as st
from PIL import Image
def to_markdown(text):
    """Format model output as a Markdown blockquote.

    Every '•' bullet is rewritten as a Markdown list marker (' *'), and
    each line of the result is prefixed with '> ' so Streamlit renders
    the whole text as a quoted block.
    """
    bulleted = text.replace('•', ' *')
    return textwrap.indent(bulleted, '> ', predicate=lambda _line: True)
def generate_gemini_content(prompt, model_name='gemini-pro-vision', image=None):
    """Ask a Gemini model to generate content for *prompt* plus *image*.

    The vision model requires an image: when none is supplied a warning is
    shown in the UI and None is returned. Otherwise the raw API response
    object from ``generate_content`` is returned.
    """
    if not image:
        st.warning("Por favor, agrega una imagen para usar el modelo gemini-pro-vision.")
        return None
    model = genai.GenerativeModel(model_name)
    return model.generate_content([prompt, image])
# Streamlit app
def main():
    """Streamlit entry point for the MAX chatbot (INIF).

    Renders the chat UI, keeps the conversation history in
    ``st.session_state``, and routes user input either to a persistent
    ``gemini-pro`` chat session or to a one-shot ``gemini-pro-vision``
    call when an image is attached.
    """
    st.set_page_config(page_title="MAX Chatbot - INIF", page_icon="🤖")

    # SECURITY: the original file hard-coded this Gemini API key in source
    # control (and configured it twice) — the key should be considered
    # leaked and revoked. Prefer the GOOGLE_API_KEY environment variable;
    # the literal remains only as a backward-compatible fallback.
    api_key = os.getenv("GOOGLE_API_KEY", "AIzaSyA4k6JoFNZsf8L1ixLMMRjEMoBPns5SHZk")
    genai.configure(api_key=api_key)

    st.title("MAX Chatbot - INIF")
    # Fixed: the original sidebar said "Laura Chatbot", inconsistent with
    # the MAX branding used everywhere else in this app.
    st.sidebar.title("Configuración de MAX Chatbot")

    # Select which Gemini model to use.
    select_model = st.sidebar.selectbox("Selecciona el modelo", ["gemini-pro", "gemini-pro-vision"])

    # Keep ONE chat session per selected model across Streamlit reruns.
    # The original recreated the session on every rerun, silently throwing
    # away the server-side conversation history on each interaction.
    if "chat" not in st.session_state or st.session_state.get("chat_model") != select_model:
        st.session_state["chat"] = genai.GenerativeModel(select_model).start_chat(history=[])
        st.session_state["chat_model"] = select_model
    chat = st.session_state["chat"]

    def get_response(messages):
        # Stream the model's reply chunks for the given payload.
        return chat.send_message(messages, stream=True)

    # Chat history lives in the Streamlit session.
    if "messages" not in st.session_state:
        st.session_state["messages"] = []
    messages = st.session_state["messages"]

    # Replay stored history. Use explicit keys instead of the original
    # `role, parts = message.values()`, which depended on dict insertion
    # order and would break if a message ever gained an extra field.
    for message in messages:
        role = message["role"]
        parts = message["parts"]
        if role.lower() == "user":
            st.markdown(f"Tú: {parts[0]}")
        elif role.lower() == "model":
            st.markdown(f"Assistant: {to_markdown(parts[0])}")

    # User input.
    user_input = st.text_area("Tú:")

    # INIF context appended to every prompt so the model stays on-topic.
    inif_context = (
        "I am an informative data analyst chatbot named MAX, working for the National Institute of Fraud Research and Prevention (INIF), dedicated to fraud prevention and mitigation."
        " If you have questions related to fraud or prevention, feel free to ask. For inquiries about other topics, I'll redirect you to the fraud prevention context."
        "\n\nContact Information for INIF:"
        "\nPhone: +57 317 638 94 71"
        "\nEmail: atencionalcliente@inif.com.co"
        "\n\nOur Mission:"
        "\nTo be the most reliable engine of knowledge, research, and information in Colombia, capable of preventing and combating fraud through synergy between our team and companies."
        "\n\nOur Vision:"
        "\nTo lead the construction of a more honest culture, allowing us to progress as a society."
    )

    # Concatenate the INIF context onto the user's input.
    user_input_with_context = f"{user_input}\n\n{inif_context}"

    # Optional image input — only relevant for the vision model.
    image_file = None
    if select_model == 'gemini-pro-vision':
        image_file = st.file_uploader("Sube una imagen (si aplica):", type=["jpg", "jpeg", "png"])
        if image_file:
            st.image(image_file, caption="Imagen subida", use_column_width=True)

    # Send the message / generate content for the selected model.
    if st.button("Enviar / Generar Contenido"):
        if user_input:
            messages.append({"role": "user", "parts": [user_input]})
            if select_model == 'gemini-pro-vision':
                # Vision model: requires an uploaded image.
                if not image_file:
                    st.warning("Por favor, proporciona una imagen para el modelo gemini-pro-vision.")
                else:
                    image = Image.open(image_file)
                    response = generate_gemini_content(user_input_with_context, model_name=select_model, image=image)
                    if response:
                        if response.candidates:
                            parts = response.candidates[0].content.parts
                            generated_text = parts[0].text if parts else "No se generó contenido."
                            st.markdown(f"Assistant: {to_markdown(generated_text)}")
                            messages.append({"role": "model", "parts": [generated_text]})
                        else:
                            st.warning("No se encontraron candidatos en la respuesta.")
            else:
                # Text-only models: stream the reply and show it once.
                response = get_response(user_input_with_context)
                res_text = "".join(chunk.text for chunk in response)
                st.markdown(f"Assistant: {to_markdown(res_text)}")
                messages.append({"role": "model", "parts": [res_text]})

        # Persist the (mutated) history back into the session.
        st.session_state["messages"] = messages


if __name__ == "__main__":
    main()