Spaces: INIFanalitica (Sleeping)
INIFanalitica committed: Update app.py
Commit: bc7b759
Parent(s): 482380d

app.py CHANGED
@@ -20,12 +20,17 @@ def generate_gemini_content(prompt, model_name='gemini-pro-vision', image=None):
 
 # Streamlit app
 def main():
-    st.set_page_config(page_title="
-    st.title("Gemini Chatbot")
-    st.sidebar.title("Configuración de Gemini")
+    st.set_page_config(page_title="Laura Chatbot - INIF", page_icon="🤖")
 
     # Configurar la API key de Gemini (reemplazar con tu clave de API de Gemini)
-    genai.configure(api_key='
+    genai.configure(api_key='AIzaSyA4k6JoFNZsf8L1ixLMMRjEMoBPns5SHZk')
+
+    st.title("Laura Chatbot - INIF")
+    st.sidebar.title("Configuración de Laura Chatbot")
+
+    # Configurar la API key de INIF
+    inif_api_key = 'AIzaSyA4k6JoFNZsf8L1ixLMMRjEMoBPns5SHZk'
+    genai.configure(api_key=inif_api_key)
 
     # Seleccionar el modelo Gemini
     select_model = st.sidebar.selectbox("Selecciona el modelo", ["gemini-pro", "gemini-pro-vision"])
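Note: this hunk commits a Gemini API key directly into app.py and configures it twice with the same value. A minimal sketch of an alternative, assuming the key were instead supplied through an environment variable named GOOGLE_API_KEY (a hypothetical name, not part of this commit):

import os
import google.generativeai as genai

# Hypothetical alternative to the hardcoded key above; GOOGLE_API_KEY is an assumed name.
api_key = os.environ.get("GOOGLE_API_KEY")
genai.configure(api_key=api_key)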
@@ -56,6 +61,22 @@ def main():
     # Entrada del usuario
     user_input = st.text_area("Tú:")
 
+    # Agregar contexto del INIF al input del usuario
+    inif_context = (
+        "I am an informative data analyst chatbot named TERMINATOR, working for the National Institute of Fraud Research and Prevention (INIF), dedicated to fraud prevention and mitigation."
+        " If you have questions related to fraud or prevention, feel free to ask. For inquiries about other topics, I'll redirect you to the fraud prevention context."
+        "\n\nContact Information for INIF:"
+        "\nPhone: +57 317 638 94 71"
+        "\nEmail: atencionalcliente@inif.com.co"
+        "\n\nOur Mission:"
+        "\nTo be the most reliable engine of knowledge, research, and information in Colombia, capable of preventing and combating fraud through synergy between our team and companies."
+        "\n\nOur Vision:"
+        "\nTo lead the construction of a more honest culture, allowing us to progress as a society."
+    )
+
+    # Concatenar el contexto del INIF al input del usuario
+    user_input_with_context = f"{user_input}\n\n{inif_context}"
+
     # Get optional image input if the model selected is 'gemini-pro-vision'
     image_file = None
     if select_model == 'gemini-pro-vision':
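The inif_context block added here relies on Python's implicit concatenation of adjacent string literals inside parentheses, so the pieces form one long string whose only line breaks are the embedded \n escapes; user_input_with_context then appends that string after the user's text. A minimal illustration of the same mechanism (example strings only):

user_input = "example question"
context = (
    "line one"      # adjacent literals are joined into a single string
    "\nline two"
)
prompt = f"{user_input}\n\n{context}"
# prompt == "example question\n\nline one\nline two"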
@@ -75,7 +96,7 @@ def main():
             st.warning("Por favor, proporciona una imagen para el modelo gemini-pro-vision.")
         else:
             image = Image.open(image_file)
-            response = generate_gemini_content(
+            response = generate_gemini_content(user_input_with_context, model_name=select_model, image=image)
             if response:
                 if response.candidates:
                     parts = response.candidates[0].content.parts
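The new call passes the context-augmented prompt, the selected model name, and the PIL image to generate_gemini_content. That function is defined earlier in app.py (its signature appears in the first hunk header) and its body is not shown in this diff; a plausible sketch, assuming it wraps the google.generativeai GenerativeModel API:

import google.generativeai as genai

def generate_gemini_content(prompt, model_name='gemini-pro-vision', image=None):
    # Assumed implementation; the committed body is not visible in this diff.
    model = genai.GenerativeModel(model_name)
    if image is not None:
        # gemini-pro-vision accepts a list mixing text and PIL images
        return model.generate_content([prompt, image])
    return model.generate_content(prompt)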
@@ -86,7 +107,7 @@ def main():
                     st.warning("No se encontraron candidatos en la respuesta.")
         else:
             # Otros modelos Gemini seleccionados
-            response = get_response(
+            response = get_response(user_input_with_context)
 
             # Mostrar respuesta del modelo solo una vez
             res_text = ""
|