Spaces:
Sleeping
Sleeping
martianband1t
committed on
Commit
•
ccffbaa
1
Parent(s):
8610103
Update app.py
Browse files
app.py
CHANGED
@@ -1,19 +1,42 @@
|
|
1 |
import streamlit as st
|
2 |
import replicate
|
|
|
3 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
4 |
|
5 |
-
st.title("ayoye c'est bien hot sa!!")
|
6 |
-
output = replicate.run(
|
7 |
-
"meta/meta-llama-3-8b-instruct",
|
8 |
-
input={
|
9 |
-
"top_p": 0.9,
|
10 |
-
"prompt": "Write me three poems about llamas, the first in AABB format, the second in ABAB, the third without any rhyming",
|
11 |
-
"max_tokens": 512,
|
12 |
-
"min_tokens": 0,
|
13 |
-
"temperature": 0.6,
|
14 |
-
"prompt_template": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
|
15 |
-
"presence_penalty": 0,
|
16 |
-
"frequency_penalty": 0
|
17 |
-
}
|
18 |
-
)
|
19 |
-
print(output)
|
|
|
1 |
import streamlit as st
|
2 |
import replicate
|
3 |
+
import numpy as np
|
4 |
|
5 |
+
st.title("Ayoye, c'est bien hot ça!!")
|
6 |
+
|
7 |
+
# Création d'un conteneur pour les messages du chat
|
8 |
+
with st.container():
|
9 |
+
# Initialisation d'une liste pour stocker les messages
|
10 |
+
if 'messages' not in st.session_state:
|
11 |
+
st.session_state['messages'] = []
|
12 |
+
|
13 |
+
# Formulaire pour entrer une question
|
14 |
+
with st.form("chat_input"):
|
15 |
+
user_input = st.text_input("Entrez votre question ici:", "")
|
16 |
+
submitted = st.form_submit_button("Envoyer")
|
17 |
+
if submitted and user_input:
|
18 |
+
# Ajout de la question de l'utilisateur
|
19 |
+
st.session_state['messages'].append(f"Vous: {user_input}")
|
20 |
+
|
21 |
+
# Génération de la réponse via le modèle Replicate
|
22 |
+
output = replicate.run(
|
23 |
+
"meta/meta-llama-3-8b-instruct",
|
24 |
+
input={
|
25 |
+
"top_p": 0.9,
|
26 |
+
"prompt": user_input,
|
27 |
+
"max_tokens": 2053,
|
28 |
+
"min_tokens": 50,
|
29 |
+
"temperature": 0.3,
|
30 |
+
"prompt_template": "system\n\nYou are a helpful assistantuser\n\n{prompt}assistant\n\n",
|
31 |
+
"presence_penalty": 0,
|
32 |
+
"frequency_penalty": 0
|
33 |
+
}
|
34 |
+
)
|
35 |
+
|
36 |
+
# Affichage de la réponse du modèle
|
37 |
+
st.session_state['messages'].append(f"Bot: {output}")
|
38 |
+
|
39 |
+
# Affichage des messages précédents
|
40 |
+
for message in st.session_state['messages']:
|
41 |
+
st.text(message)
|
42 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|