KvrParaskevi
committed on
Update app.py
app.py
CHANGED
@@ -1,6 +1,6 @@
 import os
 import streamlit as st
-import
+import model as demo_chat
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 st.title("Hi, I am Chatbot Philio :woman:")
@@ -18,6 +18,8 @@ scrollable_div_style = """
 </style>
 """
 
+llm_chain = demo_chat.chain()
+
 def render_chat_history(chat_history):
     #renders chat history
     for message in chat_history:
@@ -25,44 +27,10 @@ def render_chat_history(chat_history):
         with st.chat_message(message["role"]):
            st.markdown(message["content"])
 
-# def generate_response(chat_history):
-#     tokenized_chat = tokenizer.apply_chat_template(chat_history, tokenize=True, add_generation_prompt=True, return_tensors="pt")
-#     outputs = model.generate(tokenized_chat, do_sample =True, max_new_tokens=50, temperature = 0.3, top_p = 0.85)
-#     answer = tokenizer.decode(outputs[0][tokenized_chat.shape[1]:],skip_special_tokens=True)
-#     final_answer = answer.split("<")[0]
-#     return final_answer
-
-#Application
-#Langchain memory in session cache
-#if 'memory' not in st.session_state:
-#    st.session_state.memory = demo_chat.demo_miny_memory()
-
-system_content = """
-You are an AI having conversation with a human. Below is an instruction that describes a task.
-Write a response that appropriately completes the request.
-Reply with the most helpful and logic answer. During the conversation you need to ask the user
-the following questions to complete the hotel booking task.
 
-1) Where would you like to stay and when?
-2) How many people are staying in the room?
-3) Do you prefer any ammenities like breakfast included or gym?
-4) What is your name, your email address and phone number?
-
-Make sure you receive a logical answer from the user from every question to complete the hotel
-booking process.
-"""
 #Check if chat history exists in this session
 if 'chat_history' not in st.session_state:
-    st.session_state.chat_history = [
-        {
-            "role": "system",
-            "content": system_content,
-        },
-        {"role": "ai", "content": "Hello, how can I help you today?"},
-    ] #Initialize chat history
-
-# if 'model' not in st.session_state:
-#     st.session_state.model = model
+    st.session_state.chat_history = [] #Initialize chat history
 
 st.markdown('<div class="scrollable-div">', unsafe_allow_html=True) #add css style to container
 render_chat_history(st.session_state.chat_history)
@@ -75,7 +43,7 @@ if input_text := st.chat_input(placeholder="Here you can chat with our hotel boo
     st.session_state.chat_history.append({"role" : "human", "content" : input_text}) #append message to chat history
 
     with st.spinner("Generating response..."):
-        first_answer =
+        first_answer = llm_chain.predict(input = input_text).strip()
 
     with st.chat_message("assistant"):
         st.markdown(first_answer)
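
The model module imported as demo_chat is not part of this diff, so the behavior of demo_chat.chain() has to be inferred. Below is a minimal sketch of what model.py might define, assuming a LangChain ConversationChain over a local transformers pipeline; the checkpoint name is a placeholder, and the sampling settings are borrowed from the removed generate_response comment (max_new_tokens=50, temperature=0.3, top_p=0.85).

# model.py -- a hedged sketch only; the real file is not shown in this diff.
# Assumption: demo_chat.chain() returns a LangChain ConversationChain wrapping
# a local transformers pipeline. The checkpoint is a placeholder; the sampling
# values mirror the removed generate_response() comment.
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_community.llms import HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

def chain():
    model_id = "microsoft/phi-2"  # placeholder checkpoint, not from the diff
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    pipe = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=50,
        do_sample=True,
        temperature=0.3,
        top_p=0.85,
        return_full_text=False,  # emit only the newly generated text
    )
    # ConversationChain keeps dialogue state in its own memory and exposes
    # .predict(input=...), which matches llm_chain.predict(input = input_text)
    # in app.py.
    return ConversationChain(
        llm=HuggingFacePipeline(pipeline=pipe),
        memory=ConversationBufferMemory(),
    )

One caveat: Streamlit reruns the whole script on every interaction, and app.py rebuilds the chain at module level, so any memory held inside the chain starts fresh each turn unless it is also cached in st.session_state; the removed demo_chat.demo_miny_memory() comment suggests the author had experimented with exactly that.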