Mykes committed
Commit 7fa4911 • 1 parent: bb43e92

Update app.py

Files changed (1)
  1. app.py +19 -5
app.py CHANGED
@@ -48,7 +48,14 @@ def load_model():
 
 llm = load_model()
 
-basic_prompt = "Q: {question}\nA:"
+def format_context(messages):
+    context = ""
+    for message in messages:
+        if message["role"] == "user":
+            context += f"Human: {message['content']}\n"
+        else:
+            context += f"Assistant: {message['content']}\n"
+    return context
 
 # Initialize chat history
 if "messages" not in st.session_state:
@@ -66,7 +73,11 @@ if prompt := st.chat_input("What is your question?"):
     # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})
 
-    model_input = basic_prompt.format(question=prompt)
+    # Format the context with the last 5 messages (excluding the one just appended)
+    context = format_context(st.session_state.messages[:-1][-5:])
+
+    # Prepare the model input
+    model_input = f"{context}Human: {prompt}\nAssistant:"
 
     # Display assistant response in chat message container
     with st.chat_message("assistant"):
@@ -76,16 +87,19 @@ if prompt := st.chat_input("What is your question?"):
         for token in llm(
             model_input,
             max_tokens=None,
-            stop=["<end_of_turn>"],
+            stop=["Human:", "<end_of_turn>"],
             echo=True,
             stream=True
         ):
             full_response += token['choices'][0]['text']
             message_placeholder.markdown(full_response + "▌")
-        message_placeholder.markdown(full_response)
+
+        # Remove the initial context and prompt from the response
+        assistant_response = full_response.split("Assistant:")[-1].strip()
+        message_placeholder.markdown(assistant_response)
 
     # Add assistant response to chat history
-    st.session_state.messages.append({"role": "assistant", "content": full_response})
+    st.session_state.messages.append({"role": "assistant", "content": assistant_response})
 
 st.sidebar.title("Chat with AI")
 st.sidebar.markdown("This is a simple chat interface using Streamlit and an AI model.")
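
For reference, a minimal sketch of what the new prompt assembly produces. format_context is copied from the diff; the chat history and question below are invented for illustration:

    # Sketch of the updated prompt assembly (sample history is made up).
    def format_context(messages):
        context = ""
        for message in messages:
            if message["role"] == "user":
                context += f"Human: {message['content']}\n"
            else:
                context += f"Assistant: {message['content']}\n"
        return context

    history = [
        {"role": "user", "content": "What is Streamlit?"},
        {"role": "assistant", "content": "A Python framework for building data apps."},
    ]
    prompt = "Does it support chat interfaces?"

    context = format_context(history[-5:])
    model_input = f"{context}Human: {prompt}\nAssistant:"
    print(model_input)
    # Human: What is Streamlit?
    # Assistant: A Python framework for building data apps.
    # Human: Does it support chat interfaces?
    # Assistant:

The trailing "Assistant:" plus the new "Human:" stop sequence steer the model to answer the latest question and stop before it invents the user's next turn.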
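
The call still passes echo=True, so the code expects the streamed text to include the echoed context and prompt ahead of the completion; that is why the commit keeps only what follows the last "Assistant:" marker. A small sketch of that post-processing step, with an invented completion:

    # The accumulated stream is expected to look roughly like this
    # (the completion text is made up):
    full_response = (
        "Human: What is Streamlit?\n"
        "Assistant: A Python framework for building data apps.\n"
        "Human: Does it support chat interfaces?\n"
        "Assistant: Yes, st.chat_input and st.chat_message cover that."
    )

    # Keep only the text after the last "Assistant:" marker.
    assistant_response = full_response.split("Assistant:")[-1].strip()
    print(assistant_response)  # Yes, st.chat_input and st.chat_message cover that.

Splitting on the last occurrence stays correct even when earlier turns in the echoed context contain "Assistant:", and storing assistant_response rather than full_response in the chat history keeps later contexts from nesting old prompts inside new ones.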