pvyas96 committed
Commit b444723
1 Parent(s): 4319640

Update app.py

Files changed (1)
  app.py +28 -40
app.py CHANGED
@@ -1,51 +1,39 @@
  import streamlit as st
- from transformers import AutoTokenizer, AutoModelForCausalLM
-
- # Create a class for the session state
- class SessionState:
-     def __init__(self):
-         self.conversation_history = []
-
- # Initialize the session state
- session_state = SessionState()
-
- # Sidebar for setting parameters
- st.sidebar.title("Model Parameters")
- # You can add more parameters here as needed
- max_length = st.sidebar.slider("Max Length", 10, 100, 50)
- temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.7)
-
- # Load the model and tokenizer with a loading message
- with st.spinner('Wait for model to respond..'):
-     model_name = "llmware/bling-red-pajamas-3b-0.1"
-     model = AutoModelForCausalLM.from_pretrained(model_name)
-     tokenizer = AutoTokenizer.from_pretrained(model_name)
-
- # Create a chat input for the user
- input_text = st.chat_input("Enter your message:")
-
- # Check if the user has entered a message
- if input_text:
-     # Add the user's message to the conversation history
-     session_state.conversation_history.append(input_text)
-
-     # Display the user's message
-     st.write("**User:**", input_text)
-
-     # Create conversation history string
-     history_string = "\n".join(session_state.conversation_history)
-
-     # Tokenize the input text and history
-     inputs = tokenizer.encode_plus(history_string, input_text, return_tensors="pt")
-
-     # Generate the response from the model with additional parameters
-     outputs = model.generate(**inputs, max_length=max_length, temperature=temperature)
-
-     # Decode the response
-     response = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
-
-     # Add the model's response to the conversation history
-     session_state.conversation_history.append(response)
-
-     # Display the model's response
-     st.write("**Assistant:**", response)
+ from transformers import pipeline
+
+ # Choose an appropriate Hugging Face model for your chat application (replace with your desired model)
+ model_name = "facebook/bart-base"
+
+ # Initialize the conversational AI pipeline
+ chat_pipeline = pipeline("conversational", model=model_name)
+
+ # Initialize session state to store chat history
+ if "chat_history" not in st.session_state:
+     st.session_state["chat_history"] = []
+
+ def display_chat_history():
+     """Displays the chat history in the Streamlit app."""
+     for message in st.session_state["chat_history"]:
+         st.write(f"{message['user']}: {message['text']}")
+
+ def process_user_input(user_input):
+     """Processes user input using the conversational AI model and updates chat history."""
+     if user_input:
+         bot_response = chat_pipeline(user_input, max_length=1000)[0]["generated_text"]
+         st.session_state["chat_history"].append({"user": "You", "text": user_input})
+         st.session_state["chat_history"].append({"user": "Bot", "text": bot_response})
+
+ st.title("Streamlit Chat App with Hugging Face Model")
+
+ # Display chat history
+ display_chat_history()
+
+ # User input using st.chat_input
+ user_input = st.chat_input("Type your message here...", key="user_input")
+
+ # Process user input on Enter key press
+ if st.session_state.get("user_input", "") != user_input:
+     process_user_input(user_input)
+     st.session_state["user_input"] = ""  # Clear input field
+
+ st.write("**Note:** This is a simple demonstration. For more advanced features, consider using a dedicated chatbot framework.")
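
A note on the committed rewrite: as written, the new code is unlikely to run. In transformers, the "conversational" task expects and returns Conversation objects rather than a list of {"generated_text": ...} dicts (and the task has been deprecated in recent releases); facebook/bart-base is an encoder-decoder base model with no dialogue fine-tuning; and assigning to st.session_state["user_input"] after a widget has been created with that key will typically raise a StreamlitAPIException. Below is a minimal sketch of a working variant, assuming a text-generation pipeline and microsoft/DialoGPT-small as a hypothetical stand-in model; neither choice is part of the commit.

import streamlit as st
from transformers import pipeline

# Hypothetical stand-in model for illustration; any causal LM fine-tuned
# for dialogue could be substituted here.
MODEL_NAME = "microsoft/DialoGPT-small"

@st.cache_resource  # load the model once per server process, not on every rerun
def load_pipeline():
    return pipeline("text-generation", model=MODEL_NAME)

chat_pipeline = load_pipeline()

if "chat_history" not in st.session_state:
    st.session_state["chat_history"] = []

st.title("Streamlit Chat App with Hugging Face Model")

# st.chat_input returns the submitted text exactly once, then None on
# subsequent reruns, so a plain truthiness check is enough; no widget key
# or manual clearing is needed.
user_input = st.chat_input("Type your message here...")
if user_input:
    bot_response = chat_pipeline(user_input, max_new_tokens=100)[0]["generated_text"]
    st.session_state["chat_history"].append({"user": "You", "text": user_input})
    st.session_state["chat_history"].append({"user": "Bot", "text": bot_response})

# Replay the stored conversation on every rerun, including the newest turn.
for message in st.session_state["chat_history"]:
    st.write(f"{message['user']}: {message['text']}")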
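
To try the sketch locally, install the dependencies (pip install streamlit transformers torch), save it as app.py, and launch it with streamlit run app.py. The st.cache_resource wrapper matters because Streamlit re-executes the entire script on every interaction; that same rerun behavior is why the pre-commit version's plain SessionState instance lost its conversation_history on each submit, and why this commit's move to st.session_state is the right direction.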