Upload 2 files
- app.py +52 -0
- requirements.txt +3 -0
app.py
ADDED
@@ -0,0 +1,52 @@
import streamlit as st
from langchain_huggingface import HuggingFaceEndpoint
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
import os

# Read the Hugging Face API token from the environment (e.g. a Space secret)
sec_key = os.getenv('HUGGINGFACE_API_TOKEN')
if not sec_key:
    st.error("HUGGINGFACE_API_TOKEN is not set.")
    st.stop()

# Define the Hugging Face model and pass the token to the endpoint
repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
llm = HuggingFaceEndpoint(repo_id=repo_id, temperature=0.7, huggingfacehub_api_token=sec_key)

# Define the prompt template
template = """The following is a conversation between a user and an AI assistant.
history:{history}
Final Message by Human: {user_input}
Final Message by AI: """

prompt = PromptTemplate(
    template=template,
    input_variables=["history", "user_input"],
)

# Initialize memory once per session so the conversation survives Streamlit reruns
if "memory" not in st.session_state:
    st.session_state.memory = ConversationBufferMemory(memory_key="history", input_key="user_input")
memory = st.session_state.memory

# Create the LLM chain; the attached memory fills {history} and records each turn
llm_chain = LLMChain(
    prompt=prompt,
    llm=llm,
    memory=memory,
)

# Streamlit app
st.title("AI Chatbot")
st.write("Welcome to the AI Chatbot! Ask anything you like.")

# User input
user_input = st.text_input("You:", key="input")

if st.button("Send"):
    if user_input:
        # Generate a response; memory supplies {history} and saves the new turn automatically
        response = llm_chain.invoke({"user_input": user_input})
        response_text = response["text"]
        # Display the response
        st.text_area("ChatBot:", response_text, height=100)
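For reference, a minimal console sketch of the same chain wiring without the Streamlit UI. It assumes HUGGINGFACE_API_TOKEN is exported and the Hugging Face Inference API is reachable; the example inputs are placeholders. It illustrates the design choice above: with memory attached to the chain, each call only needs user_input, and the history is loaded and saved by the chain itself.

# Minimal sketch of the memory-backed chain, without the Streamlit UI.
# Assumes HUGGINGFACE_API_TOKEN is set and the Inference API is reachable.
import os
from langchain_huggingface import HuggingFaceEndpoint
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory

llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
    temperature=0.7,
    huggingfacehub_api_token=os.getenv("HUGGINGFACE_API_TOKEN"),
)
prompt = PromptTemplate(
    template=(
        "The following is a conversation between a user and an AI assistant.\n"
        "history:{history}\n"
        "Final Message by Human: {user_input}\n"
        "Final Message by AI: "
    ),
    input_variables=["history", "user_input"],
)
memory = ConversationBufferMemory(memory_key="history", input_key="user_input")
chain = LLMChain(llm=llm, prompt=prompt, memory=memory)

# Each invoke only needs user_input; the memory fills {history} and
# records the new human/AI turn after the call returns.
print(chain.invoke({"user_input": "Hi, my name is Sam."})["text"])
print(chain.invoke({"user_input": "What is my name?"})["text"])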
requirements.txt
ADDED
@@ -0,0 +1,3 @@
streamlit
langchain
langchain_huggingface
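To try the app outside the Space, something like pip install -r requirements.txt followed by streamlit run app.py should start it, with HUGGINGFACE_API_TOKEN exported in the shell beforehand (on the Space itself it would be stored as a repository secret).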