import streamlit as st
import os
st.title("Legal Advisor")
os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
os.environ["PINECONE_API_KEY"] = st.secrets["PINECONE_API_KEY"]
# Sidebar for selecting the chatbot
selected_chatbot = st.sidebar.radio("Select Chatbot", ("OpenAI", "Llama 2"))
if selected_chatbot == "OpenAI":
    from openai_call import openai_call
elif selected_chatbot == "Llama 2":
    st.warning(
        "It might take some time to get a response because of the size of the Llama 2 model ⚠️"
    )
    from llama_call import llama_call
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
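# st.session_state persists across Streamlit's script reruns, so the message list
# survives each user interaction within the same browser session.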
st.info("""
**Legal Advisor Bot:**
- **Objective:** Develop a conversational AI chatbot to provide legal advice and assistance. 🤖💼
- **Technology Stack:** Utilizes Streamlit for the user interface and integrates with external chatbot APIs (such as OpenAI and Llama 2) for natural language processing. 🖥️💡
- **Features:**
    - Allows users to select between different chatbot models for varied responses.
    - Provides a chat history feature to track user interactions.
    - Displays a loading spinner while fetching responses from the selected chatbot. ⏳
    - Offers a user-friendly interface for asking legal questions. 💬
- **Emphasis:** Focuses on simplicity, efficiency, and accessibility in delivering legal information and support through conversational AI. 🎯
""")
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# React to user input
if prompt := st.chat_input("Ask something about law"):
    # Display user message in chat message container
    st.chat_message("user").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Show a loading spinner while waiting for a response
    with st.spinner("Thinking ✨..."):
        if selected_chatbot == "Llama 2":
            response = llama_call(prompt)
        elif selected_chatbot == "OpenAI":
            response = openai_call(prompt)
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        st.markdown(response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
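The backend modules imported above, openai_call.py and llama_call.py, live alongside this file and are not shown here. The sketch below is only a guess at the shape of openai_call, assuming a retrieval-augmented flow over a Pinecone index (implied by the PINECONE_API_KEY secret); the index name "legal-docs", the "text" metadata field, and the model names are illustrative placeholders, and llama_call would presumably expose the same prompt-in, answer-out interface.

import os
from openai import OpenAI
from pinecone import Pinecone


def openai_call(prompt: str) -> str:
    client = OpenAI()  # reads OPENAI_API_KEY from the environment set by app.py
    pc = Pinecone(api_key=os.environ["PINECONE_API_KEY"])
    index = pc.Index("legal-docs")  # hypothetical index name

    # Embed the question and retrieve the most similar stored passages.
    embedding = client.embeddings.create(
        model="text-embedding-3-small", input=prompt
    ).data[0].embedding
    matches = index.query(vector=embedding, top_k=3, include_metadata=True).matches
    context = "\n\n".join((m.metadata or {}).get("text", "") for m in matches)

    # Ask the chat model to answer using the retrieved passages as context.
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {
                "role": "system",
                "content": "You are a legal assistant. Use this context:\n" + context,
            },
            {"role": "user", "content": prompt},
        ],
    )
    return response.choices[0].message.content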