"""Streamlit front-end for the Medicure RAG medical chatbot.

Wires together a quantized Llama-2 chat model (loaded via CTransformers),
a Pinecone vector index of medical documents, and a LangChain RetrievalQA
chain, then exposes a simple question/answer chat interface.
"""
import os
import time

import streamlit as st
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain_community.llms import CTransformers
from langchain_community.vectorstores import Pinecone as LangchainPinecone
from pinecone import Pinecone

from src.helper import download_hf_embeddings, text_split, download_hf_model
from src.prompt import prompt_template

# Load environment variables
load_dotenv()
PINECONE_API_KEY = os.getenv('PINECONE_API_KEY')
index_name = "medicure-chatbot"

# Set page configuration
st.set_page_config(page_title="Medical Chatbot", page_icon="🏥", layout="wide")

# Fail fast with a readable message instead of a deep Pinecone traceback.
if not PINECONE_API_KEY:
    st.error("PINECONE_API_KEY is not set. Add it to your .env file and restart.")
    st.stop()

# Custom CSS for styling (placeholder — no rules defined yet)
st.markdown("""
""", unsafe_allow_html=True)

# Initialize session state for chat history
if 'chat_history' not in st.session_state:
    st.session_state.chat_history = []

# Header
st.title("🏥 Medicure RAG Chatbot")

# Display welcome message
st.write("Welcome to Medicure Chatbot! Ask any medical question and I'll do my best to help you.")
st.write("#### Built with 🤗 Ctransformers, Langchain, and Pinecone. "
         "Powered by Meta Llama-2-7b-chat quantized LLM")


# Initialize the chatbot components once per server process; the heavy
# model/embedding loads are cached by Streamlit across reruns.
@st.cache_resource
def initialize_chatbot():
    """Build and cache the RetrievalQA chain.

    Returns:
        RetrievalQA: a "stuff" chain backed by a quantized Llama-2 LLM and
        a Pinecone similarity retriever (top-2 documents per query).
    """
    embeddings = download_hf_embeddings()
    # CTransformers can pull the GGML weights straight from the HF hub by
    # repo id; the explicit download helper is kept here for reference.
    # model_name_or_path = "TheBloke/Llama-2-7B-Chat-GGML"
    # model_basename = "llama-2-7b-chat.ggmlv3.q4_0.bin"
    # model_path = download_hf_model(model_name_or_path, model_basename)
    model_path = "TheBloke/Llama-2-7B-Chat-GGML"
    llm = CTransformers(model=model_path,
                        model_type="llama",
                        config={'max_new_tokens': 512, 'temperature': 0.8})

    # Initialize Pinecone and wrap the existing index for LangChain retrieval;
    # "text" is the metadata field holding the document content.
    pc = Pinecone(api_key=PINECONE_API_KEY)
    index = pc.Index(index_name)

    PROMPT = PromptTemplate(template=prompt_template,
                            input_variables=["context", "question"])
    chain_type_kwargs = {"prompt": PROMPT}
    docsearch = LangchainPinecone(index, embeddings.embed_query, "text")
    qa = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=docsearch.as_retriever(search_kwargs={'k': 2}),
        return_source_documents=True,
        chain_type_kwargs=chain_type_kwargs)
    return qa


qa = initialize_chatbot()

# Chat interface
user_input = st.text_input("Ask your medical question:")

if st.button("Send", key="send"):
    if user_input:
        with st.spinner("Thinking..."):
            result = qa({"query": user_input})
            response = result["result"]
            st.session_state.chat_history.append(("You", user_input))
            st.session_state.chat_history.append(("Bot", response))

# Display chat history
st.subheader("Chat History")
for role, message in st.session_state.chat_history:
    if role == "You":
        st.markdown(f"**You:** {message}")
    else:
        st.markdown(f"**Bot:** {message}")


# Animated loading for visual appeal
def load_animation():
    """Render a brief animated "Loading..." sequence in a placeholder.

    NOTE(review): defined but never called anywhere in this file — kept for
    backward compatibility; confirm whether it is still wanted.
    """
    with st.empty():
        for _ in range(3):
            for dots in ["⋅", "⋅⋅", "⋅⋅⋅", "⋅⋅⋅⋅"]:
                st.write(f"Loading{dots}")
                time.sleep(0.2)
        st.write("")


# Footer with social links (placeholder — HTML not filled in yet)
st.markdown("""
""", unsafe_allow_html=True)

# Load Font Awesome for icons (placeholder — link tag not filled in yet)
st.markdown('', unsafe_allow_html=True)