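"""dochatter: a Streamlit RAG chatbot for medical reference questions.

Retrieves context from per-book Chroma vector stores (BGE embeddings) and
answers with a Together-hosted Mixtral model, keeping per-session chat history.

Run with: streamlit run <this_file>.py
Requires a Together API key in the 'pilotikval' environment variable and the
Chroma persist directories referenced below to exist on disk.
"""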
import os

import streamlit as st
from langchain.chains import create_history_aware_retriever, create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_together import Together
# Initialize the LLM (Mixtral 8x22B Instruct served via the Together API)
llm = Together(
model="mistralai/Mixtral-8x22B-Instruct-v0.1",
temperature=0.2,
top_k=12,
max_tokens=22048,
together_api_key=os.environ['pilotikval']
)
# In-memory store mapping session IDs to their chat message histories
store = {}
model_name = "BAAI/bge-base-en"
encode_kwargs = {'normalize_embeddings': True}  # normalize so similarity scores are cosine similarity
embedding_function = HuggingFaceBgeEmbeddings(
model_name=model_name,
encode_kwargs=encode_kwargs
)
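# Called by RunnableWithMessageHistory below: returns (creating on first use)
# the chat history for a session, kept in Streamlit session state under its ID.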
def get_session_history(session_id: str) -> BaseChatMessageHistory:
if session_id not in store:
store[session_id] = StreamlitChatMessageHistory(key=session_id)
return store[session_id]
# Define the Streamlit app
def app():
with st.sidebar:
st.title("dochatter")
option = st.selectbox(
'Which retriever would you like to use?',
('General Medicine', 'RespiratoryFishman', 'RespiratoryMurray', 'MedMRCP2', 'OldMedicine')
)
    # Map the selected option to its Chroma persist directory and collection;
    # keys must match the selectbox options above (falls back to OldMedicine).
    persist_directory = {
        'General Medicine': "./oxfordmedbookdir/",
        'RespiratoryFishman': "./respfishmandbcud/",
        'RespiratoryMurray': "./respmurray/",
        'MedMRCP2': "./medmrcp2store/",
        'OldMedicine': "./mrcpchromadb/"
    }.get(option, "./mrcpchromadb/")
    collection_name = {
        'General Medicine': "oxfordmed",
        'RespiratoryFishman': "fishmannotescud",
        'RespiratoryMurray': "respmurraynotes",
        'MedMRCP2': "medmrcp2notes",
        'OldMedicine': "mrcppassmednotes"
    }.get(option, "mrcppassmednotes")
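    # Open the persisted Chroma collection and expose the top-5 most similar
    # chunks as retrieval context for each query.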
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_function, collection_name=collection_name)
retriever = vectordb.as_retriever(search_kwargs={"k": 5})
# Define the prompt templates
contextualize_q_system_prompt = (
"Given a chat history and the latest user question "
"which might reference context in the chat history, "
"formulate a standalone question which can be understood "
"without the chat history. Do NOT answer the question, "
"just reformulate it if needed and otherwise return it as is."
)
contextualize_q_prompt = ChatPromptTemplate.from_messages(
[
("system", contextualize_q_system_prompt),
MessagesPlaceholder("chat_history"),
("human", "{input}"),
]
)
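    # The history-aware retriever first rewrites the incoming question into a
    # standalone query (using the prompt above), then retrieves against it.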
history_aware_retriever = create_history_aware_retriever(
llm, retriever, contextualize_q_prompt
)
    system_prompt = (
        "You are helping a doctor. Be as detailed and thorough as possible. "
        "Use the following pieces of retrieved context to answer "
        "the question. If you don't know the answer, say that you "
        "don't know."
        "\n\n"
        "{context}"
    )
qa_prompt = ChatPromptTemplate.from_messages(
[
("system", system_prompt),
MessagesPlaceholder("chat_history"),
("human", "{input}"),
]
)
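    # "Stuff" the retrieved documents into {context} of the QA prompt, then
    # chain question-rewriting retrieval and answering into one RAG pipeline.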
question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
# Statefully manage chat history
conversational_rag_chain = RunnableWithMessageHistory(
rag_chain,
get_session_history,
input_messages_key="input",
history_messages_key="chat_history",
output_messages_key="answer",
)
    # Session state: seed the visible transcript with an assistant greeting
    if "messages" not in st.session_state:
        st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
st.header("Hello Doc!")
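    # Replay the transcript so far (Streamlit reruns the script on each input)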
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.write(message["content"])
prompts2 = st.chat_input("Say something")
if prompts2:
st.session_state.messages.append({"role": "user", "content": prompts2})
with st.chat_message("user"):
st.write(prompts2)
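    # Generate a reply only when the latest message came from the user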
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
final_response = conversational_rag_chain.invoke(
{
"input": prompts2,
},
config={"configurable": {"session_id": "current_session"}}
)
st.write(final_response['answer'])
st.session_state.messages.append({"role": "assistant", "content": final_response['answer']})
if __name__ == '__main__':
app()