lingyit1108 committed on
Commit
db694c4
β€’
1 Parent(s): 5e00c39

added all relevant assets for streamlit deployment

Browse files
Files changed (6) hide show
  1. .gitignore +3 -0
  2. .streamlit/secrets.toml +0 -0
  3. bin/clean.sh +5 -0
  4. main.py +40 -0
  5. streamlit_app.py +77 -0
  6. utils.py +4 -0
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ .DS_Store
2
+
3
+ raw_documents/
.streamlit/secrets.toml ADDED
File without changes
bin/clean.sh ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
#!/bin/bash
# Remove generated Python cache directories from the repository tree.
set -euo pipefail

# -prune stops find from descending into a directory it is about to delete,
# and `-exec rm -rf {} +` is safe for paths containing spaces and for empty
# match sets (the original `| xargs rm -rf` fails with "missing operand"
# when nothing matches, and splits paths on whitespace).
find . -name __pycache__ -prune -exec rm -rf {} +
find . -name .pytest_cache -prune -exec rm -rf {} +
find . -name .ipynb_checkpoints -prune -exec rm -rf {} +
main.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Build a vector index over the insurance knowledge-base PDF and run a
single test query against it (script entry point)."""
import utils
import os

import openai
from llama_index import SimpleDirectoryReader
from llama_index import Document
from llama_index import VectorStoreIndex
from llama_index import ServiceContext
from llama_index.llms import OpenAI

from llama_index.embeddings import HuggingFaceEmbedding


openai.api_key = utils.get_openai_api_key()

if __name__ == "__main__":

    # Load every page of the PDF, then merge them into one Document so the
    # index treats the knowledge base as a single continuous text.
    documents = SimpleDirectoryReader(
        input_files=["./raw_documents/HI_knowledge_base.pdf"]
    ).load_data()

    document = Document(text="\n\n".join([doc.text for doc in documents]))

    ### gpt-4-1106-preview
    ### gpt-3.5-turbo-1106 / gpt-3.5-turbo
    llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.1)
    embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")

    service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
    index = VectorStoreIndex.from_documents([document], service_context=service_context)

    query_engine = index.as_query_engine()

    # BUGFIX: adjacent string literals concatenate with no separator, so each
    # line must end with a space — the original produced "andtreatment" and
    # "ofdaily" in the query text.
    response = query_engine.query(
        ("Intermediate and Long Term Care (ILTC) services are for those who need further care and "
         "treatment after discharge from the hospital, who may need assistance with their activities of "
         "daily living. This can be through"
         )
    )
    print(str(response))
streamlit_app.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import os

import openai
from openai import OpenAI

# App title
st.set_page_config(page_title="πŸ’¬ Open AI Chatbot")

# Sidebar: API-key entry/validation plus model and sampling controls.
with st.sidebar:
    st.title('πŸ’¬ Open AI Chatbot')
    st.write('This chatbot is created using the GPT model from Open AI.')
    if 'OPENAI_API_KEY' in st.secrets:
        st.success('API key already provided!', icon='βœ…')
        openai_api = st.secrets['OPENAI_API_KEY']
    else:
        openai_api = st.text_input('Enter OpenAI API token:', type='password')
        # BUGFIX: validate by prefix only. The old `len(openai_api) == 51`
        # check rejected currently valid OpenAI keys (e.g. `sk-proj-...`
        # project keys are longer than 51 characters).
        if not openai_api.startswith('sk-'):
            st.warning('Please enter your credentials!', icon='⚠️')
        else:
            st.success('Proceed to entering your prompt message!', icon='πŸ‘‰')
    # The OpenAI() client below reads the key from this environment variable.
    os.environ['OPENAI_API_KEY'] = openai_api

    st.subheader('Models and parameters')
    selected_model = st.sidebar.selectbox('Choose an OpenAI model', ['gpt-3.5-turbo-1106', 'gpt-4-1106-preview'], key='selected_model')
    temperature = st.sidebar.slider('temperature', min_value=0.01, max_value=5.0, value=0.1, step=0.01)
    st.markdown('πŸ“– Reach out to Sakimilo to learn how to create this app!')

# Seed the conversation with a greeting on the first run of the session.
if "messages" not in st.session_state.keys():
    st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]

# Replay the stored conversation so history survives Streamlit reruns.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

def clear_chat_history():
    """Reset the conversation back to the initial assistant greeting."""
    st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
42
+
43
def generate_llm_response(prompt_input):
    """Send the stored conversation to OpenAI and return the reply text.

    Parameters
    ----------
    prompt_input : str
        The latest user message. Note: it is expected to already be
        appended to ``st.session_state.messages`` by the caller, so only
        the stored history (plus the system prompt) is sent.

    Returns
    -------
    str
        The assistant's reply content.
    """
    system_content = ("You are a helpful assistant. "
                      "You do not respond as 'User' or pretend to be 'User'. "
                      "You only respond once as 'Assistant'."
                      )

    # ROBUSTNESS: build the client here instead of relying on the
    # module-level `client` that is only created inside the chat-input
    # branch — that was a NameError hazard for any other call path. The
    # key is read from the OPENAI_API_KEY env var set in the sidebar.
    client = OpenAI()
    completion = client.chat.completions.create(
        model=selected_model,
        messages=[
            {"role": "system", "content": system_content},
        ] + st.session_state.messages,
        temperature=temperature
    )
    return completion.choices[0].message.content
57
+
58
# Capture the next user message; the input box stays disabled until an
# API key has been supplied in the sidebar.
user_text = st.chat_input(disabled=not openai_api)
if user_text:
    client = OpenAI()
    st.session_state.messages.append({"role": "user", "content": user_text})
    with st.chat_message("user"):
        st.write(user_text)

# Only ask the model when the conversation currently ends on a user turn.
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"), st.spinner("Thinking..."):
        reply = generate_llm_response(user_text)
        slot = st.empty()
        shown = ''
        # Reveal the reply incrementally, one character at a time.
        for ch in reply:
            shown += ch
            slot.markdown(shown)
        slot.markdown(shown)
        st.session_state.messages.append({"role": "assistant", "content": shown})
utils.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
import os


def get_openai_api_key():
    """Return the OpenAI API key from the OPENAI_API_KEY env var, or None."""
    return os.environ.get("OPENAI_API_KEY")