acloudfan commited on
Commit
4df8c22
·
verified ·
1 Parent(s): d3de079

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +132 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Manages user & assistant messages in the session state.
2
+
3
+ ### 1. Import the libraries
4
+ import streamlit as st
5
+ import time
6
+ import os
7
+ from dotenv import load_dotenv
8
+
9
+ from langchain.memory import ConversationSummaryMemory
10
+ from langchain.chains import ConversationChain
11
+ from langchain_openai import ChatOpenAI
12
+ from langchain.prompts import PromptTemplate
13
+ from langchain_core.messages import HumanMessage, AIMessage
14
+
15
+ # This is to simplify local development
16
+ # Without this you will need to copy/paste the API key with every change
17
+ try:
18
+ # CHANGE the location of the file
19
+ load_dotenv('C:\\Users\\raj\\.jupyter\\.env')
20
+ # Add the API key to the session - use it for populating the interface
21
+ if os.getenv('OPENAI_API_KEY'):
22
+ st.session_state['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')
23
+ except:
24
+ print("Environment file not found !! Copy & paste your OpenAI API key.")
25
+
26
+
27
### 1. Setup the title & input text element for the OpenAI API key
# Set the page title and collect the OpenAI API key in the sidebar,
# pre-filling the field from the session state when a key is available.
st.title("LangChain ConversationSummaryMemory !!!")

stored_key = st.session_state.get('OPENAI_API_KEY')
if stored_key is not None:
    openai_api_key = st.sidebar.text_input('OpenAI API key', value=stored_key)
else:
    openai_api_key = st.sidebar.text_input('OpenAI API key', placeholder='copy & paste your OpenAI API key')
37
+
38
+ ### 2. Define utility functions to invoke the LLM
39
+
40
# Create an instance of the LLM used for summarizing the conversation.
@st.cache_resource
def get_summarization_llm(model='gpt-3.5-turbo-0125'):
    """Return a cached ChatOpenAI client for conversation summarization.

    Args:
        model: OpenAI chat model name. The default preserves the original
            hard-coded behavior; callers may now override it.

    NOTE(review): @st.cache_resource builds the client once per (args) key,
    so a changed sidebar API key will NOT be picked up until the cache is
    cleared — confirm this is acceptable.
    """
    return ChatOpenAI(model=model, openai_api_key=openai_api_key)
45
+
46
# Create an instance of the LLM used for generating chatbot responses.
@st.cache_resource
def get_llm(model='gpt-3.5-turbo-0125'):
    """Return a cached ChatOpenAI client for chat responses.

    Args:
        model: OpenAI chat model name. The default preserves the original
            hard-coded behavior; callers may now override it.

    NOTE(review): the cached client captures openai_api_key at creation
    time — a key changed later in the sidebar will not take effect until
    the cache is cleared. Confirm this is acceptable.
    """
    return ChatOpenAI(model=model, openai_api_key=openai_api_key)
51
+
52
@st.cache_resource
def get_llm_chain():
    """Return a cached ConversationChain wired to the session memory.

    Returns:
        A ConversationChain using the response LLM and the
        ConversationSummaryMemory stored under st.session_state['MEMORY'].

    NOTE(review): @st.cache_resource is shared across ALL user sessions,
    so the memory captured here belongs to whichever session first called
    this function — confirm this is intended for multi-user deployments.
    """
    memory = st.session_state['MEMORY']
    # Commented-out prompt/verbose experiments removed for clarity.
    return ConversationChain(
        llm=get_llm(),
        memory=memory,
    )
62
+
63
# Expose the running conversation summary as the chat context.
def get_chat_context():
    """Return the summary buffer held by the session's memory object."""
    return st.session_state['MEMORY'].buffer
67
+
68
# Generate the assistant response for the given user prompt.
def get_llm_response(prompt):
    """Invoke the conversation chain with the summary context prepended.

    Args:
        prompt: Raw user question taken from the chat input widget.

    Returns:
        The mapping produced by ConversationChain.invoke; the caller reads
        its 'response' key.
    """
    chain = get_llm_chain()

    # Show a spinner while we wait for the LLM round-trip
    with st.spinner('Invoking LLM ... '):
        # Prefix the query with the summarized conversation context
        chat_context = get_chat_context()
        query_payload = chat_context + '\n\n Question: ' + prompt

        response = chain.invoke(query_payload)

    return response
84
+
85
# Create the summary memory exactly once per user session; Streamlit reruns
# the whole script on every interaction, so guard on the session state key.
if 'MEMORY' not in st.session_state:
    st.session_state['MEMORY'] = ConversationSummaryMemory(
        llm=get_summarization_llm(),
        human_prefix='user',
        ai_prefix='assistant',
        return_messages=True,
    )
95
+
96
### 3. Write the messages to chat_message container
# Replay the stored history into st.chat_message elements on every rerun,
# because Streamlit re-executes the entire script whenever a widget changes.
# https://docs.streamlit.io/develop/api-reference/chat/st.chat_message

for msg in st.session_state['MEMORY'].chat_memory.messages:
    if isinstance(msg, HumanMessage):
        role = 'user'
    elif isinstance(msg, AIMessage):
        role = 'ai'
    else:
        # Non-chat messages are only logged, never rendered
        print('System message: ', msg.content)
        continue
    st.chat_message(role).write(msg.content)
109
+
110
+
111
### 4. Create the *chat_input* element to get the user query
# Interface for user input
prompt = st.chat_input(placeholder='Your input here')

### 5. Process the query received from user
if prompt:
    # Echo the user's prompt into the chat transcript
    st.chat_message('user').write(prompt)

    # Invoke the LLM and render its answer as a chat message
    llm_output = get_llm_response(prompt)
    st.chat_message('ai').write(llm_output['response'])
126
+
127
### 6. Write out the current content of the context
st.divider()
st.subheader('Context/Summary:')

# Render the summary buffer explicitly. The original relied on Streamlit
# "magic" (a bare expression statement); st.write is the explicit,
# equivalent form and survives tools that strip expression statements.
st.write(st.session_state['MEMORY'].buffer)
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ python-dotenv
+ streamlit
+ langchain
+ langchain-openai
+ openai