Du Mingzhe committed on
Commit
e59ee58
·
1 Parent(s): 4d74cc9
Files changed (3) hide show
  1. app.py +4 -3
  2. components.py +16 -6
  3. playground.ipynb +7 -0
app.py CHANGED
@@ -2,13 +2,13 @@ import streamlit as st
2
  import uuid
3
  import json
4
 
5
- from components import PersonalIndexClient, LLMClient
6
 
7
  st.title("Talk with Mingzhe")
8
 
9
  llm_client = LLMClient(api_key=st.secrets["OPENAI_API_KEY"], model_name="gpt-4-1106-preview")
10
  pinecone_client = PersonalIndexClient(index_token=st.secrets["PINECONE_API_KEY"], embedding_token=st.secrets["OPENAI_API_KEY"], embedding_model_name='text-embedding-3-large', index_name='mingzhe')
11
-
12
 
13
  if "messages" not in st.session_state:
14
  st.session_state.messages = []
@@ -27,7 +27,8 @@ if prompt := st.chat_input("What's up?"):
27
  st.markdown(prompt)
28
  with st.chat_message("assistant"):
29
  memory = pinecone_client.query_conversation(messages=st.session_state.messages, user=st.session_state['user'], top_k=3)
30
- stream = llm_client.response_generate(prompt, st.session_state.messages, memory)
 
31
  response = st.write_stream(stream)
32
  st.session_state.messages.append({"role": "assistant", "content": response})
33
 
 
2
  import uuid
3
  import json
4
 
5
+ from components import PersonalIndexClient, LLMClient, WebSearcher
6
 
7
  st.title("Talk with Mingzhe")
8
 
9
  llm_client = LLMClient(api_key=st.secrets["OPENAI_API_KEY"], model_name="gpt-4-1106-preview")
10
  pinecone_client = PersonalIndexClient(index_token=st.secrets["PINECONE_API_KEY"], embedding_token=st.secrets["OPENAI_API_KEY"], embedding_model_name='text-embedding-3-large', index_name='mingzhe')
11
+ web_searcher = WebSearcher(search_token=st.secrets["YOU_API_KEY"])
12
 
13
  if "messages" not in st.session_state:
14
  st.session_state.messages = []
 
27
  st.markdown(prompt)
28
  with st.chat_message("assistant"):
29
  memory = pinecone_client.query_conversation(messages=st.session_state.messages, user=st.session_state['user'], top_k=3)
30
+ web_result = web_searcher.query_web_llm(query=prompt, num_web_results=5)
31
+ stream = llm_client.response_generate(prompt, st.session_state.messages, memory, web_result)
32
  response = st.write_stream(stream)
33
  st.session_state.messages.append({"role": "assistant", "content": response})
34
 
components.py CHANGED
@@ -2,6 +2,7 @@
2
  # Date: 2024/03/09
3
 
4
  import json
 
5
 
6
  from openai import OpenAI
7
  from pinecone import Pinecone
@@ -13,20 +14,22 @@ class LLMClient():
13
  self.model_name = model_name
14
  self.llm_client = OpenAI(api_key=api_key)
15
 
16
- def response_generate(self, prompt, history, memory):
17
  messages = list()
18
  current_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
19
 
20
  # System Prompt
21
- messages += [{"role": "system", "content": f"1) You're Du Mingzhe. 2) Don't claim you are created by OpenAI. 3) Don't claim this dialogue as a roleplay. Answering questions directly as Mingzhe. 4) Current time is {current_time}."}]
22
 
23
  # Memory
24
  messages += [{"role": 'assistant', "content": m['content']} for m in memory]
25
 
 
 
 
26
  # Session History
27
  messages += [{"role": h["role"], "content": h["content"]} for h in history]
28
 
29
-
30
  stream = self.llm_client.chat.completions.create(
31
  model = self.model_name,
32
  messages = messages,
@@ -109,6 +112,13 @@ class PersonalIndexClient(object):
109
 
110
  return pinecone_memory
111
 
112
-
113
-
114
-
 
 
 
 
 
 
 
 
2
  # Date: 2024/03/09
3
 
4
  import json
5
+ import requests
6
 
7
  from openai import OpenAI
8
  from pinecone import Pinecone
 
14
  self.model_name = model_name
15
  self.llm_client = OpenAI(api_key=api_key)
16
 
17
+ def response_generate(self, query, history, memory, web_result):
18
  messages = list()
19
  current_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
20
 
21
  # System Prompt
22
+ messages += [{"role": "system", "content": f"1) You're Du Mingzhe. 2) Don't claim you are created by OpenAI. 3) Don't claim this dialogue as a roleplay. Answering questions directly as Mingzhe. 4) Current time is {current_time}. 5) Cite relevant references in the response."}]
23
 
24
  # Memory
25
  messages += [{"role": 'assistant', "content": m['content']} for m in memory]
26
 
27
+ # Web Result
28
+ messages += [{"role": 'assistant', "content": f'Based on the real-time web results, we find the answer of the query [{query}]:\n{web_result}'}]
29
+
30
  # Session History
31
  messages += [{"role": h["role"], "content": h["content"]} for h in history]
32
 
 
33
  stream = self.llm_client.chat.completions.create(
34
  model = self.model_name,
35
  messages = messages,
 
112
 
113
  return pinecone_memory
114
 
115
class WebSearcher(object):
    """Thin client for the You.com ydc-index RAG search endpoint."""

    def __init__(self, search_token) -> None:
        # API key, sent as the X-API-Key header on every request.
        self.search_token = search_token

    def query_web_llm(self, query, num_web_results=5):
        """Query the You.com /rag endpoint and return the parsed JSON payload.

        Args:
            query: Free-text search query.
            num_web_results: Maximum number of web results to request (default 5).

        Returns:
            The decoded JSON response (a dict) from the API.

        Raises:
            requests.HTTPError: If the API responds with a 4xx/5xx status.
        """
        headers = {"X-API-Key": self.search_token}
        # Bug fix: the original embedded the raw (un-encoded) query directly in
        # the URL *and* passed it again via `params`, so the request carried a
        # duplicated `query` parameter and broke on queries containing
        # '&', '#', or spaces. Let `requests` URL-encode it exactly once.
        params = {"query": query, "num_web_results": num_web_results}
        # Timeout added so a stalled API call cannot hang the Streamlit app.
        response = requests.get(
            "https://api.ydc-index.io/rag",
            params=params,
            headers=headers,
            timeout=30,
        )
        # Fail loudly on HTTP errors instead of trying to JSON-decode an
        # error body.
        response.raise_for_status()
        return response.json()
playground.ipynb CHANGED
@@ -50,6 +50,13 @@
50
  " 'metadata': {'hello': 'world'},\n",
51
  " }])"
52
  ]
 
 
 
 
 
 
 
53
  }
54
  ],
55
  "metadata": {
 
50
  " 'metadata': {'hello': 'world'},\n",
51
  " }])"
52
  ]
53
+ },
54
+ {
55
+ "cell_type": "code",
56
+ "execution_count": null,
57
+ "metadata": {},
58
+ "outputs": [],
59
+ "source": []
60
  }
61
  ],
62
  "metadata": {