Spaces: Running on Zero
NGUYEN, Xuan Phi committed
Commit cdef4d5 • Parent(s): 358b242
update with web search
multipurpose_chatbot/demos/langchain_web_search.py
ADDED
@@ -0,0 +1,755 @@
````python
import torch
import os

langchain_install_guide = """pip install --upgrade langchain langchain-community"""
try:
    from typing import List, Optional, Any, Mapping, Union, Dict, Type
    from langchain_core.agents import AgentAction, AgentFinish
    from langchain_core.callbacks import CallbackManagerForLLMRun
    from langchain_core.language_models.llms import BaseLLM
    from langchain_core.outputs import Generation, LLMResult

    from langchain_community.llms.huggingface_hub import HuggingFaceHub
    from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline, VALID_TASKS
    from langchain_community.chat_models.huggingface import ChatHuggingFace
    from langchain_core.pydantic_v1 import root_validator

    # ReAct-style prompt utilities
    from langchain import hub
    from langchain.agents import AgentExecutor, load_tools
    from langchain.agents.format_scratchpad import format_log_to_str
    from langchain.agents.output_parsers import (
        ReActJsonSingleInputOutputParser, ToolsAgentOutputParser
    )
    from langchain.tools.render import render_text_description, render_text_description_and_args
    from langchain_community.utilities import SerpAPIWrapper
    from langchain_core.prompts import ChatPromptTemplate

    from langchain_core.utils.function_calling import (
        convert_to_openai_function,
        convert_to_openai_tool,
    )
    from langchain_core.exceptions import OutputParserException

    from langchain_core.callbacks.manager import (
        AsyncCallbackManagerForLLMRun,
        CallbackManagerForLLMRun,
    )
    from langchain_core.language_models.chat_models import BaseChatModel
    from langchain_core.messages import (
        AIMessage,
        BaseMessage,
        HumanMessage,
        SystemMessage,
    )
    from langchain_core.outputs import ChatGeneration, ChatResult
    from langchain_core.pydantic_v1 import Extra

    from langchain_core.callbacks import (
        AsyncCallbackManagerForToolRun,
        CallbackManagerForToolRun,
    )
    from langchain_core.pydantic_v1 import BaseModel, Field
    from langchain_core.tools import BaseTool

    from langchain_community.utilities.tavily_search import TavilySearchAPIWrapper
    # from langchain_community.tools.tavily_search import (
    #     TavilySearchResults,
    # )

    LANGCHAIN_AVAILABLE = True

except Exception as e:
    print(f'{str(e)}\nNeed to install langchain: `{langchain_install_guide}`')
    LANGCHAIN_AVAILABLE = False


import logging
import importlib

logger = logging.getLogger(__name__)

DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful, and honest assistant."""


class AnyEnginePipeline(BaseLLM):
    """LangChain LLM wrapper around an arbitrary local generation engine."""
    engine: Any  #: :meta private:
    # model_id: str = DEFAULT_MODEL_ID
    model_kwargs: Optional[dict] = None
    """Keyword arguments passed to the model."""
    pipeline_kwargs: Optional[dict] = None
    """Keyword arguments passed to the pipeline."""
    batch_size: int = 1
    """Batch size to use when passing multiple documents to generate."""
    streaming: bool = False

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @classmethod
    def from_engine(
        cls,
        engine: Any,
        model_kwargs: Optional[dict] = None,
        **kwargs
    ):
        return cls(engine=engine, model_kwargs=model_kwargs, **kwargs)

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            "model_kwargs": self.model_kwargs,
        }

    @property
    def _llm_type(self) -> str:
        return "engine_pipeline"

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        # List to hold all results
        text_generations: List[str] = []
        stop_strings = stop
        for i in range(0, len(prompts), self.batch_size):
            batch_prompts = prompts[i : i + self.batch_size]
            responses = []
            for p in batch_prompts:
                # generate_yield_string_final returns the last (text, num_tokens) pair
                output = self.engine.generate_yield_string_final(p, stop_strings=stop_strings, **kwargs)
                responses.append(output[0])
            for prompt, response in zip(batch_prompts, responses):
                text = response
                if text.startswith(prompt):
                    text = text[len(prompt):]
                if stop is not None:
                    # Truncate at the earliest occurrence of any matched stop string.
                    cut_points = [text.index(s) for s in stop if s in text]
                    if cut_points:
                        text = text[:min(cut_points)]
                text_generations.append(text)
        return LLMResult(
            generations=[[Generation(text=text)] for text in text_generations]
        )


class ChatAnyEnginePipeline(BaseChatModel):
    """
    Chat-model wrapper for an engine-backed LLM.
    """
    llm: AnyEnginePipeline
    """The wrapped AnyEnginePipeline LLM."""
    system_message: SystemMessage = SystemMessage(content=DEFAULT_SYSTEM_PROMPT)
    tokenizer: Any = None
    model_id: Optional[str] = None

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        self.tokenizer = self.llm.engine.tokenizer

    @root_validator()
    def validate_llm(cls, values: dict) -> dict:
        # Unlike ChatHuggingFace, accept any AnyEnginePipeline without a type check.
        return values

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        llm_input = self._to_chat_prompt(messages)
        llm_result = self.llm._generate(
            prompts=[llm_input], stop=stop, run_manager=run_manager, **kwargs
        )
        return self._to_chat_result(llm_result)

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        llm_input = self._to_chat_prompt(messages)
        llm_result = await self.llm._agenerate(
            prompts=[llm_input], stop=stop, run_manager=run_manager, **kwargs
        )
        return self._to_chat_result(llm_result)

    def _to_chat_prompt(
        self,
        messages: List[BaseMessage],
    ) -> str:
        """Convert a list of messages into the prompt format expected by the wrapped LLM."""
        if not messages:
            raise ValueError("At least one HumanMessage must be provided!")

        if not isinstance(messages[-1], HumanMessage):
            raise ValueError("Last message must be a HumanMessage!")

        messages_dicts = [self._to_chatml_format(m) for m in messages]

        return self.tokenizer.apply_chat_template(
            messages_dicts, tokenize=False, add_generation_prompt=True
        )

    def _to_chatml_format(self, message: BaseMessage) -> dict:
        """Convert a LangChain message to ChatML format."""
        if isinstance(message, SystemMessage):
            role = "system"
        elif isinstance(message, AIMessage):
            role = "assistant"
        elif isinstance(message, HumanMessage):
            role = "user"
        else:
            raise ValueError(f"Unknown message type: {type(message)}")

        return {"role": role, "content": message.content}

    @staticmethod
    def _to_chat_result(llm_result: LLMResult) -> ChatResult:
        chat_generations = []

        for g in llm_result.generations[0]:
            chat_generation = ChatGeneration(
                message=AIMessage(content=g.text), generation_info=g.generation_info
            )
            chat_generations.append(chat_generation)

        return ChatResult(
            generations=chat_generations, llm_output=llm_result.llm_output
        )

    def _resolve_model_id(self) -> None:
        self.model_id = "debug"

    @property
    def _llm_type(self) -> str:
        return "engine-chat-wrapper"


class TavilyInput(BaseModel):
    """Input for the Tavily tool."""

    query: str = Field(description="search query to look up")


class NewTavilySearchAPIWrapper(TavilySearchAPIWrapper):
    def clean_results(self, results: List[Dict]) -> List[Dict]:
        """Clean results from the Tavily Search API, preferring raw page content."""
        clean_results = []
        for result in results:
            clean_results.append(
                {
                    "url": result["url"],
                    "content": result.get("raw_content", result["content"]),
                }
            )
        return clean_results


class NewTavilySearchResults(BaseTool):
    """Tool that queries the Tavily Search API and gets back json."""

    name: str = "tavily_search_results_json"
    description: str = (
        "A search engine optimized for comprehensive, accurate, and trusted results. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query."
    )
    api_wrapper: NewTavilySearchAPIWrapper = Field(default_factory=NewTavilySearchAPIWrapper)
    max_results: int = 5
    args_schema: Type[BaseModel] = TavilyInput

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Union[List[Dict], str]:
        """Use the tool."""
        try:
            return self.api_wrapper.results(
                query,
                self.max_results,
                include_answer=True,
                include_raw_content=True,
            )
        except Exception as e:
            return repr(e)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> Union[List[Dict], str]:
        """Use the tool asynchronously."""
        try:
            return await self.api_wrapper.results_async(
                query,
                self.max_results,
                include_answer=True,
                include_raw_content=True,
            )
        except Exception as e:
            return repr(e)


FINAL_ANSWER_ACTION = "Final Answer:"


class LooseReActJsonSingleInputOutputParser(ReActJsonSingleInputOutputParser):
    def parse(self, text: str) -> AgentAction | AgentFinish:
        # Fall back to treating the raw text as the final answer when strict
        # ReAct parsing fails, instead of raising.
        try:
            return super().parse(text)
        except OutputParserException:
            output = text
            if FINAL_ANSWER_ACTION in text:
                output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
            return AgentFinish({"output": output}, text)


web_search_system_prompt = """You are a helpful, intelligent and respectful assistant with access to the Internet via the `tavily_search_results_json` search engine tool. \
You provide answers and responses as accurately as possible to the user queries and questions, using the tools available to you. \
You may use your own knowledge to reply to the user. However, if you are not confident about your knowledge, or you do not have the up-to-date knowledge and ability to answer the questions, please use the search tool to query appropriately.

You understand that you have to craft an informative and search-engine-friendly query given the user's question for the engine to retrieve the most relevant information. \
You also understand that if the question is complex, you may need to reason your thoughts step by step, and may call the search engine multiple times if needed. However, you must use as few API calls as possible!
If you have used the search engine, you should include in your final response citations of the website links you have retrieved.

To use the search engine, you must first speak out your thought, then follow by an action as a json blob and understand the observation, and produce the final answer. ALWAYS use the following format:

Question: the input user question you must answer
Thought: you should always think about what to do in the first step
Action:
```
{{
    "action": "tavily_search_results_json",
    "action_input": {{
        "query": "search query 1"
    }}
}}
```
Observation: the result of the search query 1 you just performed
Thought: you continue to think about what to query next, if necessary
Action:
```
{{
    "action": "tavily_search_results_json",
    "action_input": {{
        "query": "search query 2"
    }}
}}
```
Observation: the result of the search query 2 you just performed
... (this Thought/Action/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original user's input question
Citation: ...

You are provided the following concrete examples, please study them and understand your task.

### Example 1

Question: Who is the wife of the current US president?
Thought: This question is twofold and a single search query may not suffice. First I need to find out who the current US president is, then I need to find out who his wife is.
Action:
```
{{
    "action": "tavily_search_results_json",
    "action_input": {{
        "query": "Current US president"
    }}
}}
```
Observation: [{{'url': 'https://en.wikipedia.org/wiki/Joe_Biden', 'content': 'Joe Biden is the current US president. He is the 46th US president.'}}]
Thought: Now I need to find out who the wife of Joe Biden is
Action:
```
{{
    "action": "tavily_search_results_json",
    "action_input": {{
        "query": "Who is the wife of Joe Biden?"
    }}
}}
```
Observation: [{{'url': 'https://en.wikipedia.org/wiki/Jill_Biden', 'content': 'The wife of Joe Biden is Jill Biden, who is an American educator.'}}]
Thought: I now know the final answer
Final Answer: The wife of the current US president is Jill Biden.
Citation:
* https://en.wikipedia.org/wiki/Joe_Biden
* https://en.wikipedia.org/wiki/Jill_Biden

### Example 2

Question: What is langchain?
Thought: I think I should query the internet to understand what langchain is
Action:
```
{{
    "action": "tavily_search_results_json",
    "action_input": {{
        "query": "what is langchain?"
    }}
}}
```
Observation: [{{'url': 'https://python.langchain.com/docs/get_started/introduction/', 'content': 'LangChain is a framework for developing applications powered by large language models (LLMs).'}}]
Thought: I now know the final answer
Final Answer: From my search query, Langchain is a framework for building applications using Large Language Models or LLMs.
Citation:
* https://python.langchain.com/docs/get_started/introduction/


Let's begin! Below is the question from the user.
"""
# FINAL REMARKS: The user may not speak English and may ask you questions in any language. Thus, while your Thought, Action and Observation are in English, your `Final Answer` should be in the same language as the user's query.


def create_web_search_engine():
    from ..globals import MODEL_ENGINE
    from langchain_core.utils.function_calling import (
        convert_to_openai_function,
        convert_to_openai_tool,
    )
    from langchain_core.exceptions import OutputParserException
    from langchain_core.agents import AgentAction, AgentFinish
    web_search_llm = AnyEnginePipeline.from_engine(MODEL_ENGINE)
    web_search_chat_model = ChatAnyEnginePipeline(llm=web_search_llm)
    if "TAVILY_API_KEY" not in os.environ:
        raise ValueError('TAVILY_API_KEY is not found; to use web search, please `export TAVILY_API_KEY=YOUR_TAVILY_API_KEY`')

    tools = [NewTavilySearchResults(max_results=1)]
    formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
    # tools = load_tools(["llm-math"], llm=web_search_llm)
    # formatted_tools = render_text_description_and_args(tools)
    prompt_template = ChatPromptTemplate.from_messages(
        [
            # (
            #     "system",
            #     web_search_system_prompt,
            # ),
            (
                "human",
                web_search_system_prompt + "\n{input}\n{agent_scratchpad}"
                # "{input}\n\n{agent_scratchpad}"
            )
        ]
    )
    prompt = prompt_template.partial(
        tools=formatted_tools,
        tool_names=", ".join([t.name for t in tools]),
    )
    chat_model_with_stop = web_search_chat_model.bind(stop=["\nObservation"])
    agent = (
        {
            "input": lambda x: x["input"],
            "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
        }
        | prompt
        | chat_model_with_stop
        | LooseReActJsonSingleInputOutputParser()
        # | ReActJsonSingleInputOutputParser()
    )

    # instantiate AgentExecutor
    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
    # agent_executor.invoke({"input": "What is langchain?"})
    return web_search_llm, web_search_chat_model, agent_executor


# if LANGCHAIN_AVAILABLE:
#     class LooseReActJsonSingleInputOutputParser(ReActJsonSingleInputOutputParser):
#         def parse(self, text: str) -> AgentAction | AgentFinish:
#             try:
#                 return super().parse(text)
#             except OutputParserException as e:
#                 return AgentFinish({"output": text}, text)


#     class ChatHuggingfaceFromLocalPipeline(ChatHuggingFace):
#         @root_validator()
#         def validate_llm(cls, values: dict) -> dict:
#             return values
#         def _resolve_model_id(self) -> None:
#             """Resolve the model_id from the LLM's inference_server_url"""
#             self.model_id = self.llm.model_id


#     class NewHuggingfacePipeline(HuggingFacePipeline):
#         bos_token = "<bos>"
#         add_bos_token = True

#         @classmethod
#         def from_model_id(
#             cls,
#             model_id: str,
#             task: str,
#             backend: str = "default",
#             device: Optional[int] = -1,
#             device_map: Optional[str] = None,
#             model_kwargs: Optional[dict] = None,
#             pipeline_kwargs: Optional[dict] = None,
#             batch_size: int = 2,
#             model = None,
#             **kwargs: Any,
#         ) -> HuggingFacePipeline:
#             """Construct the pipeline object from model_id and task."""
#             try:
#                 from transformers import (
#                     AutoModelForCausalLM,
#                     AutoModelForSeq2SeqLM,
#                     AutoTokenizer,
#                 )
#                 from transformers import pipeline as hf_pipeline
#             except ImportError:
#                 raise ValueError(
#                     "Could not import transformers python package. "
#                     "Please install it with `pip install transformers`."
#                 )

#             _model_kwargs = model_kwargs or {}
#             tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
#             if model is None:
#                 try:
#                     if task == "text-generation":
#                         if backend == "openvino":
#                             try:
#                                 from optimum.intel.openvino import OVModelForCausalLM
#                             except ImportError:
#                                 raise ValueError(
#                                     "Could not import optimum-intel python package. "
#                                     "Please install it with: "
#                                     "pip install 'optimum[openvino,nncf]' "
#                                 )
#                             try:
#                                 # use local model
#                                 model = OVModelForCausalLM.from_pretrained(
#                                     model_id, **_model_kwargs
#                                 )
#                             except Exception:
#                                 # use remote model
#                                 model = OVModelForCausalLM.from_pretrained(
#                                     model_id, export=True, **_model_kwargs
#                                 )
#                         else:
#                             model = AutoModelForCausalLM.from_pretrained(
#                                 model_id, **_model_kwargs
#                             )
#                     elif task in ("text2text-generation", "summarization", "translation"):
#                         if backend == "openvino":
#                             try:
#                                 from optimum.intel.openvino import OVModelForSeq2SeqLM
#                             except ImportError:
#                                 raise ValueError(
#                                     "Could not import optimum-intel python package. "
#                                     "Please install it with: "
#                                     "pip install 'optimum[openvino,nncf]' "
#                                 )
#                             try:
#                                 # use local model
#                                 model = OVModelForSeq2SeqLM.from_pretrained(
#                                     model_id, **_model_kwargs
#                                 )
#                             except Exception:
#                                 # use remote model
#                                 model = OVModelForSeq2SeqLM.from_pretrained(
#                                     model_id, export=True, **_model_kwargs
#                                 )
#                         else:
#                             model = AutoModelForSeq2SeqLM.from_pretrained(
#                                 model_id, **_model_kwargs
#                             )
#                     else:
#                         raise ValueError(
#                             f"Got invalid task {task}, "
#                             f"currently only {VALID_TASKS} are supported"
#                         )
#                 except ImportError as e:
#                     raise ValueError(
#                         f"Could not load the {task} model due to missing dependencies."
#                     ) from e
#             else:
#                 print(f'Pipeline skipping creation of model because model is given')

#             if tokenizer.pad_token is None:
#                 tokenizer.pad_token_id = model.config.eos_token_id

#             if (
#                 (
#                     getattr(model, "is_loaded_in_4bit", False)
#                     or getattr(model, "is_loaded_in_8bit", False)
#                 )
#                 and device is not None
#                 and backend == "default"
#             ):
#                 logger.warning(
#                     f"Setting the `device` argument to None from {device} to avoid "
#                     "the error caused by attempting to move the model that was already "
#                     "loaded on the GPU using the Accelerate module to the same or "
#                     "another device."
#                 )
#                 device = None

#             if (
#                 device is not None
#                 and importlib.util.find_spec("torch") is not None
#                 and backend == "default"
#             ):
#                 import torch

#                 cuda_device_count = torch.cuda.device_count()
#                 if device < -1 or (device >= cuda_device_count):
#                     raise ValueError(
#                         f"Got device=={device}, "
#                         f"device is required to be within [-1, {cuda_device_count})"
#                     )
#                 if device_map is not None and device < 0:
#                     device = None
#                 if device is not None and device < 0 and cuda_device_count > 0:
#                     logger.warning(
#                         "Device has %d GPUs available. "
#                         "Provide device={deviceId} to `from_model_id` to use available"
#                         "GPUs for execution. deviceId is -1 (default) for CPU and "
#                         "can be a positive integer associated with CUDA device id.",
#                         cuda_device_count,
#                     )
#             if device is not None and device_map is not None and backend == "openvino":
#                 logger.warning("Please set device for OpenVINO through: " "'model_kwargs'")
#             if "trust_remote_code" in _model_kwargs:
#                 _model_kwargs = {
#                     k: v for k, v in _model_kwargs.items() if k != "trust_remote_code"
#                 }
#             _pipeline_kwargs = pipeline_kwargs or {}
#             pipeline = hf_pipeline(
#                 task=task,
#                 model=model,
#                 tokenizer=tokenizer,
#                 device=device,
#                 device_map=device_map,
#                 batch_size=batch_size,
#                 model_kwargs=_model_kwargs,
#                 **_pipeline_kwargs,
#             )
#             if pipeline.task not in VALID_TASKS:
#                 raise ValueError(
#                     f"Got invalid task {pipeline.task}, "
#                     f"currently only {VALID_TASKS} are supported"
#                 )
#             return cls(
#                 pipeline=pipeline,
#                 model_id=model_id,
#                 model_kwargs=_model_kwargs,
#                 pipeline_kwargs=_pipeline_kwargs,
#                 batch_size=batch_size,
#                 **kwargs,
#             )

#         def _generate(
#             self,
#             prompts: List[str],
#             stop: Optional[List[str]] = None,
#             run_manager: Optional[CallbackManagerForLLMRun] = None,
#             **kwargs: Any,
#         ) -> LLMResult:
#             # List to hold all results
#             text_generations: List[str] = []
#             pipeline_kwargs = kwargs.get("pipeline_kwargs", self.pipeline_kwargs)
#             pipeline_kwargs = pipeline_kwargs if len(pipeline_kwargs) > 0 else self.pipeline_kwargs
#             for i in range(0, len(prompts), self.batch_size):
#                 batch_prompts = prompts[i : i + self.batch_size]
#                 bos_token = self.pipeline.tokenizer.convert_ids_to_tokens(self.pipeline.tokenizer.bos_token_id)
#                 for i in range(len(batch_prompts)):
#                     if not batch_prompts[i].startswith(bos_token) and self.add_bos_token:
#                         batch_prompts[i] = bos_token + batch_prompts[i]
#                 # Process batch of prompts
#                 responses = self.pipeline(
#                     batch_prompts,
#                     **pipeline_kwargs,
#                 )
#                 # Process each response in the batch
#                 for j, (prompt, response) in enumerate(zip(batch_prompts, responses)):
#                     if isinstance(response, list):
#                         # if model returns multiple generations, pick the top one
#                         response = response[0]
#                     if self.pipeline.task == "text-generation":
#                         text = response["generated_text"]
#                     elif self.pipeline.task == "text2text-generation":
#                         text = response["generated_text"]
#                     elif self.pipeline.task == "summarization":
#                         text = response["summary_text"]
#                     elif self.pipeline.task in "translation":
#                         text = response["translation_text"]
#                     else:
#                         raise ValueError(
#                             f"Got invalid task {self.pipeline.task}, "
#                             f"currently only {VALID_TASKS} are supported"
#                         )
#                     # Append the processed text to results
#                     if text.startswith(prompt):
#                         text = text[len(prompt):]
#                     if stop is not None and any(x in text for x in stop):
#                         text = text[:text.index(stop[0])]
#                     text_generations.append(text)
#             return LLMResult(
#                 generations=[[Generation(text=text)] for text in text_generations]
#             )
````
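For orientation, here is a minimal usage sketch of the module added above. It assumes the package is importable as `multipurpose_chatbot`, that `multipurpose_chatbot.globals.MODEL_ENGINE` already holds a loaded engine, and that `TAVILY_API_KEY` is exported; none of these are set up by this file itself.

```python
# Minimal sketch (assumptions noted above; not part of the commit).
from multipurpose_chatbot.demos.langchain_web_search import create_web_search_engine

# Wires the engine-backed chat model, the Tavily search tool, and the
# ReAct-style prompt into a LangChain AgentExecutor.
web_search_llm, web_search_chat_model, agent_executor = create_web_search_engine()

# Runs the Thought/Action/Observation loop until a final answer is parsed.
result = agent_executor.invoke({"input": "What is LangChain?"})
print(result["output"])
```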
multipurpose_chatbot/demos/websearch_chat_interface.py
ADDED
@@ -0,0 +1,228 @@
```python
try:
    import spaces
    def maybe_spaces_gpu(fn):
        # Wrap the handler with ZeroGPU allocation when running on HF Spaces.
        fn = spaces.GPU(fn)
        return fn
except ModuleNotFoundError:
    print('Cannot import hf `spaces` with `import spaces`.')
    def maybe_spaces_gpu(fn):
        return fn

import os
import argparse
import glob
import json
import time
import inspect
import anyio
import filelock
import numpy as np
import gradio as gr
from typing import Any, AsyncGenerator, Callable, Dict, Generator, Iterator, List, Literal, Optional, Tuple, Union, cast
from tqdm.auto import tqdm
from huggingface_hub import snapshot_download

from gradio_client import utils as client_utils
from gradio_client.documentation import document, set_documentation_group

from gradio.blocks import Blocks
from gradio.components import (
    Button,
    Chatbot,
    Component,
    Markdown,
    State,
    Textbox,
    get_component_instance,
)
from gradio.events import Dependency, EventListenerMethod, on
from gradio.helpers import create_examples as Examples  # noqa: N812
from gradio.helpers import special_args
from gradio.layouts import Accordion, Group, Row
from gradio.routes import Request
from gradio.themes import ThemeClass as Theme
from gradio.utils import SyncToAsyncIterator, async_iteration


from .base_demo import register_demo, get_demo_class, BaseDemo
from ..configs import (
    SYSTEM_PROMPT,
    MODEL_NAME,
    MAX_TOKENS,
    TEMPERATURE,
    USE_PANEL,
    CHATBOT_HEIGHT,
)

from ..globals import MODEL_ENGINE

from .chat_interface import (
    CHAT_EXAMPLES,
    DATETIME_FORMAT,
    gradio_history_to_conversation_prompt,
    gradio_history_to_openai_conversations,
    get_datetime_string,
    format_conversation,
    chat_response_stream_multiturn_engine,
    CustomizedChatInterface,
    ChatInterfaceDemo,
)

from .langchain_web_search import (
    AnyEnginePipeline,
    ChatAnyEnginePipeline,
    create_web_search_engine,
)


web_search_llm = None
web_search_chat_model = None
web_search_engine = None
web_search_agent = None
# Populated by WebSearchChatInterfaceDemo.create_demo().
agent_executor = None


@maybe_spaces_gpu
def chat_web_search_response_stream_multiturn_engine(
    message: str,
    history: List[Tuple[str, str]],
    temperature: float,
    max_tokens: int,
    system_prompt: Optional[str] = SYSTEM_PROMPT,
):
    global web_search_llm, web_search_chat_model, agent_executor, MODEL_ENGINE
    temperature = float(temperature)
    # ! frequency_penalty removed
    max_tokens = int(max_tokens)
    message = message.strip()
    if len(message) == 0:
        raise gr.Error("The message cannot be empty!")

    response_output = agent_executor.invoke({"input": message})
    print(response_output)
    response = response_output['output']

    full_prompt = gradio_history_to_conversation_prompt(message.strip(), history=history, system_prompt=system_prompt)
    num_tokens = len(MODEL_ENGINE.tokenizer.encode(full_prompt))
    yield response, num_tokens

    # # ! skip safety
    # if DATETIME_FORMAT in system_prompt:
    #     # ! This sometimes works and sometimes doesn't
    #     system_prompt = system_prompt.format(cur_datetime=get_datetime_string())
    # full_prompt = gradio_history_to_conversation_prompt(message.strip(), history=history, system_prompt=system_prompt)
    # # ! length check
    # num_tokens = len(MODEL_ENGINE.tokenizer.encode(full_prompt))
    # if num_tokens >= MODEL_ENGINE.max_position_embeddings - 128:
    #     raise gr.Error(f"Conversation or prompt is too long ({num_tokens} toks), please clear the chatbox or try shorter input.")
    # print(full_prompt)
    # outputs = None
    # response = None
    # num_tokens = -1
    # for j, outputs in enumerate(MODEL_ENGINE.generate_yield_string(
    #     prompt=full_prompt,
    #     temperature=temperature,
    #     max_tokens=max_tokens,
    # )):
    #     if isinstance(outputs, tuple):
    #         response, num_tokens = outputs
    #     else:
    #         response, num_tokens = outputs, -1
    #     yield response, num_tokens
    # print(format_conversation(history + [[message, response]]))
    # if response is not None:
    #     yield response, num_tokens


@register_demo
class WebSearchChatInterfaceDemo(BaseDemo):
    @property
    def tab_name(self):
        return "Web Search"

    def create_demo(
        self,
        title: str | None = None,
        description: str | None = None,
        **kwargs
    ) -> gr.Blocks:
        global web_search_llm, web_search_chat_model, agent_executor
        system_prompt = kwargs.get("system_prompt", SYSTEM_PROMPT)
        max_tokens = kwargs.get("max_tokens", MAX_TOKENS)
        temperature = kwargs.get("temperature", TEMPERATURE)
        model_name = kwargs.get("model_name", MODEL_NAME)
        # frequence_penalty = FREQUENCE_PENALTY
        # presence_penalty = PRESENCE_PENALTY
        description = description or "At the moment, web search is **SINGLE TURN** only, works well only in **English**, and may respond unnaturally!"

        web_search_llm, web_search_chat_model, agent_executor = create_web_search_engine()

        demo_chat = CustomizedChatInterface(
            chat_web_search_response_stream_multiturn_engine,
            chatbot=gr.Chatbot(
                label=model_name,
                bubble_full_width=False,
                latex_delimiters=[
                    {"left": "$", "right": "$", "display": False},
                    {"left": "$$", "right": "$$", "display": True},
                ],
                show_copy_button=True,
                layout="panel" if USE_PANEL else "bubble",
                height=CHATBOT_HEIGHT,
            ),
            textbox=gr.Textbox(placeholder='Type message', lines=1, max_lines=128, min_width=200, scale=8),
            submit_btn=gr.Button(value='Submit', variant="primary", scale=0),
            title=title,
            description=description,
            additional_inputs=[
                gr.Number(value=temperature, label='Temperature (higher -> more random)'),
                gr.Number(value=max_tokens, label='Max generated tokens (increase for longer responses)'),
                # gr.Number(value=frequence_penalty, label='Frequency penalty (> 0 encourage new tokens over repeated tokens)'),
                # gr.Number(value=presence_penalty, label='Presence penalty (> 0 encourage new tokens, < 0 encourage existing tokens)'),
                gr.Textbox(value=system_prompt, label='System prompt', lines=4, interactive=False),
            ],
            examples=[
                ["What is Langchain?"],
                ["Give me latest news about Lawrence Wong."],
                ['What did Jerome Powell say today?'],
                ['What is the best model on the LMSys leaderboard?'],
                ['Where does Messi play right now?'],
            ],
            # ] + CHAT_EXAMPLES,
            cache_examples=False,
        )
        return demo_chat


"""
run:

export BACKEND=mlx
export DEMOS=WebSearchChatInterfaceDemo
python app.py
"""
```
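Outside Gradio, the handler above behaves as a plain generator: once `create_demo()` has populated the module-level `agent_executor`, it yields a single `(response, num_tokens)` pair per call, since the agent loop itself is not streamed token by token. A sketch of driving it directly (argument values are illustrative, not from this commit):

```python
# Illustrative only: consume the handler as a generator.
for response, num_tokens in chat_web_search_response_stream_multiturn_engine(
    message="Where does Messi play right now?",
    history=[],        # web search is single-turn; prior turns are ignored
    temperature=0.7,   # assumed value
    max_tokens=1024,   # assumed value
):
    print(num_tokens, response)
```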
multipurpose_chatbot/engines/base_engine.py
CHANGED
```diff
@@ -19,9 +19,23 @@ class BaseEngine(object):
     def tokenizer(self):
         raise NotImplementedError
 
+    @property
+    def processor(self):
+        raise NotImplementedError
+
     def load_model(self, ):
         raise NotImplementedError
 
+    def generate_yield_string(self, prompt, *args, **kwargs):
+        raise NotImplementedError
+
+    def generate_yield_string_final(self, *args, **kwargs):
+        # Exhaust the streaming generator and return only its last output.
+        output = None
+        for out in self.generate_yield_string(*args, **kwargs):
+            output = out
+        return output
+
     def apply_chat_template(self, conversations, add_generation_prompt: bool, add_special_tokens=False, **kwargs) -> str:
         """
         return string convo, add_special_tokens should be added later
```
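As a concrete illustration of the new `BaseEngine` contract: a subclass only needs to implement the streaming `generate_yield_string`, and the inherited `generate_yield_string_final` exhausts the stream and returns the last yield. The toy engine below is hypothetical, not part of the codebase:

```python
from multipurpose_chatbot.engines.base_engine import BaseEngine

class EchoEngine(BaseEngine):
    # Hypothetical engine that "streams" growing prefixes of the prompt,
    # yielding (partial_text, num_tokens) pairs like the real engines do.
    def generate_yield_string(self, prompt, *args, **kwargs):
        for i in range(1, len(prompt) + 1):
            yield prompt[:i], i

engine = EchoEngine()
# Only the last yielded output is returned.
assert engine.generate_yield_string_final("hello") == ("hello", 5)
```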
multipurpose_chatbot/engines/transformers_engine.py
CHANGED
```diff
@@ -66,9 +66,13 @@ from ..configs import (
     MODEL_PATH,
     DTYPE,
     DEVICE,
+    STREAM_CHECK_MULTIPLE,
+    STREAM_YIELD_MULTIPLE,
 )
 
 
+
+
 def setup_seed(seed):
     if seed == -1:
         return
@@ -405,11 +409,6 @@ class NewGenerationMixin(GenerationMixin):
 
 
 
-from ..configs import (
-    STREAM_CHECK_MULTIPLE,
-    STREAM_YIELD_MULTIPLE,
-)
-
 
 BLOCK_LANGS = str(os.environ.get("BLOCK_LANGS", ""))
 BLOCK_LANGS = [x.strip() for x in BLOCK_LANGS.strip().split(";")] if len(BLOCK_LANGS.strip()) > 0 else []
```
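The hunks above only relocate `STREAM_CHECK_MULTIPLE` and `STREAM_YIELD_MULTIPLE` into the module-level config import; their actual use lives elsewhere in the engine. For intuition, flags like these typically control how often a streaming generator emits partial text. A hypothetical sketch, with the logic assumed rather than taken from this commit:

```python
def yield_every_n(token_stream, stream_yield_multiple=4):
    # Hypothetical: emit accumulated text only every N tokens to reduce
    # per-token yield/UI-update overhead while streaming.
    pieces = []
    for tok in token_stream:
        pieces.append(tok)
        if len(pieces) % stream_yield_multiple == 0:
            yield "".join(pieces)
    yield "".join(pieces)  # final flush

for partial in yield_every_n(iter("hello world"), 4):
    print(partial)
```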