JoPmt committed
Commit 851d144
1 Parent(s): 7e3715e

Upload web_surfer (2).py

Files changed (1)
  1. web_surfer (2).py +205 -0
web_surfer (2).py ADDED
@@ -0,0 +1,205 @@
# Shamelessly stolen from Microsoft Autogen team: thanks to them for this great resource!
# https://github.com/microsoft/autogen/blob/gaia_multiagent_v01_march_1st/autogen/browser_utils.py
import os
import re
from typing import Tuple, Optional
from transformers.agents.agents import Tool
import time
##from dotenv import load_dotenv
import requests
from pypdf import PdfReader
from markdownify import markdownify as md
import mimetypes
from browser import SimpleTextBrowser

##load_dotenv(override=True)

user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0"

browser_config = {
    "viewport_size": 1024 * 5,
    "downloads_folder": "coding",
    "request_kwargs": {
        "headers": {"User-Agent": user_agent},
        "timeout": 300,
    },
}

##browser_config["serpapi_key"] = os.getenv("SERP_KEY")

browser = SimpleTextBrowser(**browser_config)


# Helper functions
def _browser_state() -> Tuple[str, str]:
    header = f"Address: {browser.address}\n"
    if browser.page_title is not None:
        header += f"Title: {browser.page_title}\n"

    current_page = browser.viewport_current_page
    total_pages = len(browser.viewport_pages)

    address = browser.address
    for i in range(len(browser.history) - 2, -1, -1):  # Start from the second last
        if browser.history[i][0] == address:
            header += f"You previously visited this page {round(time.time() - browser.history[i][1])} seconds ago.\n"
            break

    header += f"Viewport position: Showing page {current_page+1} of {total_pages}.\n"
    return (header, browser.viewport)


class SearchInformationTool(Tool):
    name = "informational_web_search"
    description = "Perform an INFORMATIONAL web search query then return the search results."
    inputs = {
        "query": {
            "type": "text",
            "description": "The informational web search query to perform."
        },
        "filter_year": {
            "type": "text",
            "description": "[Optional parameter]: filter the search results to only include pages from a specific year. For example, '2020' will only include pages from 2020. Make sure to use this parameter if you're trying to search for articles from a specific date!"
        }
    }
    output_type = "text"

    def forward(self, query: str, filter_year: Optional[int] = None) -> str:
        browser.visit_page(f"google: {query}", filter_year=filter_year)
        header, content = _browser_state()
        return header.strip() + "\n=======================\n" + content


class NavigationalSearchTool(Tool):
    name = "navigational_web_search"
    description = "Perform a NAVIGATIONAL web search query then immediately navigate to the top result. Useful, for example, to navigate to a particular Wikipedia article or other known destination. Equivalent to Google's \"I'm Feeling Lucky\" button."
    inputs = {"query": {"type": "text", "description": "The navigational web search query to perform."}}
    output_type = "text"

    def forward(self, query: str) -> str:
        browser.visit_page(f"google: {query}")

        # Extract the first link (top result) from the search results
        m = re.search(r"\[.*?\]\((http.*?)\)", browser.page_content)
        if m:
            browser.visit_page(m.group(1))

        # Return where we ended up
        header, content = _browser_state()
        return header.strip() + "\n=======================\n" + content


class VisitTool(Tool):
    name = "visit_page"
    description = "Visit a webpage at a given URL and return its text."
    inputs = {"url": {"type": "text", "description": "The relative or absolute url of the webpage to visit."}}
    output_type = "text"

    def forward(self, url: str) -> str:
        browser.visit_page(url)
        header, content = _browser_state()
        return header.strip() + "\n=======================\n" + content


class DownloadTool(Tool):
    name = "download_file"
    description = """
Download a file at a given URL. The file should be of this format: [".xlsx", ".pptx", ".wav", ".mp3", ".png", ".docx"]
After using this tool, for further inspection of this page you should return the download path to your manager via final_answer, and they will be able to inspect it.
DO NOT use this tool for .pdf or .txt or .htm files: for these types of files use visit_page with the file url instead."""
    inputs = {"url": {"type": "text", "description": "The relative or absolute url of the file to be downloaded."}}
    output_type = "text"

    def forward(self, url: str) -> str:
        if "arxiv" in url:
            url = url.replace("abs", "pdf")
        response = requests.get(url)
        content_type = response.headers.get("content-type", "")
        # guess_extension may return None, so fall back to a generic name.
        extension = mimetypes.guess_extension(content_type)
        if extension and isinstance(extension, str):
            new_path = f"./downloads/file{extension}"
        else:
            extension = ""
            new_path = "./downloads/file.object"

        os.makedirs("./downloads", exist_ok=True)  # make sure the downloads folder exists
        with open(new_path, "wb") as f:
            f.write(response.content)

        if "pdf" in extension or "txt" in extension or "htm" in extension:
            raise Exception("Do not use this tool for pdf or txt or html files: use visit_page instead.")

        return f"File was downloaded and saved under path {new_path}."


class PageUpTool(Tool):
    name = "page_up"
    description = "Scroll the viewport UP one page-length in the current webpage and return the new viewport content."
    output_type = "text"

    def forward(self) -> str:
        browser.page_up()
        header, content = _browser_state()
        return header.strip() + "\n=======================\n" + content


class ArchiveSearchTool(Tool):
    name = "find_archived_url"
    description = "Given a url, searches the Wayback Machine and returns the archived version of the url that's closest in time to the desired date."
    inputs = {
        "url": {"type": "text", "description": "The url you need the archive for."},
        "date": {"type": "text", "description": "The date that you want to find the archive for. Give this date in the format 'YYYYMMDD', for instance '27 June 2008' is written as '20080627'."}
    }
    output_type = "text"

    def forward(self, url: str, date: str) -> str:
        archive_url = f"https://archive.org/wayback/available?url={url}&timestamp={date}"
        response = requests.get(archive_url).json()
        try:
            closest = response["archived_snapshots"]["closest"]
        except KeyError:
            raise Exception("Your url was not archived on the Wayback Machine; try a different url.")
        target_url = closest["url"]
        browser.visit_page(target_url)
        header, content = _browser_state()
        return f"Web archive for url {url}, snapshot taken at date {closest['timestamp'][:8]}:\n" + header.strip() + "\n=======================\n" + content


class PageDownTool(Tool):
    name = "page_down"
    description = "Scroll the viewport DOWN one page-length in the current webpage and return the new viewport content."
    output_type = "text"

    def forward(self) -> str:
        browser.page_down()
        header, content = _browser_state()
        return header.strip() + "\n=======================\n" + content


class FinderTool(Tool):
    name = "find_on_page_ctrl_f"
    description = "Scroll the viewport to the first occurrence of the search string. This is equivalent to Ctrl+F."
    inputs = {"search_string": {"type": "text", "description": "The string to search for on the page. This search string supports wildcards like '*'"}}
    output_type = "text"

    def forward(self, search_string: str) -> str:
        find_result = browser.find_on_page(search_string)
        header, content = _browser_state()

        if find_result is None:
            return header.strip() + f"\n=======================\nThe search string '{search_string}' was not found on this page."
        else:
            return header.strip() + "\n=======================\n" + content


class FindNextTool(Tool):
    name = "find_next"
    description = "Scroll the viewport to the next occurrence of the search string. This is equivalent to finding the next match in a Ctrl+F search."
    inputs = {}
    output_type = "text"

    def forward(self) -> str:
        find_result = browser.find_next()
        header, content = _browser_state()

        if find_result is None:
            return header.strip() + "\n=======================\nThe search string was not found on this page."
        else:
            return header.strip() + "\n=======================\n" + content
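
For context, here is a minimal usage sketch showing how these tool classes might be handed to an agent. It is not part of this commit: it assumes the file is saved under an importable name such as web_surfer.py, and it uses ReactJsonAgent from transformers.agents (the agents API this module targets); the llm_engine below is a hypothetical placeholder you would replace with a real model call.

# Usage sketch (not part of this commit). Assumes the module above is saved as
# web_surfer.py and that transformers.agents provides ReactJsonAgent.
from transformers.agents import ReactJsonAgent

from web_surfer import (
    SearchInformationTool,
    NavigationalSearchTool,
    VisitTool,
    DownloadTool,
    PageUpTool,
    PageDownTool,
    FinderTool,
    FindNextTool,
    ArchiveSearchTool,
)


def llm_engine(messages, stop_sequences=None) -> str:
    # Placeholder engine: any callable that maps a list of chat messages to a
    # completion string can be passed as llm_engine to ReactJsonAgent.
    raise NotImplementedError("Plug in your own LLM call here.")


web_tools = [
    SearchInformationTool(),
    NavigationalSearchTool(),
    VisitTool(),
    DownloadTool(),
    PageUpTool(),
    PageDownTool(),
    FinderTool(),
    FindNextTool(),
    ArchiveSearchTool(),
]

agent = ReactJsonAgent(tools=web_tools, llm_engine=llm_engine)
print(agent.run("Find the first paragraph of the Wikipedia article on web browsers."))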