Spaces: Runtime error

Update app.py

app.py CHANGED
@@ -127,7 +127,7 @@ def compress_data(c,purpose, task, history, result):
     resp = run_gpt(
         COMPRESS_DATA_PROMPT,
         stop_tokens=["observation:", "task:", "action:", "thought:"],
-        max_tokens=
+        max_tokens=2048,
         seed=seed,
         purpose=purpose,
         task=task,
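This hunk, and the two below it, repair the same bug: max_tokens= was left with no value, and a bare keyword argument is a SyntaxError, so app.py never loaded at all, which matches the Space's "Runtime error" status. A quick standard-library check confirms the failure mode:

import ast

try:
    ast.parse("f(max_tokens=)")
except SyntaxError as e:
    print(e)  # "invalid syntax": one truncated argument breaks the whole module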
@@ -162,7 +162,7 @@ def compress_history(purpose, task, history):
     resp = run_gpt(
         COMPRESS_HISTORY_PROMPT,
         stop_tokens=["observation:", "task:", "action:", "thought:"],
-        max_tokens=
+        max_tokens=1024,
         seed=random.randint(1,1000000000),
         purpose=purpose,
         task=task,
@@ -176,7 +176,7 @@ def call_main(purpose, task, history, action_input, result):
     resp = run_gpt(
         FINDER,
         stop_tokens=["observation:", "task:"],
-        max_tokens=
+        max_tokens=2048,
         seed=random.randint(1,1000000000),
         purpose=purpose,
         task=task,
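All three run_gpt call sites (compress_data, compress_history, call_main) had the truncated argument; the commit fills in 2048 tokens for the data and finder prompts and 1024 for history compression. For context, here is a minimal sketch of what a run_gpt wrapper in a Space like this typically looks like; the model name and the internals are assumptions for illustration, not this app's actual code:

from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")  # assumed model

def run_gpt(prompt_template, stop_tokens, max_tokens, seed, **prompt_kwargs):
    # Fill the prompt template, then request a completion from the Inference API.
    prompt = prompt_template.format(**prompt_kwargs)
    return client.text_generation(
        prompt,
        max_new_tokens=max_tokens,   # the argument this commit repairs
        stop_sequences=stop_tokens,
        seed=seed,
        do_sample=True,
    )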
@@ -230,25 +230,13 @@ def search_all(url):
 def find_all(purpose,task,history, url, result):
     return_list=[]
     print (url)
-    #if action_input in query.tasks:
     print (f"trying URL:: {url}")
     try:
         if url != "" and url != None:
-            #rawp = []
             out = []
             source = requests.get(url)
-            #source = urllib.request.urlopen(url).read()
             soup = bs4.BeautifulSoup(source.content,'lxml')
-
-            print(soup.title)
-            # get attributes:
-            print(soup.title.name)
-            # get values:
-            #print(soup.title.string)
-            # beginning navigation:
-            #print(soup.title.parent.name)
-            #rawp.append([tag.name for tag in soup.find_all()] )
-            #print([tag.name for tag in soup.find_all()])
+
             rawp=(f'RAW TEXT RETURNED: {soup.text}')
             cnt=0
             cnt+=len(rawp)
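The deleted lines were exploratory scratch work (soup.title probes and commented-out alternatives); what survives is the actual scrape. Pulled out on its own, the step looks like the sketch below; the function name and the timeout are illustrative additions, not part of the commit:

import requests
import bs4

def fetch_page(url: str):
    # Fetch the page, parse it, and collect the raw text plus anchor metadata,
    # mirroring the cleaned-up body of find_all above.
    source = requests.get(url, timeout=10)
    soup = bs4.BeautifulSoup(source.content, "lxml")
    links = [{"LINK TITLE": a.get("title"), "URL": a.get("href"), "STRING": a.string}
             for a in soup.find_all("a")]
    return f"RAW TEXT RETURNED: {soup.text}", links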
@@ -256,16 +244,11 @@ def find_all(purpose,task,history, url, result):
             out.append("HTML fragments: ")
             q=("a","p","span","content","article")
             for p in soup.find_all("a"):
-                #cnt+=len(p.string)
-                #cnt+=len(p)
-
                 out.append([{"LINK TITLE":p.get('title'),"URL":p.get('href'),"STRING":p.string}])
             c=0
             out = str(out)
             rl = len(out)
-            #print (cnt)
             print(f'rl:: {rl}')
-            #for ea in out:
             for i in str(out):
                 if i == " " or i=="," or i=="\n" or i=="/" or i=="." or i=="<":
                     c +=1
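The surviving loop counts rough word and markup boundaries (spaces, commas, newlines, slashes, dots, angle brackets) as a cheap proxy for how much splittable text came back. A more compact equivalent, shown only for clarity; the Space keeps the explicit loop:

c = sum(1 for ch in out if ch in ' ,\n/.<')  # same six delimiter characters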
@@ -277,8 +260,8 @@ def find_all(purpose,task,history, url, result):
         rawp = out
         result += rawp
 
-        print (rawp)
-        print (f'out:: {out}')
+        #print (rawp)
+        #print (f'out:: {out}')
         history += "observation: the search results are:\n {}\n".format(rawp)
         task = "compile report or complete?"
         return "MAIN", None, history, task, result
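Here the two debug prints are commented out rather than deleted. A logging-based alternative (a suggestion, not part of this commit) keeps them recoverable without spamming stdout:

import logging

log = logging.getLogger(__name__)
log.debug("rawp: %s", rawp)  # silent unless the log level is set to DEBUG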
@@ -358,7 +341,11 @@ def run(purpose,history):
             action_input,
             result
         )
-
+
+        if not result:
+            yield "More Searching..."
+        else:
+            yield result
         if action_name == "COMPLETE":
             break
     return result
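The new yield statements turn run() into a generator: each loop iteration now emits either a progress message ("More Searching...") or the accumulated result, instead of staying silent until completion. Gradio streams successive yields from a generator callback straight into its output component, so the wiring looks roughly like this sketch (the component names and layout are assumptions; the Space's real UI code is elsewhere in app.py):

import gradio as gr

def launch_run(purpose):
    # Re-yield so Gradio sees a generator function and streams each update.
    yield from run(purpose, "")

with gr.Blocks() as demo:
    purpose = gr.Textbox(label="purpose")
    output = gr.Textbox(label="result")
    go = gr.Button("Run")
    # Each value run() yields replaces the textbox contents as it arrives.
    go.click(fn=launch_run, inputs=purpose, outputs=output)

demo.launch()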