Niansuh committed
Commit bbc2ac1 (1 parent: dfd57a0)

Update app.py

Files changed (1):
  1. app.py (+111 -45)
app.py CHANGED
@@ -9,7 +9,6 @@ import urllib.parse
 import asyncio
 import aiohttp
 import threading
-from typing import List

 app = FastAPI()

@@ -28,12 +27,20 @@ async def search(
     timelimit: Optional[str] = None,
     safesearch: str = "moderate",
     region: str = "wt-wt",
-    backend: str = "api"
+    backend: str = "api",
+    proxy: Optional[str] = None  # Add proxy parameter here
 ):
     """Perform a text search."""
     try:
-        with WEBS() as webs:
-            results = webs.text(keywords=q, region=region, safesearch=safesearch, timelimit=timelimit, backend=backend, max_results=max_results)
+        with WEBS(proxy=proxy) as webs:  # Pass proxy to WEBS instance
+            results = webs.text(
+                keywords=q,
+                region=region,
+                safesearch=safesearch,
+                timelimit=timelimit,
+                backend=backend,
+                max_results=max_results,
+            )
         return JSONResponse(content=jsonable_encoder(results))
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error during search: {e}")
@@ -49,12 +56,24 @@ async def images(
     color: Optional[str] = None,
     type_image: Optional[str] = None,
     layout: Optional[str] = None,
-    license_image: Optional[str] = None
+    license_image: Optional[str] = None,
+    proxy: Optional[str] = None  # Add proxy parameter here
 ):
     """Perform an image search."""
     try:
-        with WEBS() as webs:
-            results = webs.images(keywords=q, region=region, safesearch=safesearch, timelimit=timelimit, size=size, color=color, type_image=type_image, layout=layout, license_image=license_image, max_results=max_results)
+        with WEBS(proxy=proxy) as webs:  # Pass proxy to WEBS instance
+            results = webs.images(
+                keywords=q,
+                region=region,
+                safesearch=safesearch,
+                timelimit=timelimit,
+                size=size,
+                color=color,
+                type_image=type_image,
+                layout=layout,
+                license_image=license_image,
+                max_results=max_results,
+            )
         return JSONResponse(content=jsonable_encoder(results))
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error during image search: {e}")
@@ -68,32 +87,51 @@ async def videos(
     timelimit: Optional[str] = None,
     resolution: Optional[str] = None,
     duration: Optional[str] = None,
-    license_videos: Optional[str] = None
+    license_videos: Optional[str] = None,
+    proxy: Optional[str] = None  # Add proxy parameter here
 ):
     """Perform a video search."""
     try:
-        with WEBS() as webs:
-            results = webs.videos(keywords=q, region=region, safesearch=safesearch, timelimit=timelimit, resolution=resolution, duration=duration, license_videos=license_videos, max_results=max_results)
+        with WEBS(proxy=proxy) as webs:  # Pass proxy to WEBS instance
+            results = webs.videos(
+                keywords=q,
+                region=region,
+                safesearch=safesearch,
+                timelimit=timelimit,
+                resolution=resolution,
+                duration=duration,
+                license_videos=license_videos,
+                max_results=max_results,
+            )
         return JSONResponse(content=jsonable_encoder(results))
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error during video search: {e}")

+
 @app.get("/api/news")
 async def news(
     q: str,
     max_results: int = 10,
     safesearch: str = "moderate",
     region: str = "wt-wt",
-    timelimit: Optional[str] = None
+    timelimit: Optional[str] = None,
+    proxy: Optional[str] = None  # Add proxy parameter here
 ):
     """Perform a news search."""
     try:
-        with WEBS() as webs:
-            results = webs.news(keywords=q, region=region, safesearch=safesearch, timelimit=timelimit, max_results=max_results)
+        with WEBS(proxy=proxy) as webs:  # Pass proxy to WEBS instance
+            results = webs.news(
+                keywords=q,
+                region=region,
+                safesearch=safesearch,
+                timelimit=timelimit,
+                max_results=max_results
+            )
         return JSONResponse(content=jsonable_encoder(results))
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error during news search: {e}")

+
 @app.get("/api/llm")
 async def llm_chat(
     model: str,
@@ -114,20 +152,20 @@ async def llm_chat(


 @app.get("/api/answers")
-async def answers(q: str):
+async def answers(q: str, proxy: Optional[str] = None):
     """Get instant answers for a query."""
     try:
-        with WEBS() as webs:
+        with WEBS(proxy=proxy) as webs:
             results = webs.answers(keywords=q)
         return JSONResponse(content=jsonable_encoder(results))
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error getting instant answers: {e}")

 @app.get("/api/suggestions")
-async def suggestions(q: str, region: str = "wt-wt"):
+async def suggestions(q: str, region: str = "wt-wt", proxy: Optional[str] = None):
     """Get search suggestions for a query."""
     try:
-        with WEBS() as webs:
+        with WEBS(proxy=proxy) as webs:
             results = webs.suggestions(keywords=q, region=region)
         return JSONResponse(content=jsonable_encoder(results))
     except Exception as e:
@@ -136,11 +174,12 @@ async def suggestions(q: str, region: str = "wt-wt"):
 @app.get("/api/chat")
 async def chat(
     q: str,
-    model: str = "gpt-3.5"
+    model: str = "gpt-3.5",
+    proxy: Optional[str] = None
 ):
     """Perform a text search."""
     try:
-        with WEBS() as webs:
+        with WEBS(proxy=proxy) as webs:
             results = webs.chat(keywords=q, model=model)
         return JSONResponse(content=jsonable_encoder(results))
     except Exception as e:
@@ -156,11 +195,12 @@ def extract_text_from_webpage(html_content):
     visible_text = soup.get_text(strip=True)
     return visible_text

-async def fetch_and_extract(url, max_chars):
+async def fetch_and_extract(url, max_chars, proxy: Optional[str] = None):
     """Fetches a URL and extracts text asynchronously."""
+
     async with aiohttp.ClientSession() as session:
         try:
-            async with session.get(url, headers={"User-Agent": "Mozilla/5.0"}) as response:
+            async with session.get(url, headers={"User-Agent": "Mozilla/5.0"}, proxy=proxy) as response:
                 response.raise_for_status()
                 html_content = await response.text()
                 visible_text = extract_text_from_webpage(html_content)
@@ -175,10 +215,11 @@ async def fetch_and_extract(url, max_chars):
 async def web_extract(
     url: str,
     max_chars: int = 12000,  # Adjust based on token limit
+    proxy: Optional[str] = None
 ):
     """Extracts text from a given URL."""
     try:
-        result = await fetch_and_extract(url, max_chars)
+        result = await fetch_and_extract(url, max_chars, proxy)
         return {"url": url, "text": result["text"]}
     except requests.exceptions.RequestException as e:
         raise HTTPException(status_code=500, detail=f"Error fetching or processing URL: {e}")
@@ -192,19 +233,20 @@ async def web_search_and_extract(
     region: str = "wt-wt",
     backend: str = "html",
     max_chars: int = 6000,
-    extract_only: bool = True
+    extract_only: bool = True,
+    proxy: Optional[str] = None
 ):
     """
     Searches using WEBS, extracts text from the top results, and returns both.
     """
     try:
-        with WEBS() as webs:
+        with WEBS(proxy=proxy) as webs:
             # Perform WEBS search
             search_results = webs.text(keywords=q, region=region, safesearch=safesearch,
                                        timelimit=timelimit, backend=backend, max_results=max_results)

             # Extract text from each result's link asynchronously
-            tasks = [fetch_and_extract(result['href'], max_chars) for result in search_results if 'href' in result]
+            tasks = [fetch_and_extract(result['href'], max_chars, proxy) for result in search_results if 'href' in result]
             extracted_results = await asyncio.gather(*tasks)

             if extract_only:
@@ -224,10 +266,11 @@ def extract_text_from_webpage2(html_content):
     visible_text = soup.get_text(strip=True)
     return visible_text

-def fetch_and_extract2(url, max_chars):
+def fetch_and_extract2(url, max_chars, proxy: Optional[str] = None):
     """Fetches a URL and extracts text using threading."""
+    proxies = {'http': proxy, 'https': proxy} if proxy else None
     try:
-        response = requests.get(url, headers={"User-Agent": "Mozilla/5.0"})
+        response = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, proxies=proxies)
         response.raise_for_status()
         html_content = response.text
         visible_text = extract_text_from_webpage2(html_content)
@@ -247,13 +290,14 @@ def web_search_and_extract_threading(
     region: str = "wt-wt",
     backend: str = "html",
     max_chars: int = 6000,
-    extract_only: bool = True
+    extract_only: bool = True,
+    proxy: Optional[str] = None
 ):
     """
     Searches using WEBS, extracts text from the top results using threading, and returns both.
     """
     try:
-        with WEBS() as webs:
+        with WEBS(proxy=proxy) as webs:
             # Perform WEBS search
             search_results = webs.text(keywords=q, region=region, safesearch=safesearch,
                                        timelimit=timelimit, backend=backend, max_results=max_results)
@@ -263,7 +307,7 @@ def web_search_and_extract_threading(
             threads = []
             for result in search_results:
                 if 'href' in result:
-                    thread = threading.Thread(target=lambda: extracted_results.append(fetch_and_extract2(result['href'], max_chars)))
+                    thread = threading.Thread(target=lambda: extracted_results.append(fetch_and_extract2(result['href'], max_chars, proxy)))
                     threads.append(thread)
                     thread.start()

@@ -289,13 +333,14 @@ async def adv_web_search(
     region: str = "wt-wt",
     backend: str = "html",
     max_chars: int = 6000,
-    system_prompt: str = "You are Most Advanced and Powerful Ai chatbot, User ask you questions and you have to answer that, You are also provided with Google Search Results, To increase your accuracy and providing real time data. Your task is to answer in best way to user."
+    system_prompt: str = "You are Most Advanced and Powerful Ai chatbot, User ask you questions and you have to answer that, You are also provided with Google Search Results, To increase your accuracy and providing real time data. Your task is to answer in best way to user.",
+    proxy: Optional[str] = None
 ):
     """
     Combines web search, web extraction, and LLM chat for advanced search.
     """
     try:
-        with WEBS() as webs:
+        with WEBS(proxy=proxy) as webs:
             # 1. Perform the web search
             search_results = webs.text(keywords=q, region=region,
                                        safesearch=safesearch,
@@ -304,7 +349,7 @@ async def adv_web_search(

             # 2. Extract text from top search result URLs asynchronously
             extracted_text = ""
-            tasks = [fetch_and_extract(result['href'], max_chars) for result in search_results if 'href' in result]
+            tasks = [fetch_and_extract(result['href'], max_chars, proxy) for result in search_results if 'href' in result]
             extracted_results = await asyncio.gather(*tasks)
             for result in extracted_results:
                 if result['text']:
@@ -329,18 +374,19 @@ async def adv_web_search(


 @app.get("/api/website_summarizer")
-async def website_summarizer(url: str):
+async def website_summarizer(url: str, proxy: Optional[str] = None):
     """Summarizes the content of a given URL using a chat model."""
     try:
         # Extract text from the given URL
-        response = requests.get(url, headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"})
+        proxies = {'http': proxy, 'https': proxy} if proxy else None
+        response = requests.get(url, headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"}, proxies=proxies)
         response.raise_for_status()
         visible_text = extract_text_from_webpage(response.text)
         if len(visible_text) > 7500:  # Adjust max_chars based on your needs
             visible_text = visible_text[:7500] + "..."

         # Use chat model to summarize the extracted text
-        with WEBS() as webs:
+        with WEBS(proxy=proxy) as webs:
             summary_prompt = f"Summarize this in detail in Paragraph: {visible_text}"
             summary_result = webs.chat(keywords=summary_prompt, model="gpt-3.5")

@@ -353,13 +399,14 @@ async def website_summarizer(url: str):
         raise HTTPException(status_code=500, detail=f"Error during summarization: {e}")

 @app.get("/api/ask_website")
-async def ask_website(url: str, question: str, model: str = "llama-3-70b"):
+async def ask_website(url: str, question: str, model: str = "llama-3-70b", proxy: Optional[str] = None):
     """
     Asks a question about the content of a given website.
     """
     try:
         # Extract text from the given URL
-        response = requests.get(url, headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"})
+        proxies = {'http': proxy, 'https': proxy} if proxy else None
+        response = requests.get(url, headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"}, proxies=proxies)
         response.raise_for_status()
         visible_text = extract_text_from_webpage(response.text)
         if len(visible_text) > 7500:  # Adjust max_chars based on your needs
@@ -369,7 +416,7 @@ async def ask_website(url: str, question: str, model: str = "llama-3-70b"):
         prompt = f"Based on the following text, answer this question in Paragraph: [QUESTION] {question} [TEXT] {visible_text}"

         # Use chat model to get the answer
-        with WEBS() as webs:
+        with WEBS(proxy=proxy) as webs:
             answer_result = webs.chat(keywords=prompt, model=model)

         # Return the answer result
@@ -393,11 +440,12 @@ async def maps(
     latitude: Optional[str] = None,
     longitude: Optional[str] = None,
     radius: int = 0,
-    max_results: int = 10
+    max_results: int = 10,
+    proxy: Optional[str] = None
 ):
     """Perform a maps search."""
     try:
-        with WEBS() as webs:
+        with WEBS(proxy=proxy) as webs:
             results = webs.maps(keywords=q, place=place, street=street, city=city, county=county, state=state, country=country, postalcode=postalcode, latitude=latitude, longitude=longitude, radius=radius, max_results=max_results)
         return JSONResponse(content=jsonable_encoder(results))
     except Exception as e:
@@ -407,26 +455,44 @@ async def maps(
 async def translate(
     q: str,
     from_: Optional[str] = None,
-    to: str = "en"
+    to: str = "en",
+    proxy: Optional[str] = None
 ):
     """Translate text."""
     try:
-        with WEBS() as webs:
+        with WEBS(proxy=proxy) as webs:
             results = webs.translate(keywords=q, from_=from_, to=to)
         return JSONResponse(content=jsonable_encoder(results))
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error during translation: {e}")

+from easygoogletranslate import EasyGoogleTranslate
+
+@app.get("/api/google_translate")
+def google_translate(q: str, from_: Optional[str] = 'auto', to: str = "en"):
+    try:
+        translator = EasyGoogleTranslate(
+            source_language=from_,
+            target_language=to,
+            timeout=10
+        )
+        result = translator.translate(q)
+        return JSONResponse(content=jsonable_encoder({"detected_language": from_, "original": q, "translated": result}))
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Error during translation: {e}")
+
+
 @app.get("/api/youtube/transcript")
 async def youtube_transcript(
     video_id: str,
     languages: str = "en",
-    preserve_formatting: bool = False
+    preserve_formatting: bool = False,
+    proxy: Optional[str] = None  # Add proxy parameter
 ):
     """Get the transcript of a YouTube video."""
     try:
         languages_list = languages.split(",")
-        transcript = transcriber.get_transcript(video_id, languages=languages_list, preserve_formatting=preserve_formatting)
+        transcript = transcriber.get_transcript(video_id, languages=languages_list, preserve_formatting=preserve_formatting, proxies=proxy)
         return JSONResponse(content=jsonable_encoder(transcript))
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error getting YouTube transcript: {e}")
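
For reference, a minimal sketch of how the new proxy query parameter could be exercised once this revision is deployed. It assumes the app is served locally with uvicorn at http://localhost:8000 and uses a placeholder proxy URL; neither assumption is part of the commit.

import requests

BASE = "http://localhost:8000"             # assumed local deployment (uvicorn app:app)
PROXY = "http://user:pass@proxyhost:8080"  # hypothetical upstream proxy URL

# Proxy omitted: unchanged behaviour, the server constructs WEBS() without a proxy.
plain = requests.get(f"{BASE}/api/search", params={"q": "fastapi"})
print(plain.status_code)

# Proxy supplied: the server forwards the value to WEBS(proxy=...) and, on the
# extraction endpoints, to fetch_and_extract(..., proxy).
proxied = requests.get(f"{BASE}/api/search", params={"q": "fastapi", "proxy": PROXY})
print(proxied.status_code)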
 
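
The new /api/google_translate endpoint wraps EasyGoogleTranslate directly. A quick sketch of calling it, under the same assumed local deployment; note that the handler echoes the from_ parameter back as detected_language, so with the default 'auto' the response reports "auto" rather than the language actually detected.

import requests

BASE = "http://localhost:8000"  # assumed local deployment

resp = requests.get(
    f"{BASE}/api/google_translate",
    params={"q": "Bonjour le monde", "to": "en"},  # from_ defaults to 'auto'
)
print(resp.json())  # {"detected_language": "auto", "original": "...", "translated": "..."}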
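
One caveat on the last hunk: youtube_transcript_api forwards its proxies argument to requests, which expects a dict keyed by scheme, while the endpoint passes the raw proxy string through. A defensive variant, mirroring the dict that fetch_and_extract2 builds, might look like the sketch below; it is an illustration, not part of the commit.

from typing import Optional

def to_requests_proxies(proxy: Optional[str]):
    # Map a single proxy URL to the requests-style dict that
    # get_transcript(proxies=...) expects, mirroring fetch_and_extract2.
    return {"http": proxy, "https": proxy} if proxy else None

# transcript = transcriber.get_transcript(
#     video_id,
#     languages=languages_list,
#     preserve_formatting=preserve_formatting,
#     proxies=to_requests_proxies(proxy),
# )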