Update app.py
app.py CHANGED
@@ -1,39 +1,41 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
-from 
-
-
+from selenium import webdriver
+from selenium.webdriver.common.by import By
+from selenium.webdriver.chrome.service import Service
+from webdriver_manager.chrome import ChromeDriverManager
+import time
 
 client = InferenceClient("Futuresony/future_ai_12_10_2024.gguf")
 
 def is_uncertain(question, response):
     """Check if the model's response is unreliable."""
-    if len(response.split()) < 4:
-        return True
-    if response.lower() in question.lower():  # Repeats question = unsure
-        return True
-    uncertain_phrases = [
-        "Kulingana na utafiti", "Inaaminika kuwa", "Ninadhani",
-        "It is believed that", "Some people say", "Inasemekana kuwa"
-    ]
-    if any(phrase.lower() in response.lower() for phrase in uncertain_phrases):
+    if len(response.split()) < 4 or response.lower() in question.lower():
         return True
-
+    uncertain_phrases = ["Kulingana na utafiti", "Inaaminika kuwa", "Ninadhani", "It is believed that", "Some people say"]
+    return any(phrase.lower() in response.lower() for phrase in uncertain_phrases)
 
 def google_search(query):
-    """Fetch search results using 
-
-
-
-
-
-
-
-
-
-
-
+    """Fetch search results using Selenium."""
+    options = webdriver.ChromeOptions()
+    options.add_argument("--headless")  # Run in background
+    driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=options)
+
+    driver.get(f"https://www.google.com/search?q={query}")
+    time.sleep(2)  # Wait for page to load
+
+    try:
+        # Extract answer from featured snippet if available
+        snippet = driver.find_element(By.CLASS_NAME, "hgKElc").text
+    except:
+        # Extract first search result
+        try:
+            snippet = driver.find_element(By.CSS_SELECTOR, "div.BNeawe.s3v9rd.AP7Wnd").text
+        except:
+            snippet = "Sorry, I couldn't find an answer on Google."
+
+    driver.quit()
+    return snippet
 
 def respond(message, history, system_message, max_tokens, temperature, top_p):
     messages = [{"role": "system", "content": system_message}]
@@ -48,7 +50,6 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
         response += token
         yield response  # Stream the response
 
-    # If the model's response is unreliable, fetch from Google
     if is_uncertain(message, response):
         google_response = google_search(message)
         yield f"🤖 AI: {response}\n\n🌍 Google: {google_response}"