Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -5,6 +5,40 @@ import gradio as gr
|
|
5 |
# Extractive question answering: ELECTRA-base fine-tuned on SQuAD 2.0,
# served through the transformers question-answering pipeline.
model_name = "deepset/electra-base-squad2"
nlp = pipeline('question-answering', model=model_name, tokenizer=model_name)
|
7 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
def get_wiki_article(topic):
|
9 |
topic=topic
|
10 |
try:
|
@@ -18,6 +52,7 @@ def get_wiki_article(topic):
|
|
18 |
choices = [x for x in e.options if ('disambiguation' not in x) and ('All pages' not in x) and (x!=topic)]
|
19 |
s = random.choice(choices)
|
20 |
p = wikipedia.page(s)
|
|
|
21 |
return p.content, p.url
|
22 |
|
23 |
def get_answer(topic, question):
|
|
|
5 |
# Extractive question answering: ELECTRA-base fine-tuned on SQuAD 2.0,
# served through the transformers question-answering pipeline.
model_name = "deepset/electra-base-squad2"
nlp = pipeline('question-answering', model=model_name, tokenizer=model_name)
|
7 |
|
8 |
+
# dataset save ------------------------------------
import os
import huggingface_hub
from huggingface_hub import Repository, hf_hub_download, upload_file
from datetime import datetime

# created new dataset as awacke1/MindfulStory.csv
DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/MindfulStory.csv"
DATASET_REPO_ID = "awacke1/MindfulStory.csv"
DATA_FILENAME = "MindfulStory.csv"
# Was referenced below but never defined; the bare `except:` silently ate the
# NameError, so the download never actually ran.
DATA_DIRNAME = "data"
DATA_FILE = os.path.join(DATA_DIRNAME, DATA_FILENAME)
HF_TOKEN = os.environ.get("HF_TOKEN")  # write token, injected via Space secrets

# Warm the local data dir with the current CSV. A missing file on first run
# is expected, so failure is reported but not fatal.
try:
    hf_hub_download(
        repo_id=DATASET_REPO_ID,
        repo_type="dataset",  # repo id names a dataset; hf_hub_download defaults to "model"
        filename=DATA_FILENAME,
        cache_dir=DATA_DIRNAME,
        force_filename=DATA_FILENAME,
    )
except Exception as err:  # was a bare `except:` — don't mask SystemExit/KeyboardInterrupt
    print("file not found:", err)
|
28 |
+
def AIMemory(title: str, story: str) -> str:
    """Append a (title, story, timestamp) row to the local CSV and push the
    clone to the dataset repo.

    Blank/empty title or story is a no-op. Always returns "" so the result
    can be wired straight into a Gradio output slot.
    """
    if title and story:
        # newline="" is required by the csv module; without it extra blank
        # rows are written on Windows.
        with open(DATA_FILE, "a", newline="") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=["title", "story", "time"])
            writer.writerow({"title": title, "story": story, "time": str(datetime.now())})
        # `repo` is the module-level Repository clone; the previous
        # `commit_url = ...` binding was never used.
        repo.push_to_hub()
    return ""
|
35 |
+
# Set up cloned dataset from repo for operations: clone (or reuse) the
# dataset repo under ./data so appended rows can be committed back.
repo = Repository(
    local_dir="data",
    clone_from=DATASET_REPO_URL,
    use_auth_token=HF_TOKEN,
)
# dataset save ------------------------------------
|
40 |
+
|
41 |
+
|
42 |
def get_wiki_article(topic):
|
43 |
topic=topic
|
44 |
try:
|
|
|
52 |
choices = [x for x in e.options if ('disambiguation' not in x) and ('All pages' not in x) and (x!=topic)]
|
53 |
s = random.choice(choices)
|
54 |
p = wikipedia.page(s)
|
55 |
+
saved = AIMemory(p, s)
|
56 |
return p.content, p.url
|
57 |
|
58 |
def get_answer(topic, question):
|