Commit 6d383fb
Parent(s): b881ca6
First Commit

- .gitignore +0 -0
- app.py +101 -0
- gradio_queue.db +0 -0
- requirements.txt +90 -0
.gitignore
ADDED
File without changes
app.py
ADDED
@@ -0,0 +1,101 @@
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.datasets import imdb  # pyright: reportMissingImports=false
from huggingface_hub import from_pretrained_keras
import gradio as gr
from typing import Dict

class KerasIMDBTokenizer:

    def __init__(self, vocab_size: int = 20000) -> None:

        # Parameters used in `keras.datasets.imdb.load_data`
        self.START_CHAR = 1
        self.OOV_CHAR = 2
        self.INDEX_FROM = 3

        self.word_index: dict[str, int] = imdb.get_word_index()
        self.word_index = {
            token: input_id + self.INDEX_FROM
            for token, input_id in self.word_index.items() if input_id <= vocab_size
        }

    def tokenize_and_pad(self, text: str, maxlen: int = 200) -> np.ndarray:
        tokens = text.split()
        input_ids = [self.word_index.get(token.lower(), self.OOV_CHAR) for token in tokens]
        input_ids.insert(0, self.START_CHAR)
        # pad_sequences only accepts a list of sequences
        return pad_sequences([input_ids], maxlen=maxlen)

model = from_pretrained_keras("keras-io/text-classification-with-transformer", compile=False)
tokenizer = KerasIMDBTokenizer()

def sentiment_analysis(model_input: str) -> Dict[str, float]:
    tokenized = tokenizer.tokenize_and_pad(model_input)

    prediction = model.predict(tokenized)[0]
    ret = {
        "negative": float(prediction[0]),
        "positive": float(prediction[1])
    }
    return ret


model_input = gr.Textbox("Input text here", show_label=False)
model_output = gr.Label("Sentiment Analysis Result", num_top_classes=2, show_label=True, label="Sentiment Analysis Result")


examples = [
    (
        "Story of a man who has unnatural feelings for a pig. "
        "Starts out with a opening scene that is a terrific example of absurd comedy. "
        "A formal orchestra audience is turned into an insane, violent mob by the crazy chantings of it's singers. "
        "Unfortunately it stays absurd the WHOLE time with no general narrative eventually making it just too off putting. "
        "Even those from the era should be turned off. "
        "The cryptic dialogue would make Shakespeare seem easy to a third grader. "
        "On a technical level it's better than you might think with some good cinematography by future great Vilmos Zsigmond. "
        "Future stars Sally Kirkland and Frederic Forrest can be seen briefly."
    ),
    (
        "I came in in the middle of this film so I had no idea about any credits or even its title till I looked it up here, "
        "where I see that it has received a mixed reception by your commentators. "
        "I'm on the positive side regarding this film but one thing really caught my attention as I watched: "
        "the beautiful and sensitive score written in a Coplandesque Americana style. "
        "My surprise was great when I discovered the score to have been written by none other than John Williams himself. "
        "True he has written sensitive and poignant scores such as Schindler's List but one usually associates "
        "his name with such bombasticities as Star Wars. "
        "But in my opinion what Williams has written for this movie surpasses anything I've ever heard of his "
        "for tenderness, sensitivity and beauty, fully in keeping with the tender and lovely plot of the movie. "
        "And another recent score of his, for Catch Me if You Can, shows still more wit and sophistication. "
        "As to Stanley and Iris, I like education movies like How Green was my Valley and Konrack, "
        "that one with John Voigt and his young African American charges in South Carolina, "
        "and Danny deVito's Renaissance Man, etc. They tell a necessary story of intellectual and spiritual awakening, "
        "a story which can't be told often enough. This one is an excellent addition to that genre."
    )
]

title = "Text classification with Transformer"
description = "Implement a Transformer block as a Keras layer and use it for text classification."
article = (
    "Author: Xin Sui "
    "Based on <a href=\"https://keras.io/examples/nlp/text_classification_with_transformer\">this</a> "
    "keras example by <a href=\"https://twitter.com/NandanApoorv\">Apoorv Nandan</a>. "
    "HuggingFace Model <a href=\"https://huggingface.co/keras-io/text-classification-with-transformer\">here</a>"
)


app = gr.Interface(
    sentiment_analysis,
    inputs=model_input,
    outputs=model_output,
    examples=examples,
    title=title,
    description=description,
    article=article,
    allow_flagging='never',
    analytics_enabled=False,
)

app.launch(enable_queue=True)
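For reference, a minimal local sanity check of the pipeline above might look like the sketch below. It assumes the objects defined in app.py (`tokenizer`, `sentiment_analysis`) are already in scope; the sample review text is purely illustrative and not part of the commit.

# Illustrative sketch only: exercises the tokenizer and prediction path from app.py.
sample = "A tender, beautifully scored film that I enjoyed far more than expected."

# START_CHAR is prepended, unknown words map to OOV_CHAR, and pad_sequences
# left-pads/truncates to maxlen=200, so the result has shape (1, 200).
ids = tokenizer.tokenize_and_pad(sample)
print(ids.shape)

# sentiment_analysis returns the model's softmax scores keyed as "negative" and "positive".
print(sentiment_analysis(sample))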
gradio_queue.db
ADDED
File without changes
requirements.txt
ADDED
@@ -0,0 +1,90 @@
absl-py==1.1.0
aiohttp==3.8.1
aiosignal==1.2.0
analytics-python==1.4.0
anyio==3.6.1
asgiref==3.5.2
astunparse==1.6.3
async-timeout==4.0.2
attrs==21.4.0
backoff==1.10.0
bcrypt==3.2.2
cachetools==5.2.0
certifi==2022.5.18.1
cffi==1.15.0
charset-normalizer==2.0.12
click==8.1.3
cryptography==37.0.2
cycler==0.11.0
fastapi==0.78.0
ffmpy==0.3.0
filelock==3.7.1
flatbuffers==1.12
fonttools==4.33.3
frozenlist==1.3.0
gast==0.4.0
google-auth==2.7.0
google-auth-oauthlib==0.4.6
google-pasta==0.2.0
gradio==3.0.13
grpcio==1.46.3
h11==0.13.0
h5py==3.7.0
huggingface-hub @ https://github.com/huggingface/huggingface_hub/archive/main.zip
idna==3.3
Jinja2==3.1.2
keras==2.9.0
Keras-Preprocessing==1.1.2
kiwisolver==1.4.2
libclang==14.0.1
linkify-it-py==1.0.3
Markdown==3.3.7
markdown-it-py==2.1.0
MarkupSafe==2.1.1
matplotlib==3.5.2
mdit-py-plugins==0.3.0
mdurl==0.1.1
monotonic==1.6
multidict==6.0.2
numpy==1.22.4
oauthlib==3.2.0
opt-einsum==3.3.0
orjson==3.7.2
packaging==21.3
pandas==1.4.2
paramiko==2.11.0
Pillow==9.1.1
protobuf==3.19.4
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycparser==2.21
pycryptodome==3.14.1
pydantic==1.9.1
pydub==0.25.1
PyNaCl==1.5.0
pyparsing==3.0.9
python-dateutil==2.8.2
python-multipart==0.0.5
pytz==2022.1
PyYAML==6.0
requests==2.28.0
requests-oauthlib==1.3.1
rsa==4.8
six==1.16.0
sniffio==1.2.0
starlette==0.19.1
tensorboard==2.9.1
tensorboard-data-server==0.6.1
tensorboard-plugin-wit==1.8.1
tensorflow==2.9.1
tensorflow-estimator==2.9.0
tensorflow-io-gcs-filesystem==0.26.0
termcolor==1.1.0
tqdm==4.64.0
typing_extensions==4.2.0
uc-micro-py==1.0.1
urllib3==1.26.9
uvicorn==0.17.6
Werkzeug==2.1.2
wrapt==1.14.1
yarl==1.7.2