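# app.py — Gradio Space for entity-based sentiment analysis of Indonesian text.
# Combines an Indonesian sentiment classifier with an Indonesian NER model, and
# adds a Twitter tab that scrapes tweets for a keyword and charts their sentiment.
# Assumption: `twitter_scraper` is a local helper module in this Space that
# provides scrape_tweets() and preprocess_text(); the code appears to target the
# Gradio 3.x API (gradio.mix, DataFrame max_rows/overflow_row_behaviour).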
from transformers import pipeline
import matplotlib.pyplot as plt
import twitter_scraper as ts
import gradio as gr
from gradio.mix import Parallel  # only needed by the commented-out Parallel demo below
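
# Pretrained Indonesian checkpoints from the Hugging Face Hub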
pretrained_sentiment = "w11wo/indonesian-roberta-base-sentiment-classifier"
pretrained_ner = "cahya/bert-base-indonesian-NER"

sentiment_pipeline = pipeline(
    "sentiment-analysis",
    model=pretrained_sentiment,
    tokenizer=pretrained_sentiment,
    return_all_scores=True
)

ner_pipeline = pipeline(
    "ner",
    model=pretrained_ner,
    tokenizer=pretrained_ner
)

examples = [
    "Jokowi sangat kecewa dengan POLRI atas kerusuhan yang terjadi di Malang",
    "Lesti marah terhadap perlakuan KDRT yang dilakukan oleh Bilar",
    "Ungkapan rasa bahagia diutarakan oleh Coki Pardede karena kebebasannya dari penjara"
]
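
# Note: `examples` is only referenced by the commented-out gr.Interface demos
# further below; the active Blocks UI does not use it.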

def sentiment_analysis(text):
    # Return a {label: score} mapping over all sentiment classes
    output = sentiment_pipeline(text)
    return {elm["label"]: elm["score"] for elm in output[0]}

def ner(text):
    # Return the dict format expected by gr.HighlightedText: the raw text plus its entities
    output = ner_pipeline(text)
    return {"text": text, "entities": output}

def sentiment_ner(text):
    return sentiment_analysis(text), ner(text)

def sentiment_df(df):
    text_list = list(df["Text"].astype(str).values)
    result = [sentiment_analysis(text) for text in text_list]
    # sentiment_analysis returns a {label: score} dict, so keep the top label and its score
    df["Label"] = [max(pred, key=pred.get) for pred in result]
    df["Score"] = [round(max(pred.values()), 3) for pred in result]
    return df

def twitter_analyzer(keyword, max_tweets):
    # Scrape and clean tweets for the keyword, classify them,
    # and summarize the label distribution as a pie chart
    df = ts.scrape_tweets(keyword, max_tweets=max_tweets)
    df["Text"] = df["Text"].apply(ts.preprocess_text)
    df = sentiment_df(df)
    fig = plt.figure()
    df.groupby(["Label"])["Text"].count().plot.pie(autopct="%.1f%%", figsize=(6, 6))
    return fig, df[["URL", "Text", "Label", "Score"]]

if __name__ == "__main__":
    with gr.Blocks() as demo:
        gr.Markdown("""Entity Based Sentiment Analysis Indonesia""")
        gr.Markdown(
            """
            """
        )
        with gr.Tab("Single Input"):
            with gr.Row():
                with gr.Column():
                    input_text = gr.Textbox(label="Input Text")
                    analyze_button = gr.Button("Analyze")
                with gr.Column():
                    sent_output = gr.Label(label="Sentiment Analysis")
                    ner_output = gr.HighlightedText(label="Named Entity Recognition")
            # sentiment_ner returns one value per output component (sentiment scores, entities)
            analyze_button.click(sentiment_ner, input_text, [sent_output, ner_output])
            # sentiment_demo = gr.Interface(
            #     fn=sentiment_analysis,
            #     inputs="text",
            #     outputs="label"
            # )
            # ner_demo = gr.Interface(
            #     ner,
            #     "text",
            #     gr.HighlightedText(),
            #     examples=examples,
            #     allow_flagging='never'
            # )
            # Parallel(
            #     sentiment_demo, ner_demo,
            #     inputs=gr.Textbox(lines=10, label="Input Text", placeholder="Enter sentences here..."),
            #     examples=examples
            # )
            # gr.InterfaceList([sentiment_demo, ner_demo])
        with gr.Tab("Twitter"):
            with gr.Blocks():
                with gr.Row():
                    with gr.Column():
                        keyword_textbox = gr.Textbox(lines=1, label="Keyword")
                        max_tweets_component = gr.Number(value=10, label="Total of Tweets to Scrape", precision=0)
                        submit_button = gr.Button("Submit")
                    plot_component = gr.Plot(label="Pie Chart of Sentiments")
                dataframe_component = gr.DataFrame(type="pandas",
                                                   label="Dataframe",
                                                   max_rows=(20, 'fixed'),
                                                   overflow_row_behaviour='paginate',
                                                   wrap=True)
                submit_button.click(twitter_analyzer,
                                    inputs=[keyword_textbox, max_tweets_component],
                                    outputs=[plot_component, dataframe_component])
        gr.Markdown(
            """
            """
        )
    demo.launch(inbrowser=True)