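# Gradio demo for Indonesian sentiment analysis and named-entity recognition,
# with an extra tab that scrapes tweets for a keyword and charts their sentiment.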
from transformers import pipeline
import matplotlib.pyplot as plt
import twitter_scraper as ts
import gradio as gr
from gradio.mix import Parallel
pretrained_sentiment = "w11wo/indonesian-roberta-base-sentiment-classifier"
pretrained_ner = "cahya/bert-base-indonesian-NER"
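
# Hugging Face pipelines built from the pretrained Indonesian models above:
# the sentiment classifier returns scores for every label, the NER model tags entities.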
sentiment_pipeline = pipeline(
    "sentiment-analysis",
    model=pretrained_sentiment,
    tokenizer=pretrained_sentiment,
    return_all_scores=True
)

ner_pipeline = pipeline(
    "ner",
    model=pretrained_ner,
    tokenizer=pretrained_ner
)
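
# Example Indonesian sentences for the single-input demo.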
examples = [
    "Jokowi sangat kecewa dengan POLRI atas kerusuhan yang terjadi di Malang",
    "Lesti marah terhadap perlakuan KDRT yang dilakukan oleh Bilar",
    "Ungkapan rasa bahagia diutarakan oleh Coki Pardede karena kebebasannya dari penjara"
]
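
# Return a {label: score} mapping for a single text, suitable for a gr.Label output.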
def sentiment_analysis(text):
    output = sentiment_pipeline(text)
    return {elm["label"]: elm["score"] for elm in output[0]}
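
# Wrap the raw token-classification output in the {"text", "entities"} dict that gr.HighlightedText renders.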
def ner(text):
    output = ner_pipeline(text)
    return {"text": text, "entities": output}
def sentiment_ner(text):
    return sentiment_analysis(text), ner(text)
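
# Attach the top predicted label and its rounded score to each row of the dataframe.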
def sentiment_df(df):
    text_list = list(df["Text"].astype(str).values)
    results = [sentiment_pipeline(text)[0] for text in text_list]
    top_preds = [max(scores, key=lambda s: s["score"]) for scores in results]
    df["Label"] = [pred["label"] for pred in top_preds]
    df["Score"] = [round(pred["score"], 3) for pred in top_preds]
    return df
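
# Scrape tweets for a keyword, classify them, and return a sentiment pie chart plus the labelled rows.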
def twitter_analyzer(keyword, max_tweets):
    df = ts.scrape_tweets(keyword, max_tweets=max_tweets)
    df["Text"] = df["Text"].apply(ts.preprocess_text)
    df = sentiment_df(df)
    fig = plt.figure()
    df.groupby(["Label"])["Text"].count().plot.pie(autopct="%.1f%%", figsize=(6, 6))
    return fig, df[["URL", "Text", "Label", "Score"]]
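
# Two-tab Gradio UI: single-text analysis and keyword-based Twitter analysis.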
if __name__ == "__main__":
    with gr.Blocks() as demo:
        gr.Markdown("""Entity Based Sentiment Analysis Indonesia""")
        gr.Markdown(
            """
            """
        )
        with gr.Tab("Single Input"):
            with gr.Blocks():
                with gr.Row():
                    with gr.Column():
                        input_text = gr.Textbox(label="Input Text")
                        analyze_button = gr.Button("Analyze")
                    with gr.Column():
                        sent_output = gr.Label(label="Sentiment Analysis")
                        ner_output = gr.HighlightedText(label="Named Entity Recognition")
                analyze_button.click(sentiment_ner, inputs=input_text, outputs=[sent_output, ner_output])
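
                # Assumed addition: expose the predefined example sentences in this tab via gr.Examples.
                gr.Examples(examples=examples, inputs=input_text)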
# sentiment_demo = gr.Interface(
# fn=sentiment_analysis,
# inputs="text",
# outputs="label"
# )
# ner_demo = gr.Interface(
# ner,
# "text",
# gr.HighlightedText(),
# examples=examples,
# allow_flagging='never'
# )
# Parallel(
# sentiment_demo, ner_demo,
# inputs=gr.Textbox(lines=10, label="Input Text", placeholder="Enter sentences here..."),
# examples=examples
# )
# gr.InterfaceList([sentiment_demo, ner_demo])
with gr.Tab("Twitter"):
with gr.Blocks():
with gr.Row():
with gr.Column():
keyword_textbox = gr.Textbox(lines=1, label="Keyword")
max_tweets_component = gr.Number(value=10, label="Total of Tweets to Scrape", precision=0)
submit_button = gr.Button("Submit")
plot_component = gr.Plot(label="Pie Chart of Sentiments")
dataframe_component = gr.DataFrame(type="pandas",
label="Dataframe",
max_rows=(20,'fixed'),
overflow_row_behaviour='paginate',
wrap=True)
submit_button.click(twitter_analyzer,
inputs=[keyword_textbox, max_tweets_component],
outputs=[plot_component, dataframe_component])
        gr.Markdown(
            """
            """
        )

    demo.launch(inbrowser=True)