|
|
|
import json
import re
from base64 import b64encode
from io import BytesIO

import joblib
import nltk
import numpy as np
import pandas as pd
import streamlit as st
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from wordcloud import WordCloud
|
|
|
|
|
|
|
|
|
@st.cache_data
def clean_text(text):
    """Normalize a raw tweet string for sentiment analysis.

    Removes non-ASCII characters, URLs, twitter picture links, @mentions,
    hashtags, leftover entities, punctuation, digits and runs of spaces,
    then lowercases and collapses any character repeated three or more
    times in a row down to a single occurrence.
    """
    # Ordered substitution table; order matters (URLs must be stripped
    # before the generic punctuation sweep removes their separators).
    substitutions = (
        (r'[^\x00-\x7F]+', ''),                            # non-ASCII
        (r'http[s]?://.[a-zA-Z0-9./_?=%&#+!]+', ''),       # http(s) links
        (r'pic.twitter.com?.[a-zA-Z0-9./_?=%&#+!]+', ''),  # twitter pic links
        (r'@[\w]+', ''),                                   # @mentions
        (r'#([\w]+)', ''),                                 # hashtags
        (r'&|>', ''),                                      # stray entities
        (r'[!$%^&*@#()_+|~=`{}\[\]%\-:";\'<>?,./]', ''),   # punctuation
        (r'[0-9]+', ''),                                   # digits
        (' +', ' '),                                       # repeated spaces
    )
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)

    text = text.strip().lower()
    # Collapse 3+ repeats of a character to one (e.g. "baguuus" -> "bagus").
    return re.sub(r'(\w)\1{2,}', r'\1', text)
|
@st.cache_data
def load_file(kamus_path, kamus_sendiri_path):
    """Load and merge the slang dictionaries and the Indonesian stopwords.

    Parameters
    ----------
    kamus_path : str
        Path to the base colloquial-Indonesian lexicon (JSON text file).
    kamus_sendiri_path : str
        Path to the custom slang dictionary (JSON text file); its entries
        override the base lexicon on key collisions.

    Returns
    -------
    tuple
        (slang lookup dict, set of Indonesian stopwords).
    """
    # json.load reads straight from the handle (no intermediate string);
    # explicit UTF-8 avoids platform-dependent default encodings.
    with open(kamus_path, encoding='utf-8') as f:
        lookp_dict = json.load(f)

    with open(kamus_sendiri_path, encoding='utf-8') as f:
        kamus_gaul_baru = json.load(f)

    # Custom entries take precedence over the base lexicon.
    lookp_dict.update(kamus_gaul_baru)

    # quiet=True keeps nltk's download log out of the Streamlit UI.
    nltk.download("stopwords", quiet=True)
    stop_words = set(stopwords.words("indonesian"))
    return lookp_dict, stop_words
|
|
|
# Dictionary source files: the base colloquial lexicon plus a custom slang list.
kamus_path = '_json_colloquial-indonesian-lexicon (1).txt'
kamus_sendiri_path = 'kamus_gaul_custom.txt'
# Loaded once at module level so every later call shares one dict / stopword set.
lookp_dict, stop_words = load_file(kamus_path, kamus_sendiri_path)
|
|
|
|
|
@st.cache_data
def normalize_slang(text, slang_dict):
    """Replace each whitespace-separated token with its slang_dict entry,
    leaving tokens that have no mapping untouched."""
    normalized = (slang_dict.get(token, token) for token in text.split())
    return ' '.join(normalized)
|
|
|
|
|
|
|
@st.cache_data
def remove_stopwords(text, stop_words):
    """Drop every token found in stop_words, preserving token order."""
    kept = [token for token in text.split() if token not in stop_words]
    return " ".join(kept)
|
|
|
|
|
# Pre-fitted TF-IDF vectorizer; must be the same artifact used at training time
# so the feature space matches the classifiers loaded below.
tfidf_model_path = 'X_tfidf_model.joblib'
tfidf_vectorizer = joblib.load(tfidf_model_path)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@st.cache_data
def select_sentiment_model(selected_model):
    """Load the classifier artifact matching the sidebar choice.

    Parameters
    ----------
    selected_model : str
        One of "Ensemble", "Random Forest", "Naive Bayes",
        "Logistic Regression". Any other value (e.g. the "Transformer"
        option offered by the sidebar, which has no artifact here) falls
        back to the soft-voting ensemble, matching the original behavior.

    Returns
    -------
    object
        The unpickled sklearn-compatible model.
    """
    # Dict dispatch replaces the original if/elif chain; same paths.
    model_paths = {
        "Ensemble": 'ensemble_clf_soft_smote.joblib',
        "Random Forest": 'best_rf_model_smote.joblib',
        "Naive Bayes": 'naive_bayes_model_smote.joblib',
        "Logistic Regression": 'logreg_model_smote.joblib',
    }
    model_path = model_paths.get(selected_model, 'ensemble_clf_soft_smote.joblib')
    return joblib.load(model_path)
|
|
|
|
|
|
|
|
|
def predict_sentiment(text, _model, _tfidf_vectorizer, slang_dict):
    """Predict the sentiment label of a single raw text.

    Parameters
    ----------
    text : str
        Raw input text (tweet).
    _model : object
        Fitted classifier with a ``predict`` method (underscore prefix keeps
        Streamlit cache hashing away from it when called from cached code).
    _tfidf_vectorizer : TfidfVectorizer
        Fitted vectorizer matching the model's training features.
    slang_dict : dict
        Slang normalization lookup.

    Returns
    -------
    str
        "Negatif", "Netral" or "Positif".
    """
    cleaned_text = clean_text(text)
    norm_slang_text = normalize_slang(cleaned_text, slang_dict)

    # NOTE(review): stopwords are NOT removed before vectorizing — confirm
    # this matches the preprocessing used when the model was trained.
    tfidf_matrix = _tfidf_vectorizer.transform([norm_slang_text])

    sentiment = _model.predict(tfidf_matrix)

    labels = {0: "Negatif", 1: "Netral", 2: "Positif"}
    # predict() returns an array; index the single element explicitly —
    # int() on a 1-element ndarray is deprecated in recent NumPy.
    return labels[int(sentiment[0])]
|
@st.cache_data
def get_emoticon(sentiment):
    """Map a sentiment label to its display emoticon (neutral by default)."""
    emoticons = {
        "Positif": "π",
        "Negatif": "π",
    }
    return emoticons.get(sentiment, "π")
|
|
|
@st.cache_data
def buat_chart(df, target_year):
    """Render a stacked monthly bar chart of sentiment labels for one year.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a 'Date' column parseable by ``pd.to_datetime`` and a
        'label' column with the sentiment classes.
    target_year : int or str
        Year to display; coerced to int.

    Renders via Streamlit and returns nothing.
    """
    target_year = int(target_year)
    st.write(f"Bar Chart Tahun {target_year}:")

    # Work on a copy: the original mutated the caller's dataframe in place
    # (datetime conversion plus helper columns), which leaks out of this
    # cached function.
    df = df.copy()
    df['Date'] = pd.to_datetime(df['Date'])
    df['month'] = df['Date'].dt.month
    df['year'] = df['Date'].dt.year

    # .copy() so the column assignments below don't hit pandas'
    # SettingWithCopyWarning on a view of df.
    df_filtered = df[df['year'] == target_year].copy()

    if df_filtered.empty:
        st.warning(f"Tidak ada data untuk tahun {target_year}.")
        return

    # Month number -> Indonesian month name suffixed with the year.
    bulan_mapping = {
        1: f'Januari {target_year}',
        2: f'Februari {target_year}',
        3: f'Maret {target_year}',
        4: f'April {target_year}',
        5: f'Mei {target_year}',
        6: f'Juni {target_year}',
        7: f'Juli {target_year}',
        8: f'Agustus {target_year}',
        9: f'September {target_year}',
        10: f'Oktober {target_year}',
        11: f'November {target_year}',
        12: f'Desember {target_year}'
    }

    df_filtered['month'] = df_filtered['month'].replace(bulan_mapping)

    # Pastel colors per sentiment class.
    warna_label = {
        'Negatif': '#FF9AA2',
        'Netral': '#FFDAC1',
        'Positif': '#B5EAD7'
    }

    # Sorted so the color list below lines up with the unstacked columns.
    unique_label = sorted(df_filtered['label'].unique())

    # Chronological ordering derived from the mapping (was duplicated as a
    # hand-written 12-element list in the original).
    months_order = [bulan_mapping[m] for m in range(1, 13)]

    df_filtered['month'] = pd.Categorical(df_filtered['month'], categories=months_order, ordered=True)
    df_filtered = df_filtered.sort_values('month')

    st.bar_chart(
        df_filtered.groupby(['month', 'label']).size().unstack().fillna(0),
        color=[warna_label[label] for label in unique_label]
    )
|
|
|
@st.cache_data
def all_data_process(texts, df, _sentiment_model, _tfidf_vectorizer, lookp_dict, stop_words):
    """Run the full preprocessing + prediction pipeline over uploaded rows.

    Parameters
    ----------
    texts : iterable of str
        The 'Text' column values to analyze.
    df : pandas.DataFrame
        Source frame; used to check for 'Text'/'Date' columns and to pair
        each text with its date when available.
    _sentiment_model, _tfidf_vectorizer : object
        Fitted model and vectorizer (underscore prefix excludes them from
        Streamlit cache hashing).
    lookp_dict : dict
        Slang normalization lookup.
    stop_words : set
        Stopword set for the display columns.

    Returns
    -------
    tuple
        (results, analisis): list of per-row dicts, and a flag telling
        whether any analysis actually ran.
    """
    def _analyze(text):
        # One row of the pipeline: predict, then keep every intermediate
        # preprocessing stage for the results table / download.
        # BUGFIX: the original called predict_sentiment with the GLOBAL
        # sentiment_model / tfidf_vectorizer instead of the parameters,
        # silently ignoring whatever the caller passed in.
        sentiment_label = predict_sentiment(text, _sentiment_model, _tfidf_vectorizer, lookp_dict)
        cleaned_text = clean_text(text)
        norm_slang_text = normalize_slang(cleaned_text, lookp_dict)
        return {
            'Text': text,
            'cleaned-text': cleaned_text,
            'normalisasi-text': norm_slang_text,
            'stopwords-remove': remove_stopwords(norm_slang_text, stop_words),
            'label': sentiment_label,
            'emotikon': get_emoticon(sentiment_label),
        }

    results = []
    analisis = False

    # Guard clause replaces the original nested if/else.
    if 'Text' not in df.columns:
        st.warning("Berkas XLSX harus memiliki kolom bernama 'Text' untuk analisis sentimen.")
        return results, analisis

    if 'Date' in df.columns:
        # Keep the date as the first key, as in the original result rows.
        for text, date in zip(texts, df['Date']):
            result_entry = {'Date': date}
            result_entry.update(_analyze(text))
            results.append(result_entry)
    else:
        for text in texts:
            results.append(_analyze(text))

    analisis = True
    return results, analisis
|
|
|
|
|
@st.cache_data
def get_table_download_link(df, download_format):
    """Build an HTML anchor that downloads the results table.

    Parameters
    ----------
    df : pandas.DataFrame
        Table to export.
    download_format : str
        "XLSX" for an Excel workbook; anything else yields CSV.

    Returns
    -------
    str
        An ``<a>`` tag with the file embedded as a base64 data URI.
    """
    if download_format == "XLSX":
        # Write the workbook to memory and embed it as a data URI: the
        # original returned href="hasil_sentimen.xlsx", a server-side path
        # the user's browser cannot fetch from a deployed Streamlit app.
        buffer = BytesIO()
        df.to_excel(buffer, index=False)
        b64 = b64encode(buffer.getvalue()).decode()
        return (
            '<a href="data:application/vnd.openxmlformats-officedocument.'
            f'spreadsheetml.sheet;base64,{b64}" '
            'download="hasil_sentimen.xlsx">Unduh File XLSX</a>'
        )
    else:
        # b64encode comes from base64 — missing from the original imports.
        csv = df.to_csv(index=False)
        return f'<a href="data:file/csv;base64,{b64encode(csv.encode()).decode()}" download="hasil_sentimen.csv">Unduh File CSV</a>'
|
|
|
|
|
|
|
# Page header; the flag below is flipped on when uploaded data carries usable dates.
st.title("Sentiment Analysis : Based on Tweets Biskita Transpakuan Bogor 2022-2023")
preference_barchart_date = False
|
|
|
# --- Sidebar: model choice, input method, and chart preferences ---
with st.sidebar :
    with st.expander("General Settings :"):

        # NOTE(review): "Transformer" is offered here but
        # select_sentiment_model() has no matching artifact and falls back
        # to the ensemble model — confirm this is intended.
        selected_model = st.selectbox("Pilih Model Sentimen:", ("Ensemble", "Naive Bayes", "Logistic Regression", "Transformer"))

        sentiment_model = select_sentiment_model(selected_model)

        input_option = st.radio("Pilih metode input:", ("Teks Manual", "Unggah Berkas XLSX"))

        if input_option == "Teks Manual":
            user_input = st.text_area("Masukkan teks:", "")
        else:
            uploaded_file = st.file_uploader("Unggah berkas XLSX", type=["xlsx"])
            st.caption("Pastikan berkas XLSX Anda memiliki kolom yang bernama :blue[Text] _(Maks.500 data)_.")
            st.caption("Jika terdapat kolom type :blue[datetime], ganti nama kolom menjadi :blue[Date]")

            if uploaded_file is not None:
                df = pd.read_excel(uploaded_file)
                # Cap at 500 rows to keep the app responsive.
                df = df[:500]
                if 'Text' not in df.columns:
                    # BUGFIX: the original warned here but then indexed
                    # df['Text'] anyway, raising KeyError on bad files.
                    st.warning("Berkas XLSX harus memiliki kolom bernama 'Text' untuk analisis sentimen.")
                elif df['Text'].empty:
                    # BUGFIX: the original condition was inverted
                    # ("if not df['Text'].empty"), which warned on VALID
                    # data and left `texts` undefined for the Results tab.
                    st.warning("Kolom 'Text' harus mempunyai value.")
                else:
                    texts = df['Text']
                    if "Date" in df.columns and not df['Date'].empty:
                        dates = df['Date']
                        preference_barchart_date = True

    with st.expander ("Preference Settings :"):
        colormap = st.selectbox("Pilih Warna Wordclouds :", ["Greys", "Purples", "Blues", "Greens", "Oranges", "Reds", "YlOrBr", "YlOrRd", "OrRd", "PuRd", "RdPu", "BuPu", "GnBu", "PuBu", "YlGnBu", "PuBuGn", "BuGn", "YlGn"])
        if preference_barchart_date == True:
            bar = st.selectbox("Pilih Tampilan Bar Chart :", ("Distribusi Kelas", "Distribusi Kelas Berdasarkan Waktu"), index = 0)
            # NOTE(review): `.str[:4]` assumes the Date column holds strings
            # like "2022-..."; a datetime-typed column would fail here.
            target_year = st.selectbox("Pilih Tahun Bar Chart :", df['Date'].str[:4].unique())
|
|
|
# Three-tab layout; all analysis output lives in the "Results" tab.
tab1, tab2, tab3 = st.tabs(["Profile", "Documentation", "Results"])

with tab1:
    st.header("Profile :")
    st.image('https://naufalnashif.github.io/assets/images/WhatsApp%20Image%202023-01-26%20at%2020.37.17.jpeg', caption='Naufal Nashif')

with tab2:
    st.header("Documentation :")
|
|
|
|
|
with tab3:
    st.header("Results :")

    results = []
    analisis = False
    # Manual input: one prediction per line of the text area.
    if input_option == "Teks Manual" and user_input:
        if st.button("Analysis"):

            user_texts = user_input.split('\n')
            for text in user_texts:
                sentiment_label = predict_sentiment(text, sentiment_model, tfidf_vectorizer, lookp_dict)
                emoticon = get_emoticon(sentiment_label)
                cleaned_text = clean_text(text)
                norm_slang_text = normalize_slang(cleaned_text, lookp_dict)
                tanpa_stopwords = remove_stopwords(norm_slang_text, stop_words)

                # Keep every preprocessing stage for the results table.
                results.append({
                    'Text': text,
                    'cleaned-text' : cleaned_text,
                    'normalisasi-text' : norm_slang_text,
                    'stopwords-remove' : tanpa_stopwords,
                    'label' : sentiment_label,
                    'emotikon' : emoticon,
                })
            analisis = True

    # Uploaded spreadsheet: delegate to the cached batch pipeline.
    elif input_option == "Unggah Berkas XLSX" and uploaded_file is not None:
        if st.button("Analysis"):
            # NOTE(review): `texts` is only assigned when the sidebar
            # validation succeeded — otherwise this raises NameError.
            results, analisis = all_data_process(texts, df, sentiment_model, tfidf_vectorizer, lookp_dict, stop_words)

    # Streamlit reruns the whole script on every widget interaction, which
    # resets the button state — hence the hint to press "Analysis" again.
    st.info('Tekan "Analysis" kembali jika tampilan menghilang', icon = 'βΉοΈ')
    if results and analisis == True:
        df_results = pd.DataFrame(results)

        columns = st.columns(2)

        # Left column: word cloud built from the fully preprocessed texts.
        with columns[0]:
            st.write("Wordclouds :")
            all_texts = [result['stopwords-remove'] for result in results if result['stopwords-remove'] is not None and not pd.isna(result['stopwords-remove'])]
            all_texts = " ".join(all_texts)

            if all_texts:
                wordcloud = WordCloud(width=800, height=660, background_color='white',
                                      colormap=colormap,
                                      contour_color='black',
                                      contour_width=2,
                                      mask=None,
                                      ).generate(all_texts)
                st.image(wordcloud.to_array())
            else:
                st.write("Tidak ada data untuk ditampilkan dalam Word Cloud.")

        # Right column: time-based chart when dates exist and that view was
        # chosen in the sidebar; otherwise a simple per-class count.
        if 'Date' in df_results.columns:
            if bar == "Distribusi Kelas Berdasarkan Waktu":
                if not df_results['Date'].empty:
                    with columns[1]:
                        buat_chart(df_results, target_year)
            else :

                with columns[1]:
                    st.write("Bar Chart :")

                    st.bar_chart(
                        df_results["label"].value_counts()
                    )
        else :

            with columns[1]:
                st.write("Bar Chart :")

                st.bar_chart(
                    df_results["label"].value_counts()
                )

    # Full results table plus a CSV download button.
    with st.expander("Hasil Analisis Sentimen"):

        st.write(pd.DataFrame(results))

        if results:

            df = pd.DataFrame(results)
            csv = df.to_csv(index=False)

            st.download_button(label="Unduh CSV", data=csv, key="csv_download", file_name="hasil_sentimen.csv")
        else:
            st.write("Tidak ada data untuk diunduh.")
|
|
|
|
|
|
|
# --- Footer: author credit and contact links ---
st.divider()
left, right = st.columns([1,3])

with left :
    st.image('https://naufalnashif.github.io/assets/images/WhatsApp%20Image%202023-01-26%20at%2020.37.17.jpeg', caption='Naufal Nashif')

with right :

    github_link = "https://github.com/naufalnashif/"
    st.markdown(f"GitHub: [{github_link}]({github_link})")

    instagram_link = "https://www.instagram.com/naufal.nashif/"
    st.markdown(f"Instagram: [{instagram_link}]({instagram_link})")

st.write('Thank you for trying the demo!')
st.caption('Best regards, Naufal Nashif :sunglasses:')
|
|