import streamlit as st import pandas as pd import numpy as np import re import json import joblib from sklearn.feature_extraction.text import TfidfVectorizer # Impor library tambahan #import matplotlib.pyplot as plt #import seaborn as sns #import plotly.express as px from wordcloud import WordCloud import nltk from nltk.corpus import stopwords #from transformers import pipeline # Fungsi untuk membersihkan teks dengan ekspresi reguler @st.cache_data def clean_text(text): # Tahap-1: Menghapus karakter non-ASCII text = re.sub(r'[^\x00-\x7F]+', '', text) # Tahap-2: Menghapus URL text = re.sub(r'http[s]?://.[a-zA-Z0-9./_?=%&#+!]+', '', text) text = re.sub(r'pic.twitter.com?.[a-zA-Z0-9./_?=%&#+!]+', '', text) # Tahap-3: Menghapus mentions text = re.sub(r'@[\w]+', '', text) # Tahap-4: Menghapus hashtag text = re.sub(r'#([\w]+)', '', text) # Tahap-5 Menghapus 'amp' yang menempel pada '&' dan 'gt' yang menempel pada '&' text = re.sub(r'&|>', '', text) # Tahap-6: Menghapus karakter khusus (simbol) text = re.sub(r'[!$%^&*@#()_+|~=`{}\[\]%\-:";\'<>?,./]', '', text) # Tahap-7: Menghapus angka text = re.sub(r'[0-9]+', '', text) # Tahap-8: Menggabungkan spasi ganda menjadi satu spasi text = re.sub(' +', ' ', text) # Tahap-9: Menghapus spasi di awal dan akhir kalimat text = text.strip() # Tahap-10: Konversi teks ke huruf kecil text = text.lower() # Tahap-11: koreksi duplikasi tiga karakter beruntun atau lebih (contoh. yukkk) # text = re.sub(r'([a-zA-Z])\1\1', '\\1', text) #text = re.sub(r'(.)(\1{2,})', r'\1\1', text) text = re.sub(r'(\w)\1{2,}', r'\1', text) return text @st.cache_data def load_file(kamus_path, kamus_sendiri_path): # Membaca kamus kata gaul Salsabila with open(kamus_path) as f: data = f.read() lookp_dict = json.loads(data) # Dict kata gaul saya sendiri yang tidak masuk di dict Salsabila with open(kamus_sendiri_path) as f: kamus_sendiri = f.read() kamus_gaul_baru = json.loads(kamus_sendiri) # Menambahkan dict kata gaul baru ke kamus yang sudah ada lookp_dict.update(kamus_gaul_baru) nltk.download("stopwords") stop_words = set(stopwords.words("indonesian")) return lookp_dict, stop_words kamus_path = '_json_colloquial-indonesian-lexicon (1).txt' kamus_sendiri_path = 'kamus_gaul_custom.txt' lookp_dict, stop_words = load_file(kamus_path, kamus_sendiri_path) # Fungsi untuk normalisasi kata gaul @st.cache_data def normalize_slang(text, slang_dict): words = text.split() normalized_words = [slang_dict.get(word, word) for word in words] return ' '.join(normalized_words) #---------------------------------------------------NLTK Remove Stopwords---------------------------------------------------------------------- @st.cache_data def remove_stopwords(text, stop_words): # Pecah teks menjadi kata-kata words = text.split() # Hapus stopwords bahasa Indonesia words = [word for word in words if word not in stop_words] return " ".join(words) #---------------------------------------------------TFIDF---------------------------------------------------------------------- # Memuat model TF-IDF dengan joblib (pastikan path-nya benar) tfidf_model_path = 'X_tfidf_model.joblib' tfidf_vectorizer = joblib.load(tfidf_model_path) # Fungsi untuk ekstraksi fitur TF-IDF #@st.cache_data #def extract_tfidf_features(texts, _tfidf_vectorizer): # tfidf_matrix = tfidf_vectorizer.transform(texts) # return tfidf_matrix #---------------------------------------------------Milih Model---------------------------------------------------------------------- # Fungsi untuk memilih model berdasarkan pilihan pengguna @st.cache_data def select_sentiment_model(selected_model): if selected_model == "Ensemble": model_path = 'ensemble_clf_soft_smote.joblib' elif selected_model == "Random Forest": model_path = 'best_rf_model_smote.joblib' elif selected_model == "Naive Bayes": model_path = 'naive_bayes_model_smote.joblib' elif selected_model == "Logistic Regression": model_path = 'logreg_model_smote.joblib' else: # Fallback ke model default jika pilihan tidak valid model_path = 'ensemble_clf_soft_smote.joblib' model = joblib.load(model_path) return model # Fungsi untuk prediksi sentimen def predict_sentiment(text, _model, _tfidf_vectorizer, slang_dict): # Tahap-1: Membersihkan dan normalisasi teks cleaned_text = clean_text(text) norm_slang_text = normalize_slang(cleaned_text, slang_dict) # Tahap-2: Ekstraksi fitur TF-IDF tfidf_matrix = _tfidf_vectorizer.transform([norm_slang_text]) # Tahap-3: Lakukan prediksi sentimen sentiment = _model.predict(tfidf_matrix) # Tahap-4: Menggantikan indeks dengan label sentimen labels = {0: "Negatif", 1: "Netral", 2: "Positif"} sentiment_label = labels[int(sentiment)] return sentiment_label @st.cache_data def get_emoticon(sentiment): if sentiment == "Positif": emoticon = "😄" # Emotikon untuk sentimen positif elif sentiment == "Negatif": emoticon = "😞" # Emotikon untuk sentimen negatif else: emoticon = "😐" # Emotikon untuk sentimen netral return emoticon @st.cache_data def buat_chart(df, target_year): target_year = int(target_year) st.write(f"Bar Chart Tahun {target_year}:") # Ambil bulan df['Date'] = pd.to_datetime(df['Date']) # Convert 'Date' column to datetime df['month'] = df['Date'].dt.month df['year'] = df['Date'].dt.year # Filter DataFrame for the desired year df_filtered = df[df['year'] == target_year] # Check if data for the target year is available if df_filtered.empty: st.warning(f"Tidak ada data untuk tahun {target_year}.") return # Mapping nilai bulan ke nama bulan bulan_mapping = { 1: f'Januari {target_year}', 2: f'Februari {target_year}', 3: f'Maret {target_year}', 4: f'April {target_year}', 5: f'Mei {target_year}', 6: f'Juni {target_year}', 7: f'Juli {target_year}', 8: f'Agustus {target_year}', 9: f'September {target_year}', 10: f'Oktober {target_year}', 11: f'November {target_year}', 12: f'Desember {target_year}' } # Mengganti nilai dalam kolom 'month' menggunakan mapping df_filtered['month'] = df_filtered['month'].replace(bulan_mapping) # Menentukan warna untuk setiap kategori dalam kolom 'score' warna_label = { 'Negatif': '#FF9AA2', 'Netral': '#FFDAC1', 'Positif': '#B5EAD7' } # Sorting unique scores unique_label = sorted(df_filtered['label'].unique()) # Ensure months are in the correct order months_order = [ f'Januari {target_year}', f'Februari {target_year}', f'Maret {target_year}', f'April {target_year}', f'Mei {target_year}', f'Juni {target_year}', f'Juli {target_year}', f'Agustus {target_year}', f'September {target_year}', f'Oktober {target_year}', f'November {target_year}', f'Desember {target_year}' ] # Sort DataFrame based on the custom order of months df_filtered['month'] = pd.Categorical(df_filtered['month'], categories=months_order, ordered=True) df_filtered = df_filtered.sort_values('month') # Create a bar chart with stacking and manual colors st.bar_chart( df_filtered.groupby(['month', 'label']).size().unstack().fillna(0), color=[warna_label[label] for label in unique_label] ) @st.cache_data def all_data_process(texts, df, _sentiment_model, _tfidf_vectorizer, lookp_dict, stop_words): results = [] analisis = False if 'Text' in df.columns: if 'Date' in df.columns: for text, date in zip(texts, df['Date']): sentiment_label = predict_sentiment(text, sentiment_model, tfidf_vectorizer, lookp_dict) emoticon = get_emoticon(sentiment_label) cleaned_text = clean_text(text) norm_slang_text = normalize_slang(cleaned_text, lookp_dict) tanpa_stopwords = remove_stopwords(norm_slang_text, stop_words) result_entry = { 'Date': date, 'Text': text, 'cleaned-text': cleaned_text, 'normalisasi-text': norm_slang_text, 'stopwords-remove': tanpa_stopwords, 'label': sentiment_label, 'emotikon': emoticon, } results.append(result_entry) analisis = True else: for text in texts: sentiment_label = predict_sentiment(text, sentiment_model, tfidf_vectorizer, lookp_dict) emoticon = get_emoticon(sentiment_label) cleaned_text = clean_text(text) norm_slang_text = normalize_slang(cleaned_text, lookp_dict) tanpa_stopwords = remove_stopwords(norm_slang_text, stop_words) result_entry = { 'Text': text, 'cleaned-text': cleaned_text, 'normalisasi-text': norm_slang_text, 'stopwords-remove': tanpa_stopwords, 'label': sentiment_label, 'emotikon': emoticon, } results.append(result_entry) analisis = True else: st.warning("Berkas XLSX harus memiliki kolom bernama 'Text' untuk analisis sentimen.") return results, analisis # Fungsi untuk membuat tautan unduhan @st.cache_data def get_table_download_link(df, download_format): if download_format == "XLSX": df.to_excel("hasil_sentimen.xlsx", index=False) return f'Unduh File XLSX' else: csv = df.to_csv(index=False) return f'Unduh File CSV' # Judul st.title("Sentiment Analysis : Based on Tweets Biskita Transpakuan Bogor 2022-2023") preference_barchart_date = False #-----------------------------------------------------General Settings--------------------------------------------------------------- with st.sidebar : st.subheader('Settings :') with st.expander("General Settings :"): # Tambahkan widget untuk memilih model selected_model = st.selectbox("Pilih Model Sentimen:", ("Ensemble", "Naive Bayes", "Logistic Regression", "Transformer")) # Memilih model sentimen berdasarkan pilihan pengguna sentiment_model = select_sentiment_model(selected_model) # Pilihan input teks manual atau berkas XLSX input_option = st.radio("Pilih metode input:", ("Teks Manual", "Unggah Berkas XLSX")) if input_option == "Teks Manual": # Input teks dari pengguna user_input = st.text_area("Masukkan teks:", "") else: # Input berkas XLSX uploaded_file = st.file_uploader("Unggah berkas XLSX", type=["xlsx"]) st.caption("Pastikan berkas XLSX Anda memiliki kolom yang bernama :blue[Text] _(Maks.500 data)_.") st.caption("Jika terdapat kolom type :blue[datetime], ganti nama kolom menjadi :blue[Date]") if uploaded_file is not None: df = pd.read_excel(uploaded_file) df = df[:500] if 'Text' not in df.columns: st.warning("Berkas XLSX harus memiliki kolom bernama 'Text' untuk analisis sentimen.") if not df['Text'].empty: st.warning("Kolom 'Text' harus mempunyai value.") else: texts = df['Text'] # Sesuaikan dengan nama kolom di berkas XLSX Anda if "Date" in df.columns : if not df['Date'].empty: dates = df['Date'] preference_barchart_date = True #-----------------------------------------------------Preference Settings-------------------------------------------------- with st.expander ("Preference Settings :"): colormap = st.selectbox("Pilih Warna Wordclouds :", ["Greys", "Purples", "Blues", "Greens", "Oranges", "Reds", "YlOrBr", "YlOrRd", "OrRd", "PuRd", "RdPu", "BuPu", "GnBu", "PuBu", "YlGnBu", "PuBuGn", "BuGn", "YlGn"]) if preference_barchart_date == True: bar = st.selectbox("Pilih Tampilan Bar Chart :", ("Distribusi Kelas", "Distribusi Kelas Berdasarkan Waktu"), index = 0) target_year = st.selectbox("Pilih Tahun Bar Chart :", df['Date'].str[:4].unique()) st.info('Tekan "Analysis" kembali jika tampilan menghilang', icon = 'ℹī¸') button = st.button("Analysis") tab1, tab2, tab3 = st.tabs(["📋 Documentation", "📈 Results", "đŸ¤ĩ Creator"]) with tab1: st.header("Documentation :") ''' Langkah - langkah : 1. Buka sidebar sebelah kiri 2. Buka General Settings 3. Pilih Model 4. Pilih Input ('Text Manual', 'File Xlsx') 5. Jika file Xlsx harus memiliki kolom 'Text' 6. Jika ada kolom type Datetime, ada fitur tambahan asalkan kolom bernama 'Date' 7. Buka Preferences Settings untuk menyetel tampilan Wordclouds/Barchart 8. Klik Analysis 9. Klik tab Results ''' with tab2: st.header("Results :") # Analisis sentimen results = [] analisis = False if input_option == "Teks Manual" and user_input: if button: # Pisahkan teks yang dimasukkan pengguna menjadi baris-baris terpisah user_texts = user_input.split('\n') for text in user_texts: sentiment_label = predict_sentiment(text, sentiment_model, tfidf_vectorizer, lookp_dict) emoticon = get_emoticon(sentiment_label) cleaned_text = clean_text(text) norm_slang_text = normalize_slang(cleaned_text, lookp_dict) tanpa_stopwords = remove_stopwords(norm_slang_text, stop_words) results.append({ 'Text': text, 'cleaned-text' : cleaned_text, 'normalisasi-text' : norm_slang_text, 'stopwords-remove' : tanpa_stopwords, 'label' : sentiment_label, 'emotikon' : emoticon, }) analisis = True elif input_option == "Unggah Berkas XLSX" and uploaded_file is not None: if button: results, analisis = all_data_process(texts, df, sentiment_model, tfidf_vectorizer, lookp_dict, stop_words) if results and analisis == True: df_results = pd.DataFrame(results) # Membagi tampilan menjadi dua kolom columns = st.columns(2) # Kolom pertama untuk Word Cloud with columns[0]: st.write("Wordclouds :") all_texts = [result['stopwords-remove'] for result in results if result['stopwords-remove'] is not None and not pd.isna(result['stopwords-remove'])] all_texts = " ".join(all_texts) if all_texts: wordcloud = WordCloud(width=800, height=660, background_color='white', colormap=colormap, # Warna huruf contour_color='black', # Warna kontur contour_width=2, # Lebar kontur mask=None, # Gunakan mask untuk bentuk kustom ).generate(all_texts) st.image(wordcloud.to_array()) else: st.write("Tidak ada data untuk ditampilkan dalam Word Cloud.") if 'Date' in df_results.columns: if bar == "Distribusi Kelas Berdasarkan Waktu": if not df_results['Date'].empty: with columns[1]: buat_chart(df_results, target_year) else : # Kolom kedua untuk Bar Chart with columns[1]: st.write("Bar Chart :") # Membuat bar chart st.bar_chart( df_results["label"].value_counts() ) else : # Kolom kedua untuk Bar Chart with columns[1]: st.write("Bar Chart :") # Membuat bar chart st.bar_chart( df_results["label"].value_counts() ) # Menampilkan hasil analisis sentimen dalam kotak yang dapat diperluas with st.expander("Hasil Analisis Sentimen"): # Tampilkan tabel hasil analisis sentimen st.write(pd.DataFrame(results)) if results: # Simpan DataFrame ke dalam file CSV df = pd.DataFrame(results) csv = df.to_csv(index=False) # Tampilkan tombol unduh CSV st.download_button(label="Unduh CSV", data=csv, key="csv_download", file_name="hasil_sentimen.csv") else: st.write("Tidak ada data untuk diunduh.") with tab3: st.header("Profile :") st.image('https://naufalnashif.github.io/assets/images/WhatsApp%20Image%202023-01-26%20at%2020.37.17.jpeg', caption='Naufal Nashif') st.subheader('Hello, nice to meet you !') # Tautan ke GitHub github_link = "https://github.com/naufalnashif/" st.markdown(f"GitHub: [{github_link}]({github_link})") # Tautan ke Instagram instagram_link = "https://www.instagram.com/naufal.nashif/" st.markdown(f"Instagram: [{instagram_link}]({instagram_link})") # Garis pemisah st.divider() st.write('Thank you for trying the demo!') left, right = st.columns(2) with left : st.caption('Best regards, Naufal Nashif :sunglasses:') with right : st.caption('Šī¸ 2023')