BulatF committed
Commit bb50616
1 Parent(s): 768bcdc

Upload 2 files

Files changed (2)
  1. app.py +52 -3
  2. requirements.txt +5 -1
app.py CHANGED
@@ -2,16 +2,21 @@ import streamlit as st
 import pandas as pd
 from transformers import AutoTokenizer, AutoModelForSequenceClassification
 from transformers import pipeline
-
+from fuzzywuzzy import fuzz
+from sklearn.feature_extraction.text import TfidfVectorizer
 import torch.nn.functional as F
 import torch
 import io
 import base64
 from stqdm import stqdm
+import nltk
 
+from nltk.corpus import stopwords
+nltk.download('stopwords')
 import matplotlib.pyplot as plt
 import numpy as np
 
+stopwords_list = stopwords.words('english') + ['your_additional_stopwords_here']
 
 # Define the model and tokenizer
 model_name = 'nlptown/bert-base-multilingual-uncased-sentiment'
@@ -42,6 +47,19 @@ def get_table_download_link(df):
     b64 = base64.b64encode(csv.encode()).decode()
     return f'<a href="data:file/csv;base64,{b64}" download="data.csv">Download csv file</a>'
 
+def filter_dataframe(df, review_column, filter_words):
+    # Return full DataFrame if filter_words is empty or contains only spaces
+    if not filter_words or all(word.isspace() for word in filter_words):
+        return df
+    filter_scores = df[review_column].apply(lambda x: max([fuzz.token_set_ratio(x, word) for word in filter_words]))
+    return df[filter_scores > 70]  # Adjust this threshold as necessary
+
+
+
+def process_filter_words(filter_words_input):
+    filter_words = [word.strip() for word in filter_words_input.split(',')]
+    return filter_words
+
 
 # Function for classifying with the new model
 def classify_with_new_classes(reviews, class_names):
@@ -78,7 +96,11 @@ def main():
         review_column = st.selectbox('Select the column from your excel file containing text', df.columns)
         df[review_column] = df[review_column].astype(str)
 
+
+        filter_words_input = st.text_input('Enter words to filter the data by, separated by comma (or leave empty)')  # New input field for filter words
+        filter_words = [] if filter_words_input.strip() == "" else process_filter_words(filter_words_input)  # Process the filter words
         class_names = st.text_input('Enter the possible class names separated by comma')  # New input field for class names
+        df = filter_dataframe(df, review_column, filter_words)  # Filter the DataFrame
     except Exception as e:
         st.write("An error occurred while reading the uploaded file. Please make sure it's a valid Excel file.")
         return
@@ -109,6 +131,8 @@ def main():
 
 
 
+
+
 def process_reviews(df, review_column, class_names):
     with st.spinner('Classifying reviews...'):
         progress_bar = st.progress(0)
@@ -134,7 +158,9 @@ def process_reviews(df, review_column, class_names):
         class_scores_dict[name] = [score[i] for score in class_scores]
 
     # Add a new column with the class that has the highest score
-    df['Highest Class'] = df[class_names].idxmax(axis=1)
+    if class_names and not all(name.isspace() for name in class_names):
+        df['Highest Class'] = df[class_names].idxmax(axis=1)
+
 
     df_new = df.copy()
     df_new['raw_scores'] = raw_scores
@@ -192,14 +218,37 @@ def display_dataframe(df, df_display):
     )
 
     st.dataframe(df_display)
+
+def important_words(reviews, num_words=5):
+    if len(reviews) == 0:
+        return []
+    vectorizer = TfidfVectorizer(stop_words=stopwords_list, max_features=10000)
+    vectors = vectorizer.fit_transform(reviews)
+    features = vectorizer.get_feature_names_out()
+    indices = np.argsort(vectorizer.idf_)[::-1]
+    top_features = [features[i] for i in indices[:num_words]]
+    return top_features
 
 def display_ratings(df, review_column):
     cols = st.columns(5)
 
     for i in range(1, 6):
-        rating_counts = df[df['Rating'] == i].shape[0]
+        rating_reviews = df[df['Rating'] == i][review_column]
+        top_words = important_words(rating_reviews)
+
+        rating_counts = rating_reviews.shape[0]
         cols[i-1].markdown(f"### {rating_counts}")
         cols[i-1].markdown(f"{'⭐' * i}")
+
+        # Display the most important words for each rating
+        cols[i-1].markdown(f"#### Most Important Words:")
+        if top_words:
+            for word in top_words:
+                cols[i-1].markdown(f"**{word}**")
+        else:
+            cols[i-1].markdown("No important words to display")
+
+
 
 
 
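Note on the new fuzzy filter: filter_dataframe keeps a row when the best fuzz.token_set_ratio score between the review text and any filter word exceeds 70. A minimal standalone sketch of that idea, assuming only pandas and fuzzywuzzy are installed; the sample reviews are invented for illustration:

import pandas as pd
from fuzzywuzzy import fuzz

# Invented sample data, for illustration only
df = pd.DataFrame({'review': ['Battery life is great',
                              'Awful delivery time',
                              'Battery died after a week']})
filter_words = ['battery']

# Same scoring idea as filter_dataframe: best token_set_ratio per row,
# keep rows scoring above the 70-point threshold
scores = df['review'].apply(lambda x: max(fuzz.token_set_ratio(x, w) for w in filter_words))
print(df[scores > 70])  # expected to keep the two battery-related reviews
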
requirements.txt CHANGED
@@ -5,4 +5,8 @@ torch
 stqdm
 openpyxl
 wordcloud
-matplotlib
+matplotlib
+fuzzywuzzy
+scikit-learn
+nltk
+numpy
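
The new scikit-learn, nltk and numpy entries back the important_words helper added to app.py. A minimal sketch of the same ranking, using scikit-learn's built-in English stop list instead of the nltk stopword download so it runs offline; the review strings are invented for illustration. Sorting by descending idf_ surfaces the terms that appear in the fewest reviews:

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

reviews = ['great battery and screen',
           'screen cracked on arrival',
           'battery drains overnight']

# Mirrors important_words: fit a TF-IDF vocabulary, then rank features
# by descending idf_ (rarest terms across the reviews first)
vectorizer = TfidfVectorizer(stop_words='english', max_features=10000)
vectorizer.fit_transform(reviews)
features = vectorizer.get_feature_names_out()
order = np.argsort(vectorizer.idf_)[::-1]
print([features[i] for i in order[:5]])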