hongaik committed on
Commit
2b2bf8e
1 Parent(s): 50cefb1

Upload utils.py

Files changed (1)
  1. utils.py +77 -0
utils.py ADDED
@@ -0,0 +1,77 @@
+ import re
+ import pickle
+ import numpy as np
+ import pandas as pd
+
+
+ # Load the fitted TF-IDF vectorisers and classifiers from disk
+ tfidf = pickle.load(open('models/tfidf.sav', 'rb'))
+ svc_sentiment = pickle.load(open('models/sentiment_model.sav', 'rb'))
+ tfidf_sentiment = pickle.load(open('models/tfidf_sentiment.sav', 'rb'))
+ svc = pickle.load(open('models/svc_model.sav', 'rb'))
+
+ # Topic labels, in the same order as the topic classifier's outputs
+ labels = [
+     'Product quality', 'Knowledge',
+     'Appointment', 'Service etiquette', 'Waiting time',
+     'Repair speed', 'Repair cost', 'Repair quality', 'Warranty',
+     'Product replacement', 'Loan sets']
+
+ # Sample CSV offered for download by the app, encoded as UTF-8 bytes
+ sample_file = pd.read_csv('sample.csv').to_csv(index=False).encode('utf-8')
+
+ print('utils imported!')
+
+ def get_single_prediction(text):
+
+     # Normalise the input into the format expected by the vectorisers
+     text = text.lower().strip()
+
+     # Vectorise the text; wrap it in a list so TF-IDF treats it as one document
+     # rather than iterating over individual characters
+     text_vectors = tfidf.transform([text])
+
+     # Topic predictions: one probability per label
+     results = svc.predict_proba(text_vectors).squeeze().round(2)
+     pred_prob = pd.DataFrame({'topic': labels, 'probability': results}).sort_values('probability', ascending=True)
+
+     # Sentiment predictions, using the sentiment vectoriser and its own vectors
+     text_vectors_sentiment = tfidf_sentiment.transform([text])
+     results_sentiment = svc_sentiment.predict_proba(text_vectors_sentiment).squeeze().round(2)
+     pred_prob_sentiment = pd.DataFrame({'sentiment': ['Negative', 'Positive'], 'probability': results_sentiment}).sort_values('probability', ascending=True)
+
+     return (pred_prob, pred_prob_sentiment)
+
+ def get_multiple_predictions(csv):
+
+     df = pd.read_csv(csv)
+     df.columns = ['sequence']
+
+     df['sequence_clean'] = df['sequence'].str.lower().str.strip()
+
+     # Set aside rows that are missing or blank; copy so we do not mutate a slice of df
+     invalid = df[(pd.isna(df['sequence_clean'])) | (df['sequence_clean'] == '')].copy()
+     invalid.drop(columns=['sequence_clean'], inplace=True)
+
+     # Keep only rows with non-blank text
+     df.dropna(inplace=True)
+     df = df[df['sequence_clean'] != ''].reset_index(drop=True)
+
+     # Vectorise text and get topic predictions (one column per label)
+     text_vectors = tfidf.transform(df['sequence_clean'])
+     pred_results = pd.DataFrame(svc.predict(text_vectors), columns=labels)
+
+     # Vectorise text and get sentiment predictions
+     text_vectors_sentiment = tfidf_sentiment.transform(df['sequence_clean'])
+     pred_results_sentiment = pd.DataFrame(svc_sentiment.predict(text_vectors_sentiment), columns=['sentiment'])
+
+     # Join predictions back to the original sequences; rows with no predicted topic are flagged as 'others'
+     final_results = df.join(pred_results).join(pred_results_sentiment)
+     final_results['others'] = final_results[labels].max(axis=1)
+     final_results['others'] = final_results['others'].apply(lambda x: 1 if x == 0 else 0)
+
+     final_results.drop(columns=['sequence_clean'], inplace=True)
+
+     # Append the invalid rows (left unscored) and return the result as UTF-8 encoded CSV bytes
+     if len(invalid) == 0:
+         return final_results.to_csv(index=False).encode('utf-8')
+     else:
+         return pd.concat([final_results, invalid]).reset_index(drop=True).to_csv(index=False).encode('utf-8')
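
For context, a minimal usage sketch of these helpers, assuming the pickled models under models/ and sample.csv sit next to utils.py; the feedback.csv path below is hypothetical:

    from utils import get_single_prediction, get_multiple_predictions

    # Single piece of feedback: returns (topic probabilities, sentiment probabilities) as DataFrames
    topics, sentiment = get_single_prediction('Repair took too long and the staff were unhelpful')
    print(topics.tail(3))    # three most likely topics (rows are sorted ascending by probability)
    print(sentiment)

    # CSV with a single column of feedback text: returns UTF-8 encoded CSV bytes
    csv_bytes = get_multiple_predictions('feedback.csv')    # hypothetical input file
    with open('predictions.csv', 'wb') as out:
        out.write(csv_bytes)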