geetu040 committed · Commit 4d6d610 · 1 Parent(s): e577af3

Added dup_ques without word embeddings

.gitignore CHANGED
@@ -6,4 +6,5 @@
  /src/face_analytics/__pycache__
  /src/movie_rec/__pycache__
  /src/movie_2022_rec/__pycache__
- /src/movie_reviews/__pycache__
+ /src/movie_reviews/__pycache__
+ /src/dup_ques/__pycache__
main.py CHANGED
@@ -1,9 +1,10 @@
  from fastapi import FastAPI

  # Importing Models and Schemas
+ from src.dup_ques.main import dup_ques, Schema as DupQuesSchema
  from src.movie_reviews.main import movie_reviews, Schema as MovieReviewsSchema
  from src.cat_and_dog.main import cat_and_dog, Schema as CatAndDogSchema
- from src.face_analytics.main import face_analytics, Schema as FaceAnalytics
+ from src.face_analytics.main import face_analytics, Schema as FaceAnalyticsSchema
  from src.book_rec.main import book_rec, Schema as BookRecSchema
  from src.movie_rec.main import movie_rec, Schema as MovieRecSchema
  from src.movie_2022_rec.main import movie_2022_rec, Schema as Movie2022RecSchema
@@ -35,6 +36,10 @@ print(" ........... App Started ........... ")
  def index():
      return "Welcome to the API of PyModelsAI"

+ @app.post("/dup_ques")
+ def endpoint_dup_ques(req: DupQuesSchema):
+     return dup_ques(req)
+
  @app.post("/movie_reviews")
  def endpoint_movie_reviews(req: MovieReviewsSchema):
      return movie_reviews(req)
@@ -44,7 +49,7 @@ def endpoint_cat_and_dog(req: CatAndDogSchema):
      return cat_and_dog(req)

  @app.post("/face_analytics")
- def endpoint_face_analytics(req: CatAndDogSchema):
+ def endpoint_face_analytics(req: FaceAnalyticsSchema):
      return face_analytics(req)

  @app.post("/book_rec")
requirements.txt CHANGED
@@ -5,4 +5,6 @@ scikit-learn
  numpy
  tensorflow-cpu
  keras
- Pillow
+ Pillow
+ distance
+ fuzzywuzzy
src/dup_ques/main.py ADDED
@@ -0,0 +1,24 @@
+ import joblib
+ from pydantic import BaseModel
+ from src.dup_ques.preprocess import get_x
+
+ # SCHEMA
+ class Schema(BaseModel):
+     ques1: str
+     ques2: str
+
+ # Request Handler
+ def dup_ques(req):
+     ques1 = req.ques1
+     ques2 = req.ques2
+
+     X = get_x(ques1, ques2)
+     y = predict(X)
+
+     return y
+
+ # PIPELINE
+ pipeline = joblib.load("./src/dup_ques/pipeline.pkl")
+
+ def predict(X):
+     return pipeline.predict_proba(X).round(3).tolist()
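For a direct (non-HTTP) check, the handler can be imported and called with a Schema instance, as sketched below; this assumes pipeline.pkl and the word-embeddings file loaded by preprocess.py are both present locally, and the class order in the comment is an assumption, not something this diff confirms.

# Hypothetical direct invocation of the new handler.
from src.dup_ques.main import dup_ques, Schema

req = Schema(ques1="How do I learn Python?", ques2="What is the best way to learn Python?")
print(dup_ques(req))  # e.g. [[0.27, 0.73]] -- assumed order: [not duplicate, duplicate]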
src/dup_ques/pipeline.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fc8c1363c54b2332c840db78a82bed513bc78a4dcc78b7483f892185abb64a7
+ size 4090964
src/dup_ques/preprocess.py ADDED
@@ -0,0 +1,315 @@
+ import json
+
+ WORD_EMBEDDINGS_PATH = "./src/dup_ques/word_embeddings.json"
+ with open(WORD_EMBEDDINGS_PATH, 'rb') as f:
+     WORD_EMBEDDINGS = json.load(f)
+
+ import nltk
+
+ nltk.download('wordnet')
+ nltk.download('omw-1.4')
+ nltk.download('stopwords')
+
+ abbv = {
+     "AFAIK":"as far as I know", "IMO": "in my opinion", "IMHO": "in my humble opinion", "LGTM": "looks good to me", "AKA": "also known as", "ASAP": "as soon as possible", "BTW": "by the way", "FAQ": "frequently asked questions", "DIY": "do it yourself", "DM": "direct message", "FYI": "for your information", "IC": "i see", "IOW": "in other words", "IIRC": "If I Remember Correctly", "icymi":"In case you missed it", "CUZ": "because", "COS": "because", "nv": "nevermind", "PLZ": "please",
+ }
+
+ # https://en.wikipedia.org/wiki/Wikipedia%3aList_of_English_contractions
+ # https://stackoverflow.com/a/19794953
+ contractions = {
+     "ain't": "am not", "aren't": "are not", "can't": "can not", "can't've": "can not have", "'cause": "because", "could've": "could have", "couldn't": "could not", "couldn't've": "could not have", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hadn't've": "had not have", "hasn't": "has not", "haven't": "have not", "he'd": "he would", "he'd've": "he would have", "he'll": "he will", "he'll've": "he will have", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have", "i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have", "it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have", "mightn't": "might not", "mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have", "o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have", "so's": "so as", "that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would", "y'all'd've": "you all would have", "y'all're": "you all are", "y'all've": "you all have", "you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have", "'ve": " have", "n't": " not", "'re": " are", "'ll": " will",
+ }
+
+ import re
+ html_pattern = re.compile('<.*?>')
+ urls_pattern = re.compile(r'https?://\S+|www\.\S+')
+ emoji_pattern = re.compile("["
+     u"\U0001F600-\U0001F64F" # emoticons
+     u"\U0001F300-\U0001F5FF" # symbols & pictographs
+     u"\U0001F680-\U0001F6FF" # transport & map symbols
+     u"\U0001F1E0-\U0001F1FF" # flags (iOS)
+     "]+", flags=re.UNICODE)
+
+ from nltk.stem.porter import PorterStemmer
+ ps = PorterStemmer()
+
+ from nltk.stem import WordNetLemmatizer
+ lemmatizer = WordNetLemmatizer()
+
+ import string
+ punc = string.punctuation
+
+ from nltk.corpus import stopwords
+ stopwords = stopwords.words('english')
+
+ def text_preprocess(q, allow_stopwords=True):
+
+     q = str(q).lower().strip()
+
+     # HTML Tags
+     q = html_pattern.sub(r'', q)
+
+     # urls
+     q = urls_pattern.sub(r'', q)
+
+     # punctuations
+     q = q.translate(str.maketrans("", "", punc))
+
+     # Emojis
+     q = emoji_pattern.sub(r'', q)
+
+     # Replace certain special characters with their string equivalents
+     q = q.replace('%', ' percent')
+     q = q.replace('$', ' dollar ')
+     q = q.replace('₹', ' rupee ')
+     q = q.replace('€', ' euro ')
+     q = q.replace('@', ' at ')
+
+     # The pattern '[math]' appears around 900 times in the whole dataset.
+     q = q.replace('[math]', '')
+
+     # Replacing some numbers with string equivalents (not perfect, can be done better to account for more cases)
+     q = q.replace(',000,000,000 ', 'b ')
+     q = q.replace(',000,000 ', 'm ')
+     q = q.replace(',000 ', 'k ')
+     q = re.sub(r'([0-9]+)000000000', r'\1b', q)
+     q = re.sub(r'([0-9]+)000000', r'\1m', q)
+     q = re.sub(r'([0-9]+)000', r'\1k', q)
+
+     # Decontracting words
+     new_text = []
+
+     for word in q.split():
+
+         # Contractions
+         word = contractions.get(word.upper(), word)
+
+         # abbreviations
+         word = abbv.get(word.upper(), word)
+
+         # Stemming
+         # word = ps.stem(word)
+
+         # Lemmatizing
+         word = lemmatizer.lemmatize(word)
+
+         if word not in stopwords or allow_stopwords:
+             new_text.append(word)
+
+     q = ' '.join(new_text)
+
+     return q
+
+ import distance
+ from fuzzywuzzy import fuzz
+ import numpy as np
+ from numpy.linalg import norm
+ SAFE_DIV = 0.0001
+
+ def cos_sim(q1, q2, allow_stopwords=True):
+     q1 = [i for i in q1.split() if i not in stopwords or allow_stopwords]
+     q2 = [i for i in q2.split() if i not in stopwords or allow_stopwords]
+
+     vocab = set(q1 + q2)
+
+     vocab1 = dict(zip(vocab, [0]*len(vocab)))
+     vocab2 = dict(zip(vocab, [0]*len(vocab)))
+
+     for w in q1:
+         vocab1[w] += 1
+     for w in q2:
+         vocab2[w] += 1
+
+     v1 = list(vocab1.values())
+     v2 = list(vocab2.values())
+
+     return (np.dot(v1,v2) + SAFE_DIV)/(norm(v1)*norm(v2) + SAFE_DIV)
+
+ def cos_sim_vec(v1, v2):
+     return (np.dot(v1,v2) + SAFE_DIV)/(norm(v1)*norm(v2) + SAFE_DIV)
+
+ def euler_distance(v1, v2):
+     return sum((v1 - v2)**2)
+
+ def sentence_emb(sent):
+     embs = np.zeros(100)
+     counter = 0
+     for word in sent.split():
+         emb = WORD_EMBEDDINGS.get(word)
+         if emb is not None:
+             embs += emb
+             counter += 1
+     if counter == 0:
+         counter = 1
+     return embs / counter
+
+ def test_common_words(q1,q2):
+     w1 = set(map(lambda word: word.lower().strip(), q1.split(" ")))
+     w2 = set(map(lambda word: word.lower().strip(), q2.split(" ")))
+     return len(w1 & w2)
+
+ def test_total_words(q1,q2):
+     w1 = set(map(lambda word: word.lower().strip(), q1.split(" ")))
+     w2 = set(map(lambda word: word.lower().strip(), q2.split(" ")))
+     return (len(w1) + len(w2))
+
+
+ def test_fetch_token_features(q1, q2):
+     SAFE_DIV = 0.0001
+
+     # STOP_WORDS = pickle.load(open('stopwords.pkl','rb'))
+     STOP_WORDS = stopwords
+
+     token_features = [0.0] * 8
+
+     # Converting the Sentence into Tokens:
+     q1_tokens = q1.split()
+     q2_tokens = q2.split()
+
+     if len(q1_tokens) == 0 or len(q2_tokens) == 0:
+         return token_features
+
+     # Get the non-stopwords in Questions
+     q1_words = set([word for word in q1_tokens if word not in STOP_WORDS])
+     q2_words = set([word for word in q2_tokens if word not in STOP_WORDS])
+
+     # Get the stopwords in Questions
+     q1_stops = set([word for word in q1_tokens if word in STOP_WORDS])
+     q2_stops = set([word for word in q2_tokens if word in STOP_WORDS])
+
+     # Get the common non-stopwords from Question pair
+     common_word_count = len(q1_words.intersection(q2_words))
+
+     # Get the common stopwords from Question pair
+     common_stop_count = len(q1_stops.intersection(q2_stops))
+
+     # Get the common Tokens from Question pair
+     common_token_count = len(set(q1_tokens).intersection(set(q2_tokens)))
+
+     token_features[0] = common_word_count / (min(len(q1_words), len(q2_words)) + SAFE_DIV)
+     token_features[1] = common_word_count / (max(len(q1_words), len(q2_words)) + SAFE_DIV)
+     token_features[2] = common_stop_count / (min(len(q1_stops), len(q2_stops)) + SAFE_DIV)
+     token_features[3] = common_stop_count / (max(len(q1_stops), len(q2_stops)) + SAFE_DIV)
+     token_features[4] = common_token_count / (min(len(q1_tokens), len(q2_tokens)) + SAFE_DIV)
+     token_features[5] = common_token_count / (max(len(q1_tokens), len(q2_tokens)) + SAFE_DIV)
+
+     # Last word of both question is same or not
+     token_features[6] = int(q1_tokens[-1] == q2_tokens[-1])
+
+     # First word of both question is same or not
+     token_features[7] = int(q1_tokens[0] == q2_tokens[0])
+
+     return token_features
+
+
+ def test_fetch_length_features(q1, q2):
+     length_features = [0.0] * 3
+
+     # Converting the Sentence into Tokens:
+     q1_tokens = q1.split()
+     q2_tokens = q2.split()
+
+     if len(q1_tokens) == 0 or len(q2_tokens) == 0:
+         return length_features
+
+     # Absolute length features
+     length_features[0] = abs(len(q1_tokens) - len(q2_tokens))
+
+     # Average Token Length of both Questions
+     length_features[1] = (len(q1_tokens) + len(q2_tokens)) / 2
+
+     strs = list(distance.lcsubstrings(q1, q2))
+     if len(strs) > 0:
+         length_features[2] = len(strs[0]) / (min(len(q1), len(q2)) + 1)
+
+     return length_features
+
+
+ def test_fetch_fuzzy_features(q1, q2):
+     fuzzy_features = [0.0] * 4
+
+     # fuzz_ratio
+     fuzzy_features[0] = fuzz.QRatio(q1, q2)
+
+     # fuzz_partial_ratio
+     fuzzy_features[1] = fuzz.partial_ratio(q1, q2)
+
+     # token_sort_ratio
+     fuzzy_features[2] = fuzz.token_sort_ratio(q1, q2)
+
+     # token_set_ratio
+     fuzzy_features[3] = fuzz.token_set_ratio(q1, q2)
+
+     return fuzzy_features
+
+
+ def query_point_creator(q1, q2, allow_stopwords):
+     input_query = []
+
+     # preprocess
+     q1 = text_preprocess(q1, allow_stopwords)
+     q2 = text_preprocess(q2, allow_stopwords)
+
+     # cosine similarity
+     input_query.append(cos_sim(q1, q2))
+
+     # fetch basic features
+     input_query.append(len(q1))
+     input_query.append(len(q2))
+
+     input_query.append(len(q1.split(" ")))
+     input_query.append(len(q2.split(" ")))
+
+     input_query.append(test_common_words(q1, q2))
+     input_query.append(test_total_words(q1, q2))
+     input_query.append(round(test_common_words(q1, q2) / test_total_words(q1, q2), 2))
+
+     # fetch token features
+     token_features = test_fetch_token_features(q1, q2)
+     input_query.extend(token_features)
+
+     # fetch length based features
+     length_features = test_fetch_length_features(q1, q2)
+     input_query.extend(length_features)
+
+     # fetch fuzzy features
+     fuzzy_features = test_fetch_fuzzy_features(q1, q2)
+     input_query.extend(fuzzy_features)
+
+     return input_query
+
+ def sentence_emb(sent):
+     embs = np.zeros(100)
+     counter = 0
+     for word in sent.split():
+         emb = WORD_EMBEDDINGS.get(word)
+         if emb is not None:
+             embs += emb
+             counter += 1
+     if counter == 0:
+         counter = 1
+     return embs / counter
+
+ def get_x(q1, q2):
+     x = []
+
+     x.extend(
+         query_point_creator(q1, q2, False)
+     )
+     x.extend(
+         query_point_creator(q1, q2, True)
+     )
+
+     q1 = text_preprocess(q1, allow_stopwords=True)
+     q2 = text_preprocess(q2, allow_stopwords=True)
+
+     emb1 = sentence_emb(q1)
+     emb2 = sentence_emb(q2)
+
+     x.append(cos_sim_vec(emb1, emb2))
+     x.append(euler_distance(emb1, emb2))
+
+     x.extend(emb1)
+     x.extend(emb2)
+
+     return np.expand_dims(x, axis=0)
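Taken together, get_x stacks two passes of query_point_creator (23 handcrafted features each, with and without stopwords), two embedding-level similarities, and the two 100-dimensional mean word vectors. A rough usage sketch, assuming src/dup_ques/word_embeddings.json (which this commit does not add) is present and maps words to 100-dimensional vectors:

# Hypothetical usage; requires the word-embeddings file that is not part of this commit.
from src.dup_ques.preprocess import get_x

X = get_x("How do I learn Python?", "What is the best way to learn Python?")
print(X.shape)  # expected (1, 248): 2*23 handcrafted + 2 similarity + 2*100 embedding values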
src/movie_reviews/main.py CHANGED
@@ -50,11 +50,7 @@ pipeline = joblib.load("./src/movie_reviews/pipeline.pkl")

  def predict(text):
      cleaned = preprocess(text)
-     pred = pipeline.predict([cleaned])[0]
-     output = [0, 0]
-     output[pred] = 0.8
-     output[1-pred] = 0.2
-     return [output]
+     return pipeline.predict_proba([cleaned]).round(3).tolist()

  def preprocess(text):
      text = text.lower() # Lowercase
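With this change predict returns the classifier's own probabilities instead of the previous hard-coded 0.8/0.2 split. A minimal sketch of the new return shape, assuming the pickled pipeline is a binary scikit-learn classifier (column order follows pipeline.classes_, which this diff does not show):

# Hypothetical values for illustration only.
result = predict("A surprisingly heartfelt film")
# -> [[0.154, 0.846]]  (one row per input; columns in pipeline.classes_ order)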