AliHaider0343 committed on
Commit
2362d45
1 Parent(s): e077179

Create app.py

Files changed (1)
  1. app.py +79 -0
app.py ADDED
@@ -0,0 +1,79 @@
+ import torch
+ import nltk
+ import streamlit as st
+ from nltk.corpus import stopwords
+ from transformers import RobertaTokenizer, RobertaForSequenceClassification
+
+ nltk.download('punkt')
+ nltk.download('stopwords')
+ stop_words = set(stopwords.words('english'))
+
+ # Encode a sentence into RoBERTa input ids and attention mask
+ def tokenize_sentences(sentence):
+     encoded_dict = tokenizer.encode_plus(
+         sentence,
+         add_special_tokens=True,
+         max_length=128,
+         padding='max_length',
+         truncation=True,
+         return_attention_mask=True,
+         return_tensors='pt'
+     )
+     return torch.cat([encoded_dict['input_ids']], dim=0), torch.cat([encoded_dict['attention_mask']], dim=0)
+
+ # Strip NLTK English stop words plus a few custom, domain-specific words
+ def remove_stop_words(sentence):
+     words = nltk.word_tokenize(sentence)
+     custom_words = ['recommend', 'having', 'Hello', 'best', 'restaurant', 'top', 'want', 'need', 'well', 'most', 'should', 'be', 'good', 'also']
+     stop_words.update(custom_words)
+     words_without_stopwords = [word for word in words if word.lower() not in stop_words]
+     sentence_without_stopwords = ' '.join(words_without_stopwords)
+     return sentence_without_stopwords
+
+ # Lowercase, trim and remove stop words before classification
+ def preprocess_query(query):
+     query = str(query).lower()
+     query = query.strip()
+     query = remove_stop_words(query)
+     return query
+
+ # Multi-label prediction: keep every aspect whose sigmoid score reaches the threshold
+ def predict_aspects(sentence, threshold):
+     input_ids, attention_mask = tokenize_sentences(sentence)
+     with torch.no_grad():
+         outputs = aspects_model(input_ids, attention_mask=attention_mask)
+     logits = outputs.logits
+     predicted_aspects = torch.sigmoid(logits).squeeze().tolist()
+     results = dict()
+     for label, prediction in zip(LABEL_COLUMNS_ASPECTS, predicted_aspects):
+         if prediction < threshold:
+             continue
+         percentage = round(float(prediction) * 100, 2)
+         results[label] = percentage
+     return results
+
+ # Load tokenizer and model
+ BERT_MODEL_NAME_FOR_ASPECTS_CLASSIFICATION = 'roberta-large'
+ tokenizer = RobertaTokenizer.from_pretrained(BERT_MODEL_NAME_FOR_ASPECTS_CLASSIFICATION, do_lower_case=True)
+
+ LABEL_COLUMNS_ASPECTS = ['FOOD-CUISINE', 'FOOD-DEALS', 'FOOD-DIET_OPTION', 'FOOD-EXPERIENCE', 'FOOD-FLAVOR', 'FOOD-GENERAL', 'FOOD-INGREDIENT', 'FOOD-KITCHEN', 'FOOD-MEAL', 'FOOD-MENU', 'FOOD-PORTION', 'FOOD-PRESENTATION', 'FOOD-PRICE', 'FOOD-QUALITY', 'FOOD-RECOMMENDATION', 'FOOD-TASTE', 'GENERAL-GENERAL', 'RESTAURANT-ATMOSPHERE', 'RESTAURANT-BUILDING', 'RESTAURANT-DECORATION', 'RESTAURANT-EXPERIENCE', 'RESTAURANT-FEATURES', 'RESTAURANT-GENERAL', 'RESTAURANT-HYGIENE', 'RESTAURANT-KITCHEN', 'RESTAURANT-LOCATION', 'RESTAURANT-OPTIONS', 'RESTAURANT-RECOMMENDATION', 'RESTAURANT-SEATING_PLAN', 'RESTAURANT-VIEW', 'SERVICE-BEHAVIOUR', 'SERVICE-EXPERIENCE', 'SERVICE-GENERAL', 'SERVICE-WAIT_TIME']
+
+ aspects_model = RobertaForSequenceClassification.from_pretrained(BERT_MODEL_NAME_FOR_ASPECTS_CLASSIFICATION, num_labels=len(LABEL_COLUMNS_ASPECTS))
+ aspects_model.load_state_dict(torch.load('./Aspects_Extraction_Model_updated.pth', map_location=torch.device('cpu')))
+ aspects_model.eval()
+
+ # Streamlit App
+ st.title("Implicit and Explicit Aspect Extraction")
+
+ sentence = st.text_input("Enter a sentence:")
+ threshold = st.slider("Threshold", min_value=0.0, max_value=1.0, step=0.01, value=0.5)
+
+ if sentence:
+     processed_sentence = preprocess_query(sentence)
+     results = predict_aspects(processed_sentence, threshold)
+     if len(results) > 0:
+         st.write("Predicted Aspects:")
+         for aspect, percentage in results.items():
+             st.write(f"- {aspect}: {percentage}%")
+     else:
+         st.write("No aspects above the threshold.")