xuandin committed on
Commit 77dabd4 · verified · 1 Parent(s): d265e61

Update app.py

Files changed (1)
  1. app.py +132 -40
app.py CHANGED
@@ -13,59 +13,151 @@ def load_model(model_name, model_class, is_bc=False):
      model = model_class.from_pretrained(model_name, num_labels=3 if not is_bc else 2)
      return tokenizer, model

- # UI Configuration
  st.set_page_config(page_title="SemViQA Demo", layout="wide")

  st.markdown("""
      <style>
-         .big-title { font-size: 36px; font-weight: bold; color: #4A90E2; text-align: center; }
-         .sub-title { font-size: 20px; color: #666; text-align: center; }
-         .stButton>button { background-color: #4CAF50; color: white; font-size: 16px; width: 100%; border-radius: 8px; padding: 10px; }
-         .stTextArea textarea { font-size: 16px; }
-         .result-box { background-color: #f9f9f9; padding: 20px; border-radius: 10px; box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.1); }
      </style>
  """, unsafe_allow_html=True)

- st.markdown("<p class='big-title'>🔍 SemViQA: A Semantic Question Answering System for Vietnamese Information Fact-Checking</p>", unsafe_allow_html=True)
  st.markdown("<p class='sub-title'>Enter a claim and context to verify its accuracy</p>", unsafe_allow_html=True)

- # Sidebar - Configuration Settings
  with st.sidebar.expander("⚙️ Settings", expanded=False):
-     tfidf_threshold = st.slider("🔧 TF-IDF Threshold", 0.0, 1.0, 0.5, 0.01)
-     length_ratio_threshold = st.slider("📏 Length Ratio Threshold", 0.1, 1.0, 0.5, 0.01)
-     qatc_model_name = st.selectbox("🤖 QATC Model", ["SemViQA/qatc-infoxlm-viwikifc","SemViQA/qatc-infoxlm-isedsc01","SemViQA/qatc-vimrc-viwikifc","SemViQA/qatc-vimrc-isedsc01"])
-     bc_model_name = st.selectbox("🏷️ Binary Classification Model", ["SemViQA/bc-xlmr-viwikifc","SemViQA/bc-xlmr-isedsc01","SemViQA/bc-infoxlm-viwikifc","SemViQA/bc-infoxlm-isedsc01","SemViQA/bc-erniem-viwikifc","SemViQA/bc-erniem-isedsc01"])
-     tc_model_name = st.selectbox("📊 Three-Class Classification Model", ["SemViQA/tc-xlmr-viwikifc","SemViQA/tc-xlmr-isedsc01","SemViQA/tc-infoxlm-viwikifc","SemViQA/tc-infoxlm-isedsc01","SemViQA/tc-erniem-viwikifc","SemViQA/tc-erniem-isedsc01"])

- # Load selected models
  tokenizer_qatc, model_qatc = load_model(qatc_model_name, QATCForQuestionAnswering)
  tokenizer_bc, model_bc = load_model(bc_model_name, ClaimModelForClassification, is_bc=True)
  tokenizer_tc, model_tc = load_model(tc_model_name, ClaimModelForClassification)

- # User Input Fields
- claim = st.text_area("✍️ Enter Claim", "Vietnam is a country in Southeast Asia.")
- context = st.text_area("📖 Enter Context", "Vietnam is a country located in Southeast Asia, covering an area of over 331,000 km² with a population of more than 98 million people.")

- if st.button("🔎 Verify"):
-     # Extract evidence
-     evidence = extract_evidence_tfidf_qatc(
-         claim, context, model_qatc, tokenizer_qatc, "cuda" if torch.cuda.is_available() else "cpu",
-         confidence_threshold=tfidf_threshold, length_ratio_threshold=length_ratio_threshold
-     )
-
-     # Claim Classification
-     verdict = "NEI"
-     prob3class, pred_tc = classify_claim(claim, evidence, model_tc, tokenizer_tc, "cuda" if torch.cuda.is_available() else "cpu")
-
-     if pred_tc != 0:
-         prob2class, pred_bc = classify_claim(claim, evidence, model_bc, tokenizer_bc, "cuda" if torch.cuda.is_available() else "cpu")
-         verdict = "SUPPORTED" if pred_bc == 0 else "REFUTED" if prob2class > prob3class else ["NEI", "SUPPORTED", "REFUTED"][pred_tc]
-
-     # Display Results
-     st.markdown(f"""
-         <div class='result-box'>
-             <h3>📌 Result</h3>
-             <p><strong>🔍 Evidence:</strong> {evidence}</p>
-             <p><strong>✅ Verdict:</strong> {verdict}</p>
-         </div>
-     """, unsafe_allow_html=True)

      model = model_class.from_pretrained(model_name, num_labels=3 if not is_bc else 2)
      return tokenizer, model

+ # Set up page configuration and custom CSS for a modern, clean look
  st.set_page_config(page_title="SemViQA Demo", layout="wide")

  st.markdown("""
      <style>
+         .big-title {
+             font-size: 36px;
+             font-weight: bold;
+             color: #4A90E2;
+             text-align: center;
+             margin-top: 20px;
+         }
+         .sub-title {
+             font-size: 20px;
+             color: #666;
+             text-align: center;
+             margin-bottom: 20px;
+         }
+         .stButton>button {
+             background-color: #4CAF50;
+             color: white;
+             font-size: 16px;
+             width: 100%;
+             border-radius: 8px;
+             padding: 10px;
+         }
+         .stTextArea textarea {
+             font-size: 16px;
+         }
+         .result-box {
+             background-color: #f9f9f9;
+             padding: 20px;
+             border-radius: 10px;
+             box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.1);
+             margin-top: 20px;
+         }
+         .verdict {
+             font-size: 24px;
+             font-weight: bold;
+             margin: 0;
+             display: flex;
+             align-items: center;
+         }
+         .verdict-icon {
+             margin-right: 10px;
+         }
      </style>
  """, unsafe_allow_html=True)

+ st.markdown("<p class='big-title'>SemViQA: Semantic Question Answering System for Vietnamese Fact-Checking</p>", unsafe_allow_html=True)
  st.markdown("<p class='sub-title'>Enter a claim and context to verify its accuracy</p>", unsafe_allow_html=True)

+ # Sidebar: Settings and additional features
  with st.sidebar.expander("⚙️ Settings", expanded=False):
+     tfidf_threshold = st.slider("TF-IDF Threshold", 0.0, 1.0, 0.5, 0.01)
+     length_ratio_threshold = st.slider("Length Ratio Threshold", 0.1, 1.0, 0.5, 0.01)
+     qatc_model_name = st.selectbox("QATC Model", [
+         "SemViQA/qatc-infoxlm-viwikifc",
+         "SemViQA/qatc-infoxlm-isedsc01",
+         "SemViQA/qatc-vimrc-viwikifc",
+         "SemViQA/qatc-vimrc-isedsc01"
+     ])
+     bc_model_name = st.selectbox("Binary Classification Model", [
+         "SemViQA/bc-xlmr-viwikifc",
+         "SemViQA/bc-xlmr-isedsc01",
+         "SemViQA/bc-infoxlm-viwikifc",
+         "SemViQA/bc-infoxlm-isedsc01",
+         "SemViQA/bc-erniem-viwikifc",
+         "SemViQA/bc-erniem-isedsc01"
+     ])
+     tc_model_name = st.selectbox("Three-Class Classification Model", [
+         "SemViQA/tc-xlmr-viwikifc",
+         "SemViQA/tc-xlmr-isedsc01",
+         "SemViQA/tc-infoxlm-viwikifc",
+         "SemViQA/tc-infoxlm-isedsc01",
+         "SemViQA/tc-erniem-viwikifc",
+         "SemViQA/tc-erniem-isedsc01"
+     ])
+     show_details = st.checkbox("Show probability details", value=False)

+ # Initialize verification history in session state
+ if 'history' not in st.session_state:
+     st.session_state.history = []
+
+ # Load the selected models
  tokenizer_qatc, model_qatc = load_model(qatc_model_name, QATCForQuestionAnswering)
  tokenizer_bc, model_bc = load_model(bc_model_name, ClaimModelForClassification, is_bc=True)
  tokenizer_tc, model_tc = load_model(tc_model_name, ClaimModelForClassification)

+ # User input fields
+ claim = st.text_area("Enter Claim", "Vietnam is a country in Southeast Asia.")
+ context = st.text_area("Enter Context", "Vietnam is a country located in Southeast Asia, covering an area of over 331,000 km² with a population of more than 98 million people.")
+
+ # Define icon mapping for each verdict label
+ verdict_icons = {
+     "SUPPORTED": "✅",
+     "REFUTED": "❌",
+     "NEI": "⚠️"
+ }
+
+ if st.button("Verify"):
+     with st.spinner("Verifying..."):
+         # Extract evidence
+         evidence = extract_evidence_tfidf_qatc(
+             claim, context, model_qatc, tokenizer_qatc, "cuda" if torch.cuda.is_available() else "cpu",
+             confidence_threshold=tfidf_threshold, length_ratio_threshold=length_ratio_threshold
+         )
+
+         # Classify the claim
+         verdict = "NEI"
+         prob3class, pred_tc = classify_claim(claim, evidence, model_tc, tokenizer_tc, "cuda" if torch.cuda.is_available() else "cpu")
+
+         details = ""
+         if pred_tc != 0:
+             prob2class, pred_bc = classify_claim(claim, evidence, model_bc, tokenizer_bc, "cuda" if torch.cuda.is_available() else "cpu")
+             if pred_bc == 0:
+                 verdict = "SUPPORTED"
+             elif prob2class > prob3class:
+                 verdict = "REFUTED"
+             else:
+                 verdict = ["NEI", "SUPPORTED", "REFUTED"][pred_tc]
+             if show_details:
+                 details = f"<p><strong>3-Class Probability:</strong> {prob3class:.2f} - <strong>2-Class Probability:</strong> {prob2class:.2f}</p>"
+
+         # Save the verification record in session history
+         st.session_state.history.append({
+             "claim": claim,
+             "evidence": evidence,
+             "verdict": verdict
+         })
+
+         # Display the results with icon and label (without extra "Verdict:" text)
+         st.markdown(f"""
+             <div class='result-box'>
+                 <h3>Result</h3>
+                 <p><strong>Evidence:</strong> {evidence}</p>
+                 <p class='verdict'><span class='verdict-icon'>{verdict_icons.get(verdict, '')}</span>{verdict}</p>
+                 {details}
+             </div>
+         """, unsafe_allow_html=True)

+ # Display verification history in the sidebar
+ with st.sidebar.expander("Verification History", expanded=False):
+     if st.session_state.history:
+         for idx, record in enumerate(reversed(st.session_state.history), 1):
+             st.markdown(f"**{idx}. Claim:** {record['claim']} \n**Result:** {verdict_icons.get(record['verdict'], '')} {record['verdict']}")
+     else:
+         st.write("No verification history yet.")