semviqa-demo / app.py
import streamlit as st
import torch
from transformers import AutoTokenizer
from semviqa.ser.qatc_model import QATCForQuestionAnswering
from semviqa.tvc.model import ClaimModelForClassification
from semviqa.ser.ser_eval import extract_evidence_tfidf_qatc
from semviqa.tvc.tvc_eval import classify_claim
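# SemViQA pipeline pieces used below (descriptive note, inferred from the imports and calls):
#   - extract_evidence_tfidf_qatc: retrieves an evidence span from the context for the
#     claim, combining TF-IDF matching with the QATC extraction model.
#   - classify_claim: returns a confidence score and a predicted label for the
#     claim/evidence pair; it is reused for both the three-class
#     (NEI / SUPPORTED / REFUTED) and the binary (SUPPORTED / REFUTED) classifiers.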
# Load models with caching
@st.cache_resource()
def load_model(model_name, model_class, is_bc=False):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = model_class.from_pretrained(model_name, num_labels=3 if not is_bc else 2)
    return tokenizer, model
# Set up page configuration and custom CSS for a modern, clean look
st.set_page_config(page_title="SemViQA Demo", layout="wide")
st.markdown("""
<style>
.big-title {
font-size: 36px;
font-weight: bold;
color: #4A90E2;
text-align: center;
margin-top: 20px;
}
.sub-title {
font-size: 20px;
color: #666;
text-align: center;
margin-bottom: 20px;
}
.stButton>button {
background-color: #4CAF50;
color: white;
font-size: 16px;
width: 100%;
border-radius: 8px;
padding: 10px;
}
.stTextArea textarea {
font-size: 16px;
}
.result-box {
background-color: #f9f9f9;
padding: 20px;
border-radius: 10px;
box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.1);
margin-top: 20px;
}
.verdict {
font-size: 24px;
font-weight: bold;
margin: 0;
display: flex;
align-items: center;
}
.verdict-icon {
margin-right: 10px;
}
</style>
""", unsafe_allow_html=True)
st.markdown("<p class='big-title'>SemViQA: Semantic Question Answering System for Vietnamese Fact-Checking</p>", unsafe_allow_html=True)
st.markdown("<p class='sub-title'>Enter a claim and context to verify its accuracy</p>", unsafe_allow_html=True)
# Sidebar: Settings and additional features
with st.sidebar.expander("⚙️ Settings", expanded=False):
    tfidf_threshold = st.slider("TF-IDF Threshold", 0.0, 1.0, 0.5, 0.01)
    length_ratio_threshold = st.slider("Length Ratio Threshold", 0.1, 1.0, 0.5, 0.01)
    qatc_model_name = st.selectbox("QATC Model", [
        "SemViQA/qatc-infoxlm-viwikifc",
        "SemViQA/qatc-infoxlm-isedsc01",
        "SemViQA/qatc-vimrc-viwikifc",
        "SemViQA/qatc-vimrc-isedsc01"
    ])
    bc_model_name = st.selectbox("Binary Classification Model", [
        "SemViQA/bc-xlmr-viwikifc",
        "SemViQA/bc-xlmr-isedsc01",
        "SemViQA/bc-infoxlm-viwikifc",
        "SemViQA/bc-infoxlm-isedsc01",
        "SemViQA/bc-erniem-viwikifc",
        "SemViQA/bc-erniem-isedsc01"
    ])
    tc_model_name = st.selectbox("Three-Class Classification Model", [
        "SemViQA/tc-xlmr-viwikifc",
        "SemViQA/tc-xlmr-isedsc01",
        "SemViQA/tc-infoxlm-viwikifc",
        "SemViQA/tc-infoxlm-isedsc01",
        "SemViQA/tc-erniem-viwikifc",
        "SemViQA/tc-erniem-isedsc01"
    ])
    show_details = st.checkbox("Show probability details", value=False)
# Initialize verification history in session state
if 'history' not in st.session_state:
    st.session_state.history = []
# Load the selected models
tokenizer_qatc, model_qatc = load_model(qatc_model_name, QATCForQuestionAnswering)
tokenizer_bc, model_bc = load_model(bc_model_name, ClaimModelForClassification, is_bc=True)
tokenizer_tc, model_tc = load_model(tc_model_name, ClaimModelForClassification)
# User input fields
claim = st.text_area("Enter Claim", "Vietnam is a country in Southeast Asia.")
context = st.text_area("Enter Context", "Vietnam is a country located in Southeast Asia, covering an area of over 331,000 km² with a population of more than 98 million people.")
# Define icon mapping for each verdict label
verdict_icons = {
"SUPPORTED": "✅",
"REFUTED": "❌",
"NEI": "⚠️"
}
if st.button("Verify"):
    with st.spinner("Verifying..."):
        device = "cuda" if torch.cuda.is_available() else "cpu"

        # Extract evidence
        evidence = extract_evidence_tfidf_qatc(
            claim, context, model_qatc, tokenizer_qatc, device,
            confidence_threshold=tfidf_threshold, length_ratio_threshold=length_ratio_threshold
        )

        # Classify the claim
        verdict = "NEI"
        prob3class, pred_tc = classify_claim(claim, evidence, model_tc, tokenizer_tc, device)
        details = ""
        if pred_tc != 0:
            prob2class, pred_bc = classify_claim(claim, evidence, model_bc, tokenizer_bc, device)
            if pred_bc == 0:
                verdict = "SUPPORTED"
            elif prob2class > prob3class:
                verdict = "REFUTED"
            else:
                verdict = ["NEI", "SUPPORTED", "REFUTED"][pred_tc]
            if show_details:
                details = f"<p><strong>3-Class Probability:</strong> {prob3class:.2f} - <strong>2-Class Probability:</strong> {prob2class:.2f}</p>"

        # Save the verification record in session history
        st.session_state.history.append({
            "claim": claim,
            "evidence": evidence,
            "verdict": verdict
        })

        # Display the results with icon and label (without extra "Verdict:" text)
        st.markdown(f"""
            <div class='result-box'>
                <h3>Result</h3>
                <p><strong>Evidence:</strong> {evidence}</p>
                <p class='verdict'><span class='verdict-icon'>{verdict_icons.get(verdict, '')}</span>{verdict}</p>
                {details}
            </div>
        """, unsafe_allow_html=True)
# Display verification history in the sidebar
with st.sidebar.expander("Verification History", expanded=False):
    if st.session_state.history:
        for idx, record in enumerate(reversed(st.session_state.history), 1):
            st.markdown(f"**{idx}. Claim:** {record['claim']}  \n**Result:** {verdict_icons.get(record['verdict'], '')} {record['verdict']}")
    else:
        st.write("No verification history yet.")