|
|
|
from transformers import RobertaTokenizer, RobertaForSequenceClassification
|
|
import torch
|
|
import streamlit as st
|
|
|
|
|
|
# Load the tokenizer and fine-tuned classifier exactly once and cache them
# across Streamlit reruns: without st.cache_resource the multi-hundred-MB
# checkpoint would be re-downloaded/re-deserialized on every widget interaction.
@st.cache_resource
def _load_classifier():
    """Return the (tokenizer, model) pair with the fine-tuned weights applied."""
    # NOTE(review): KcBERT checkpoints are BERT-based; confirm that the
    # Roberta* classes are the intended wrappers for 'beomi/KcBERT-v2023'.
    tok = RobertaTokenizer.from_pretrained('beomi/KcBERT-v2023')
    clf = RobertaForSequenceClassification.from_pretrained('beomi/KcBERT-v2023', num_labels=2)
    # map_location="cpu" lets the app start on machines without a GPU; a
    # checkpoint saved from a CUDA device would otherwise fail to deserialize.
    state = torch.load("pytorchmodel_518๋ง์ธ๋ถ๋ฅ_acc9308.bin", map_location="cpu")
    clf.load_state_dict(state)
    clf.eval()  # inference mode: disables dropout
    return tok, clf


tokenizer, model = _load_classifier()

# Index i of the model's logits maps onto class_labels[i]
# (0 = acceptable, 1 = problematic, per the fine-tuning labels).
class_labels = ["์ ์ (518๋ง์ธ_NO)", "๋ถ์ ์ (518๋ง์ธ_YES)"]
|
|
def inference(new_text):
    """Classify *new_text* with the fine-tuned model.

    Returns a human-readable string containing the predicted label and the
    softmax probability of that label, formatted to four decimal places.
    """
    # truncation=True caps the encoding at the model's maximum sequence
    # length; without it, inputs longer than the encoder limit (512 tokens
    # for BERT-style models) raise a RuntimeError in the forward pass.
    inputs = tokenizer(new_text, return_tensors="pt", truncation=True)

    # Inference only — no gradients needed, saves memory and time.
    with torch.no_grad():
        outputs = model(**inputs)

    logits = outputs.logits
    # Convert raw logits to a probability distribution over the two classes.
    probs = torch.nn.functional.softmax(logits, dim=-1)
    predicted_class = torch.argmax(probs, dim=1).item()
    predicted_label = class_labels[predicted_class]
    # Batch size is 1, so row 0 holds the probabilities for this input.
    probability = probs[0][predicted_class].item()
    return f"์์ธก: {predicted_label}, ํ๋ฅ : {probability:.4f}"
|
|
|
|
|
|
# --- Streamlit UI -----------------------------------------------------------
# Page title for the detector app.
st.title('5ยท18 ๋ฏผ์ฃผํ์ด๋ ๊ด๋ จ ๋ถ์ ์ ํ ๋ฐ์ธ ํ์ง')

# Explanatory subtitle rendered as small grey HTML text; unsafe_allow_html is
# required for the inline <small> styling. The string is a static literal, so
# there is no user-supplied HTML injection risk here.
st.markdown('<small style="color:grey;">5ยท18 ๋ฏผ์ฃผํ์ด๋๊ณผ ๊ด๋ จํด ๋ฌด์ฅ ํญ๋, ๋ถํ๊ตฐ ๊ฐ์, ๊ฐ์ง ์ ๊ณต์ ๋ฑ ๋ถ์ ์ ํ ์ธ๊ธ๊ณผ ์ง์ญ-์ด๋์ ๋ํ ํ์ค์ฑ ๋ฐ์ธ์ด ๋ฌธ์ ๋๊ณ ์์ต๋๋ค. ์๋์ ๋ฌธ์ฅ์ ์๋ ฅํ๋ฉด ์ด๋ฌํ ๋ด์ฉ์ ์ค์ฌ์ผ๋ก ๋ฌธ์ฅ์ ๋ถ์ ์ ์ฑ ์ฌ๋ถ๋ฅผ ํ๋ฅ ๊ณผ ํจ๊ป ํ๋จํด ๋๋ฆฝ๋๋ค. </small>', unsafe_allow_html=True)

# Free-text input for the sentence to classify.
user_input = st.text_area("์ด ๊ณณ์ ๋ฌธ์ฅ ์๋ ฅ(100์ ์ดํ ๊ถ์ฅ):")

# NOTE(review): the button triggers inference even when the text area is
# empty — consider guarding with `if user_input.strip():` before calling.
if st.button('์์'):
    result = inference(user_input)
    st.write(result)