jonghhhh committed
Commit 52d58f7 • 1 Parent(s): fa9db5b

Update app.py

Browse files
Files changed (1)
  1. app.py +4 -3
app.py CHANGED
@@ -6,7 +6,7 @@ import streamlit as st
 # 전이학습에 사용한 토크나이저와 모델 로드 & 가중치 로드
 tokenizer = RobertaTokenizer.from_pretrained('beomi/KcBERT-v2023')
 model = RobertaForSequenceClassification.from_pretrained('beomi/KcBERT-v2023', num_labels=2)
-model.load_state_dict(torch.load("pytorchmodel_518망언분류_acc9419.bin", map_location=torch.device('cpu')))
+model.load_state_dict(torch.load("pytorchmodel_518망언분류_acc9140.bin", map_location=torch.device('cpu')))
 # 모델을 평가 모드로 설정
 model.eval()
 
@@ -21,12 +21,13 @@ def inference(new_text):
     probs = torch.nn.functional.softmax(logits, dim=-1)
     predicted_class = torch.argmax(probs, dim=1).item()
     predicted_label = class_labels[predicted_class]
+    unpredicted_label = class_labels[1-predicted_class]
     probability = probs[0][predicted_class].item()
-    return f"예측: {predicted_label}, 확률: {probability:.4f}"
+    return f"{predicted_label}:{probability*100:.2f}%, {unpredicted_label}:{((1-probability)*100):.2f}%"
 
 # Streamlit interface
 st.markdown('## 5·18 민주화운동 관련 부적절한 발언 탐지')
-st.markdown('<small style="color:grey;">5·18 민주화운동과 관련해 무장 폭동, 북한군 개입, 가짜 유공자 등 부적절한 언급과 지역-이념에 대한 혐오성 발언이 문제되고 있습니다. 아래에 문장을 입력하면 이러한 내용을 중심으로 "문제없음/관련없음" 또는 "부적절(518 망언 가능)"로 판단해 드립니다. 예측 모델의 정확도는 94.19%로, 일부 부정확한 결과가 나올 수도 있습니다 </small>', unsafe_allow_html=True)
+st.markdown('<small style="color:grey;">5·18 민주화운동과 관련해 무장 폭동, 북한군 개입, 가짜 유공자 등 부적절한 언급과 지역-이념에 대한 혐오성 발언이 문제되고 있습니다. 아래에 문장을 입력하면 이러한 내용을 중심으로 "문제없음/관련없음" 또는 "부적절(518 망언 가능)"로 판단해 드립니다. 예측 모델의 정확도는 91.40%로, 일부 부정확한 결과가 나올 수 있습니다 </small>', unsafe_allow_html=True)
 user_input = st.text_area("이 곳에 문장 입력(100자 이하 권장, 너무 길면 분석 불가):")
 if st.button('시작'):
     result = inference(user_input)
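
For context, the second hunk starts partway through inference, so logits and class_labels come from unchanged lines of app.py that this diff does not show. Below is a minimal sketch of how the file plausibly fits together after the commit; the tokenization call, the max_length value, and the exact class_labels definition (inferred from the UI text, roughly "no issue / unrelated" vs. "inappropriate, possible 5·18 distortion") are assumptions, not part of the diff.

import torch
import streamlit as st
from transformers import RobertaTokenizer, RobertaForSequenceClassification

# Assumed label order, inferred from the UI description; the real definition lives on
# unchanged lines of app.py: index 0 = "문제없음/관련없음", index 1 = "부적절(518 망언 가능)".
class_labels = ["문제없음/관련없음", "부적절(518 망언 가능)"]

# Load the base tokenizer/model and the fine-tuned weights introduced by this commit.
tokenizer = RobertaTokenizer.from_pretrained('beomi/KcBERT-v2023')
model = RobertaForSequenceClassification.from_pretrained('beomi/KcBERT-v2023', num_labels=2)
model.load_state_dict(torch.load("pytorchmodel_518망언분류_acc9140.bin", map_location=torch.device('cpu')))
model.eval()

def inference(new_text):
    # Assumed tokenization step; the diff only shows the function from `probs` onward.
    inputs = tokenizer(new_text, return_tensors="pt", truncation=True, max_length=128)
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.nn.functional.softmax(logits, dim=-1)
    predicted_class = torch.argmax(probs, dim=1).item()
    predicted_label = class_labels[predicted_class]
    unpredicted_label = class_labels[1 - predicted_class]
    probability = probs[0][predicted_class].item()
    # New in this commit: report both classes as percentages rather than a single probability.
    return f"{predicted_label}:{probability*100:.2f}%, {unpredicted_label}:{((1-probability)*100):.2f}%"

Compared with the previous single-probability string, the new return value reports both labels as percentages, so near-50/50 predictions are easy to spot in the Streamlit output.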