jonghhhh committed on
Commit
eb530e9
•
1 Parent(s): bdf837e

Upload app.py

Files changed (1)
  1. app.py +33 -0
app.py ADDED
@@ -0,0 +1,33 @@
+ # Inference
+ from transformers import RobertaTokenizer, RobertaForSequenceClassification
+ import torch
+ import streamlit as st
+
+ # Load the tokenizer and base model used for transfer learning, then the fine-tuned weights
+ tokenizer = RobertaTokenizer.from_pretrained('beomi/KcBERT-v2023')
+ model = RobertaForSequenceClassification.from_pretrained('beomi/KcBERT-v2023', num_labels=2)
+ model.load_state_dict(torch.load("pytorchmodel_518망언분류_acc9308.bin", map_location=torch.device('cpu')))  # map_location ensures the weights load on CPU-only hosts
+ # Set the model to evaluation mode
+ model.eval()
+
+ # Class labels (index 0 = 적절/appropriate, index 1 = 부적절/inappropriate) and inference helper
+ class_labels = ["적절(518망언_NO)", "부적절(518망언_YES)"]
+ def inference(new_text):
+     inputs = tokenizer(new_text, return_tensors="pt")
+     # Run inference (on CPU)
+     with torch.no_grad():
+         outputs = model(**inputs)
+         logits = outputs.logits
+         probs = torch.nn.functional.softmax(logits, dim=-1)
+     predicted_class = torch.argmax(probs, dim=1).item()
+     predicted_label = class_labels[predicted_class]
+     probability = probs[0][predicted_class].item()
+     return f"예측: {predicted_label}, 확률: {probability:.4f}"
+
+ # Streamlit interface
+ st.title('5·18 민주화운동 관련 부적절한 발언 탐지')  # "Detecting inappropriate remarks about the May 18 Democratization Movement"
+ st.markdown('<small style="color:grey;">5·18 민주화운동과 관련해 무장 폭동, 북한군 개입, 가짜 유공자 등 부적절한 언급과 지역-이념에 대한 혐오성 발언이 문제되고 있습니다. 아래에 문장을 입력하면 이러한 내용을 중심으로 문장의 부적절성 여부를 확률과 함께 판단해 드립니다.</small>', unsafe_allow_html=True)
+ user_input = st.text_area("이 곳에 문장 입력(100자 이하 권장):")  # "Enter a sentence here (100 characters or fewer recommended)"
+ if st.button('시작'):  # 'Start' button
+     result = inference(user_input)
+     st.write(result)
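
Because Streamlit reruns the whole script on every interaction, the module-level loading above reloads the tokenizer and weights each time. A minimal sketch of a possible follow-up (not part of this commit) is to wrap the loading step in Streamlit's st.cache_resource; load_classifier is a hypothetical helper name, and the model id and weight file are assumed to be the same as in the diff:

# Minimal sketch (assumption, not part of this commit): cache the heavy objects so
# Streamlit does not reload the tokenizer and weights on every rerun.
import torch
import streamlit as st
from transformers import RobertaTokenizer, RobertaForSequenceClassification

@st.cache_resource  # one shared copy per server process
def load_classifier():
    tokenizer = RobertaTokenizer.from_pretrained('beomi/KcBERT-v2023')
    model = RobertaForSequenceClassification.from_pretrained('beomi/KcBERT-v2023', num_labels=2)
    model.load_state_dict(torch.load("pytorchmodel_518망언분류_acc9308.bin", map_location=torch.device('cpu')))
    model.eval()
    return tokenizer, model

tokenizer, model = load_classifier()  # drop-in replacement for the module-level loading above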