Soyoung97 committed
Commit 508ab87 • 1 Parent(s): 41db46d

Update app.py

Files changed (1)
  1. app.py +5 -7
app.py CHANGED
@@ -19,7 +19,7 @@ def get_model():
     return model


-default_text = '한국어는 저한테 너무 어려운 언어이었어요. 저는 한국말 배워 안했어요.'
+default_text = '한국어는 저한테 너무 어려운 언어이었어요.'

 model = get_model()
 tokenizer = tokenizer()
@@ -28,16 +28,14 @@ st.title("Grammatical Error Correction for Korean: Demo")
 text = st.text_input("Input corrputed sentence :", value=default_text)
 default_text_list = ['한국어는 저한테 너무 어려운 언어이었어요.', '저는 한국말 배워 안했어요.', '멍머이는 귀엽다', '대학원생살려!', '수지씨가 예쁩니까?', '지난날 인타넷으로 찾아냈다.', '그 제 꿈이 교수기 되는 것입니다']

-if st.button("try another example"):
+if st.button("try another example: "):
     text_button = random.choice(default_text_list)
+    st.write(text_button)

 st.markdown("## Original sentence:")
-if text_button:
-    st.write(text_button)
-else:
-    st.write(text)
+st.write(text)

-if text or text_button:
+if text:
     st.markdown("## Corrected output")
     with st.spinner('processing..'):
         raw_input_ids = tokenizer.encode(text)
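The diff cuts off at raw_input_ids = tokenizer.encode(text), before the generation and decoding steps that produce the corrected sentence. (The new default_text reads, roughly, "Korean was a very difficult language for me."; the sentences in default_text_list are intentionally corrupted inputs for the demo.) Below is a minimal sketch of how a seq2seq GEC demo like this typically completes that step, assuming the model behind get_model() is a Hugging Face seq2seq checkpoint. The checkpoint id, max_length, and num_beams are assumptions for illustration, not the Space's actual values.

import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Hypothetical sketch, not the Space's actual code: get_model() is elided
# above, so the checkpoint id here is a placeholder.
checkpoint = "your-korean-gec-checkpoint"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

text = '한국어는 저한테 너무 어려운 언어이었어요.'  # the new default_text
raw_input_ids = tokenizer.encode(text)     # the call the diff ends on
input_ids = torch.tensor([raw_input_ids])  # add a batch dimension
output_ids = model.generate(input_ids, max_length=64, num_beams=5)  # assumed decoding settings
corrected = tokenizer.decode(output_ids[0], skip_special_tokens=True)
print(corrected)  # the string the app would show under "## Corrected output"

Note that with this commit applied, the randomly drawn example from the button is only displayed via st.write(text_button); the model still corrects whatever is currently in the input box, since the simplified if text: branch reads only text.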