Yuchan5386 committed on
Commit
0d9f940
·
verified ·
1 Parent(s): 38a0e9f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -6
app.py CHANGED
@@ -1,19 +1,16 @@
1
  from transformers import AutoModelForCausalLM, AutoTokenizer
2
  import gradio as gr
3
 
4
- # ๋ชจ๋ธ ๋กœ๋“œ
5
  model_name = "Yuchan5386/NaturaAI-1"
6
  tokenizer = AutoTokenizer.from_pretrained(model_name)
7
  model = AutoModelForCausalLM.from_pretrained(model_name)
8
 
9
- # ํ…์ŠคํŠธ ์ƒ์„ฑ ํ•จ์ˆ˜
10
  def generate_text(prompt):
11
  inputs = tokenizer(prompt, return_tensors="pt")
12
  outputs = model.generate(**inputs)
13
  return tokenizer.decode(outputs[0], skip_special_tokens=True)
14
 
15
- # Gradio ์ธํ„ฐํŽ˜์ด์Šค ์ƒ์„ฑ, api=True๋กœ API ์—”๋“œํฌ์ธํŠธ ํ™œ์„ฑํ™”
16
- iface = gr.Interface(fn=generate_text, inputs="text", outputs="text", api=True)
17
 
18
- # Gradio ์ธํ„ฐํŽ˜์ด์Šค ์‹คํ–‰
19
- iface.launch(share=True)
 
1
  from transformers import AutoModelForCausalLM, AutoTokenizer
2
  import gradio as gr
3
 
 
4
# Load the chat model and its matching tokenizer once at startup so every
# request reuses the same in-memory weights.
model_name = "Yuchan5386/NaturaAI-1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
7
 
 
8
def generate_text(prompt, max_new_tokens=None):
    """Generate a text continuation of ``prompt`` with the loaded model.

    Args:
        prompt: Input text to continue.
        max_new_tokens: Optional cap on the number of newly generated
            tokens. When ``None`` (the default) the model's own generation
            defaults are used, preserving the original behavior.

    Returns:
        The decoded model output as a string, with special tokens removed.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # Only forward the cap when the caller asked for one, so the default
    # call is byte-for-byte the original `model.generate(**inputs)`.
    gen_kwargs = {} if max_new_tokens is None else {"max_new_tokens": max_new_tokens}
    outputs = model.generate(**inputs, **gen_kwargs)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
12
 
13
# Gradio UI wrapping generate_text; the HTTP API endpoint for `fn` is
# exposed by Gradio automatically.
iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")

# BUG FIX: `launch()` has no `api` keyword — `api=True` raises a TypeError
# at startup. The API is always enabled; `show_api=True` is the real switch
# that lists the endpoints on the "Use via API" page.
iface.launch(share=True, show_api=True)