rexthecoder committed
Commit 7957d68
1 Parent(s): afdf68a

Update app.py

Files changed (1)
  1. app.py +3 -7
app.py CHANGED
@@ -1,17 +1,13 @@
 import gradio as gr
-from pydantic import BaseModel
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
 tokenizer = AutoTokenizer.from_pretrained("mrm8488/flan-t5-small-finetuned-samsum")
 
 model = AutoModelForSeq2SeqLM.from_pretrained("mrm8488/flan-t5-small-finetuned-samsum")
 
-class Input(BaseModel):
-    text: str
 
-
-def predict_sentiment(input: Input, words):
-    input_ids = tokenizer(input.text, return_tensors="pt").input_ids
+def predict_sentiment(input, words):
+    input_ids = tokenizer(input, return_tensors="pt").input_ids
     outputs = model.generate(input_ids, max_length=words)
     decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
     return f"{decoded_output}"
@@ -19,6 +15,6 @@ def predict_sentiment(input: Input, words):
 conversation = gr.Textbox(lines=2, placeholder="Conversations Here...")
 
 
-iface = gr.Interface(fn=predict_sentiment, inputs=[Input(text=conversation), gr.Slider(10, 100)], outputs="text")
+iface = gr.Interface(fn=predict_sentiment, inputs=[conversation, gr.Slider(10, 100)], outputs="text")
 
 iface.launch()
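
For reference, a minimal sketch of how the updated predict_sentiment path can be exercised outside of Gradio. The sample dialogue and the max_length value of 50 are illustrative and not part of the commit; only the model checkpoint and the tokenize/generate/decode steps come from app.py.

# Minimal local check of the summarization logic used in app.py after this commit.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("mrm8488/flan-t5-small-finetuned-samsum")
model = AutoModelForSeq2SeqLM.from_pretrained("mrm8488/flan-t5-small-finetuned-samsum")

# Hypothetical SAMSum-style conversation; any dialogue string works here.
dialogue = (
    "Anna: Are we still on for lunch tomorrow?\n"
    "Ben: Yes, 12:30 at the usual place.\n"
    "Anna: Perfect, see you there."
)

# Mirrors predict_sentiment(dialogue, 50): tokenize, generate, decode.
input_ids = tokenizer(dialogue, return_tensors="pt").input_ids
outputs = model.generate(input_ids, max_length=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))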