swcrazyfan committed on
Commit
c336de4
1 Parent(s): a242b5d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -21
app.py CHANGED
@@ -7,29 +7,29 @@ model = T5ForConditionalGeneration.from_pretrained(best_model_path)
7
  # Load the tokenizer for the 2-way Kingify/Dekingify T5 checkpoint;
  # shared by both translation directions below.
  tokenizer = T5Tokenizer.from_pretrained("swcrazyfan/Kingify-2Way-T5-Large-v1_1")
8
 
9
def tokenize_data(text, dekingify):
    """Encode *text* with the task prefix expected by the 2-way T5 model.

    Args:
        text: Input text (coerced to str).
        dekingify: The string "Dekingify" selects the dekingify direction;
            any other value selects kingify.

    Returns:
        Dict with 'input_ids' and 'attention_mask' PyTorch tensors,
        padded/truncated to 512 tokens.
    """
    # Pick the task prefix, then append the explicit end-of-sequence marker.
    prefix = "dekingify: " if dekingify == "Dekingify" else "kingify: "
    model_input = prefix + str(text) + ' </s>'

    encoded = tokenizer(
        model_input,
        padding='max_length',
        truncation=True,
        max_length=512,
        return_attention_mask=True,
        return_tensors='pt',
    )
    return {
        "input_ids": encoded['input_ids'],
        "attention_mask": encoded['attention_mask'],
    }
22
 
23
def generate_answers(text, max_length, num_beams, dekingify):
    """Generate one translated sequence for *text* and return it as a string.

    Args:
        text: Input text to transform.
        max_length: Maximum generated sequence length.
        num_beams: Beam-search width.
        dekingify: Direction selector passed through to tokenize_data.

    Returns:
        The decoded model output with special tokens removed.
    """
    batch = tokenize_data(text, dekingify)
    output_ids = model.generate(
        input_ids=batch['input_ids'],
        attention_mask=batch['attention_mask'],
        do_sample=True,
        num_beams=num_beams,
        max_length=max_length,
        min_length=1,
        early_stopping=True,
        num_return_sequences=1,
    )
    # Only one sequence is requested, so decode the first (and only) result.
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
33
 
34
# Build and launch the Gradio UI.
# FIX: the original Radio label used a curly opening quote and was missing
# its closing quote (label=“What do you want to do?,) — a SyntaxError.
# NOTE(review): gr.inputs.* is the legacy Gradio input API — confirm the
# installed Gradio version still supports it before upgrading.
iface = gr.Interface(
    title="DeKingify",
    description="Write anything below. Then, click submit to 'DeKingify' it.",
    fn=generate_answers,
    inputs=[
        gr.inputs.Textbox(label="Original Text", lines=10),
        gr.inputs.Slider(label="Maximum Length", minimum=1, maximum=512, default=512, step=1),
        gr.inputs.Slider(label="Number of Beams", minimum=1, maximum=50, default=5, step=1),
        gr.inputs.Radio(label="What do you want to do?", choices=["Kingify", "Dekingify"]),
    ],
    outputs=["text"],
)
iface.launch(inline=False)
 
7
  # Load the tokenizer for the 2-way Kingify/Dekingify T5 checkpoint;
  # used for both encoding inputs and decoding generated output.
  tokenizer = T5Tokenizer.from_pretrained("swcrazyfan/Kingify-2Way-T5-Large-v1_1")
8
 
9
def tokenize_data(text, dekingify):
    """Tokenize *text* for the 2-way T5 model, prefixed by the chosen task.

    Args:
        text: Input text (coerced to str).
        dekingify: "Dekingify" selects the dekingify prefix; anything else
            selects kingify.

    Returns:
        Dict with 'input_ids' and 'attention_mask' PyTorch tensors,
        padded/truncated to 512 tokens.
    """
    # Select the task prefix and append the explicit end-of-sequence marker.
    if dekingify == "Dekingify":
        task_text = "dekingify: " + str(text) + ' </s>'
    else:
        task_text = "kingify: " + str(text) + ' </s>'

    encoded = tokenizer(
        task_text,
        padding='max_length',
        truncation=True,
        max_length=512,
        return_attention_mask=True,
        return_tensors='pt',
    )
    return {
        "input_ids": encoded['input_ids'],
        "attention_mask": encoded['attention_mask'],
    }
22
 
23
def generate_answers(text, max_length, num_beams, dekingify):
    """Run generation on *text* and return the single decoded result.

    Args:
        text: Input text to transform.
        max_length: Maximum length of the generated sequence.
        num_beams: Beam-search width.
        dekingify: Direction selector forwarded to tokenize_data.

    Returns:
        Decoded output string with special tokens stripped.
    """
    encoded = tokenize_data(text, dekingify)
    generated = model.generate(
        input_ids=encoded['input_ids'],
        attention_mask=encoded['attention_mask'],
        do_sample=True,
        num_beams=num_beams,
        max_length=max_length,
        min_length=1,
        early_stopping=True,
        num_return_sequences=1,
    )
    # num_return_sequences=1, so the first row is the only candidate.
    return tokenizer.decode(generated[0], skip_special_tokens=True)
33
 
34
# Build and launch the Gradio UI.
# FIX: the Radio label still opened with a curly quote (label=“What do you
# want to do?") — a Python SyntaxError; replaced with a straight quote.
# NOTE(review): gr.inputs.* is the legacy Gradio input API — confirm the
# installed Gradio version still supports it before upgrading.
iface = gr.Interface(
    title="DeKingify",
    description="Write anything below. Then, click submit to 'DeKingify' it.",
    fn=generate_answers,
    inputs=[
        gr.inputs.Textbox(label="Original Text", lines=10),
        gr.inputs.Slider(label="Maximum Length", minimum=1, maximum=512, default=512, step=1),
        gr.inputs.Slider(label="Number of Beams", minimum=1, maximum=50, default=5, step=1),
        gr.inputs.Radio(label="What do you want to do?", choices=["Kingify", "Dekingify"]),
    ],
    outputs=["text"],
)
iface.launch(inline=False)