Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
import torch
|
2 |
-
from transformers import
|
3 |
import gradio as gr
|
4 |
|
5 |
best_model_path = "swcrazyfan/Kingify-2Way-T5-Large-v1_1"
|
@@ -7,32 +7,48 @@ model = T5ForConditionalGeneration.from_pretrained(best_model_path)
|
|
7 |
tokenizer = T5Tokenizer.from_pretrained("swcrazyfan/Kingify-2Way-T5-Large-v1_1")
|
8 |
|
9 |
def tokenize_data(text, dekingify):
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
|
19 |
-
inputs={"input_ids": tokenized_inputs['input_ids'],
|
20 |
-
"attention_mask": tokenized_inputs['attention_mask']}
|
21 |
-
return inputs
|
22 |
-
|
23 |
-
#def generate_answers(text, max_length, num_beams, dekingify):
|
24 |
def generate_answers(text, dekingify):
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
|
|
|
|
|
|
|
|
34 |
|
35 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
36 |
|
37 |
-
iface
|
38 |
-
iface.launch(inline=False)
|
|
|
1 |
# --- Model setup -------------------------------------------------------------
# Loads the Kingify 2Way T5 checkpoint (translates modern English <-> King
# James style English) plus its tokenizer. Both come from the same repo, so
# the path constant is reused for each .from_pretrained call.
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer
import gradio as gr

best_model_path = "swcrazyfan/Kingify-2Way-T5-Large-v1_1"

# NOTE(review): this downloads weights at import time — first launch is slow.
model = T5ForConditionalGeneration.from_pretrained(best_model_path)
tokenizer = T5Tokenizer.from_pretrained(best_model_path)
|
8 |
|
9 |
def tokenize_data(text, dekingify):
    """Encode *text* for the model, prefixed with the requested task.

    The prefix ("kingify: " or "dekingify: ") tells the two-way T5 model
    which translation direction to perform. Returns a dict with the
    "input_ids" and "attention_mask" tensors expected by model.generate.

    NOTE(review): the explicit " </s>" suffix is legacy T5 usage — the
    tokenizer appends EOS itself — kept here to preserve behavior exactly.
    """
    if dekingify == "Dekingify":
        task_input = "dekingify: " + str(text) + " </s>"
    else:
        task_input = "kingify: " + str(text) + " </s>"

    # Pad/truncate to a fixed 512-token window and return PyTorch tensors.
    encoded = tokenizer(
        task_input,
        padding="max_length",
        truncation=True,
        max_length=512,
        return_attention_mask=True,
        return_tensors="pt",
    )
    return {
        "input_ids": encoded["input_ids"],
        "attention_mask": encoded["attention_mask"],
    }
25 |
|
|
|
|
|
|
|
|
|
|
|
26 |
def generate_answers(text, dekingify):
    """Translate *text* in the direction chosen by *dekingify* and return it.

    Tokenizes the input via tokenize_data, runs beam-search generation on
    the module-level model, and decodes the single best sequence back to a
    string (special tokens stripped).

    NOTE(review): do_sample=True together with num_beams=5 makes output
    non-deterministic across calls — preserved as-is from the original.
    """
    batch = tokenize_data(text, dekingify)
    output_ids = model.generate(
        input_ids=batch["input_ids"],
        attention_mask=batch["attention_mask"],
        do_sample=True,
        num_beams=5,
        max_length=512,
        min_length=1,
        early_stopping=True,
        num_return_sequences=1,
    )
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
40 |
|
41 |
+
# --- Gradio UI ---------------------------------------------------------------
# A simple two-input form: free text plus a direction selector, wired
# straight to generate_answers. All user-visible strings are unchanged.
iface = gr.Interface(
    fn=generate_answers,
    inputs=[
        gr.Textbox(label="Original Text", lines=10),
        gr.Radio(
            label="What do you want to do?",
            choices=["Kingify", "Dekingify"],
        ),
    ],
    outputs="text",
    title="Kingify 2Way",
    description=(
        "This is a custom AI model that translates modern English into "
        "17th-century English or 'King James' English (and vice versa). "
        "Write anything below, select 'Kingify' or 'Dekingify', "
        "and click submit."
    ),
)

iface.launch()
|