Spaces:
Sleeping
Sleeping
Sanzana Lora
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -16,27 +16,27 @@ paraphrase_tokenizer = AutoTokenizer.from_pretrained("csebuetnlp/banglat5_bangla
|
|
16 |
|
17 |
# Function to perform machine translation
|
18 |
def translate_text_en_bn(input_text):
    """Translate English text to Bangla.

    Tokenizes the input, runs the en->bn translation model, and decodes
    the first generated sequence back to a string.

    Args:
        input_text: English source string to translate.

    Returns:
        The decoded Bangla translation with special tokens stripped.
    """
    # Fix: the original tokenizer call was truncated ("...tokenizer_en_bn(")
    # and never closed; pass the text and request PyTorch tensors so the
    # encodings can be unpacked into model.generate.
    inputs = translation_tokenizer_en_bn(input_text, return_tensors="pt")
    outputs = translation_model_en_bn.generate(**inputs)
    translated_text = translation_tokenizer_en_bn.decode(outputs[0], skip_special_tokens=True)
    return translated_text
|
23 |
|
24 |
def translate_text_bn_en(input_text):
    """Translate Bangla text to English.

    Tokenizes the input, runs the bn->en translation model, and decodes
    the first generated sequence back to a string.

    Args:
        input_text: Bangla source string to translate.

    Returns:
        The decoded English translation with special tokens stripped.
    """
    # Fix: the original tokenizer call was truncated and unclosed; pass the
    # text and request PyTorch tensors for model.generate.
    inputs = translation_tokenizer_bn_en(input_text, return_tensors="pt")
    outputs = translation_model_bn_en.generate(**inputs)
    translated_text = translation_tokenizer_bn_en.decode(outputs[0], skip_special_tokens=True)
    return translated_text
|
29 |
|
30 |
# Function to perform summarization
|
31 |
def summarize_text(input_text):
    """Summarize the given text with the summarization model.

    Tokenizes the input, generates a summary, and decodes the first
    generated sequence back to a string.

    Args:
        input_text: Text to summarize.

    Returns:
        The decoded summary with special tokens stripped.
    """
    # Fix: the original tokenizer call was truncated and unclosed; pass the
    # text and request PyTorch tensors for model.generate.
    inputs = summarization_tokenizer(input_text, return_tensors="pt")
    outputs = summarization_model.generate(**inputs)
    summarized_text = summarization_tokenizer.decode(outputs[0], skip_special_tokens=True)
    return summarized_text
|
36 |
|
37 |
# Function to perform paraphrasing
|
38 |
def paraphrase_text(input_text):
    """Paraphrase the given text with the paraphrase model.

    Tokenizes the input, generates a paraphrase, and decodes the first
    generated sequence back to a string.

    Args:
        input_text: Text to paraphrase.

    Returns:
        The decoded paraphrase with special tokens stripped.
    """
    # Fix: the original tokenizer call was truncated and unclosed; pass the
    # text and request PyTorch tensors for model.generate.
    inputs = paraphrase_tokenizer(input_text, return_tensors="pt")
    outputs = paraphrase_model.generate(**inputs)
    paraphrased_text = paraphrase_tokenizer.decode(outputs[0], skip_special_tokens=True)
    return paraphrased_text
|
|
|
16 |
|
17 |
# Function to perform machine translation
|
18 |
def translate_text_en_bn(input_text):
    """Return the Bangla translation of *input_text*.

    Encodes the text with the en->bn tokenizer, generates with the
    matching model, and decodes the first output sequence.
    """
    encoded = translation_tokenizer_en_bn(input_text, return_tensors="pt")
    generated = translation_model_en_bn.generate(**encoded)
    return translation_tokenizer_en_bn.decode(generated[0], skip_special_tokens=True)
|
23 |
|
24 |
def translate_text_bn_en(input_text):
    """Return the English translation of *input_text*.

    Encodes the text with the bn->en tokenizer, generates with the
    matching model, and decodes the first output sequence.
    """
    encoded = translation_tokenizer_bn_en(input_text, return_tensors="pt")
    generated = translation_model_bn_en.generate(**encoded)
    return translation_tokenizer_bn_en.decode(generated[0], skip_special_tokens=True)
|
29 |
|
30 |
# Function to perform summarization
|
31 |
def summarize_text(input_text):
    """Return a model-generated summary of *input_text*.

    Encodes the text, generates with the summarization model, and
    decodes the first output sequence.
    """
    encoded = summarization_tokenizer(input_text, return_tensors="pt")
    generated = summarization_model.generate(**encoded)
    return summarization_tokenizer.decode(generated[0], skip_special_tokens=True)
|
36 |
|
37 |
# Function to perform paraphrasing
|
38 |
def paraphrase_text(input_text):
    """Return a model-generated paraphrase of *input_text*.

    Encodes the text, generates with the paraphrase model, and decodes
    the first output sequence.
    """
    encoded = paraphrase_tokenizer(input_text, return_tensors="pt")
    generated = paraphrase_model.generate(**encoded)
    return paraphrase_tokenizer.decode(generated[0], skip_special_tokens=True)
|