# Faseeh / app.py
import warnings

import gradio as gr
import torch
import weave
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

warnings.filterwarnings("ignore")

# Initialize Weave tracing; calls to @weave.op functions are logged under the "Faseeh" project
weave.init("Faseeh")

# Use the GPU if one is available, otherwise fall back to CPU
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("Abdulmohsena/Faseeh")
model = AutoModelForSeq2SeqLM.from_pretrained("Abdulmohsena/Faseeh").to(device)
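# Abdulmohsena/Faseeh is a seq2seq checkpoint from the Hugging Face Hub; per the UI below,
# it translates English text into Classical Arabic.
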
class Faseeh(weave.Model):
    @weave.op()
    def translate(self, input_data: str, temperature: float = 0.1):
        # Tokenize the input and move the tensors to the same device as the model
        inputs = tokenizer(input_data, return_tensors="pt").to(device)
        outputs = model.generate(
            **inputs,
            temperature=temperature,
            do_sample=True,
        )[0]
        prediction = tokenizer.decode(outputs, skip_special_tokens=True)
        return prediction

faseeh = Faseeh()
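# Illustrative direct usage (hypothetical example sentence, not part of the app):
# calling the op outside Gradio is also traced in the "Faseeh" Weave project, e.g.
#   faseeh.translate("Knowledge is light.")
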
# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Faseeh: The Eloquent Arabic Translator")

    # Input text box
    input_text = gr.Textbox(
        label="Input Text",
        placeholder="Enter text to translate from English to Classical Arabic",
    )

    # Output text box
    output_text = gr.Textbox(label="Classical Arabic Translation")
    # Button to trigger translation
    translate_btn = gr.Button("Translate")

    # Wire the button to the Weave-logged translate op
    translate_btn.click(faseeh.translate, inputs=input_text, outputs=output_text)
# Launch the Gradio app
if __name__ == "__main__":
    demo.launch()