# Import necessary libraries
import gradio as gr
import torch
import spaces
from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM, pipeline
# Load Goldfish model for Dhivehi
model_name = 'div_thaa_full'
HF_CACHE = '.hf_cache'
# Load model
goldfish_model = 'goldfish-models/' + model_name
config = AutoConfig.from_pretrained(goldfish_model, cache_dir=HF_CACHE)
tokenizer = AutoTokenizer.from_pretrained(goldfish_model, cache_dir=HF_CACHE)
model = AutoModelForCausalLM.from_pretrained(goldfish_model, config=config, cache_dir=HF_CACHE)
if torch.cuda.is_available():
    model = model.cuda()  # Move the model to the GPU
# Create text generation pipeline
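# device=0 selects the first CUDA device; device=-1 runs the pipeline on the CPU.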
text_generator = pipeline('text-generation', model=model, tokenizer=tokenizer, device=0 if torch.cuda.is_available() else -1)
# Function to generate text
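# On Hugging Face ZeroGPU Spaces, the @spaces.GPU decorator allocates a GPU only for the duration of each call.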
@spaces.GPU
def generate_text(input_text):
    output = text_generator(input_text, max_new_tokens=25, add_special_tokens=False, do_sample=True)
    return output[0]['generated_text']
# Create Gradio interface
with gr.Blocks(css="""
    .thaana textarea {
        font-size: 18px !important;
        font-family: 'MV_Faseyha', 'Faruma', 'A_Faruma', 'Noto Sans Thaana', 'MV Boli';
        line-height: 1.8 !important;
    }
""") as demo:
gr.Markdown("# Demo Dhivehi Text Generator")
gr.Markdown("Generate text in Dhivehi language. This model is trained to generate coherent text based on the input prompt.")
    with gr.Row():
        input_text = gr.Textbox(
            lines=2,
            label="Enter Dhivehi Text",
            rtl=True,
            elem_classes="thaana"
        )
        output_text = gr.Textbox(
            lines=2,
            rtl=True,
            elem_classes="thaana"
        )
    generate_btn = gr.Button("Generate")
    generate_btn.click(
        fn=generate_text,
        inputs=input_text,
        outputs=output_text
    )
gr.Markdown("""
Model: Goldfish is a suite of monolingual language models trained for 350 languages.
This model is the Dhivehi (Thaana script). For more details, visit the
[Goldfish Models GitHub repository](https://github.com/tylerachang/goldfish).
""")
    examples = gr.Examples(
        examples=[
            ["ދިވެހިރާއްޖެ"],
            ["އެމެރިކާ އިންތިޚާބު"],
            ["ސަލާމް"],
            ["ދުނިޔޭގެ ސިއްޙަތު ޖަމްޢިއްޔާ"],
            ["ޤަދީމީ ސަގާފަތް"],
            ["ޑިމޮކްރަސީ"]
        ],
        inputs=input_text,
        outputs=output_text,
        fn=generate_text
    )
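# Launch the Gradio app. On a Hugging Face Space this file is executed automatically;
# it can also be run locally with `python app.py` (assuming the file is saved as app.py).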
if __name__ == "__main__":
    demo.launch()