# Hugging Face Space app — author: maxiw
# commit: 7df7460 ("fixes"), file size: 1.72 kB
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import spaces
import re
# Model checkpoints offered in the UI; each gets a preloaded model + tokenizer.
MODEL_IDS = ("jinaai/reader-lm-0.5b",)

# Load every model onto the GPU once at startup, in eval mode.
models = {
    mid: AutoModelForCausalLM.from_pretrained(mid, trust_remote_code=True).to("cuda").eval()
    for mid in MODEL_IDS
}
# Matching tokenizers, keyed by the same model id.
tokenizers = {
    mid: AutoTokenizer.from_pretrained(mid, trust_remote_code=True)
    for mid in MODEL_IDS
}
@spaces.GPU
def run_example(html_content, model_id="jinaai/reader-lm-0.5b"):
    """Convert raw HTML to Markdown with the selected reader-lm model.

    Args:
        html_content: Raw HTML string to convert.
        model_id: Key into the module-level ``models``/``tokenizers`` dicts.

    Returns:
        The assistant turn of the generated chat transcript (Markdown),
        or "" when no complete assistant turn is present in the output.
    """
    model = models[model_id]
    tokenizer = tokenizers[model_id]
    # reader-lm is chat-tuned: wrap the HTML as a single user message.
    messages = [{"role": "user", "content": html_content}]
    input_text = tokenizer.apply_chat_template(messages, tokenize=False)
    inputs = tokenizer.encode(input_text, return_tensors="pt").to("cuda")
    # Greedy decoding. NOTE: the original passed temperature=0 together with
    # do_sample=False; temperature is ignored (and warned about by recent
    # transformers versions) when not sampling, so it is omitted here.
    outputs = model.generate(
        inputs,
        max_new_tokens=1024,
        do_sample=False,
        repetition_penalty=1.08,
    )
    # Pull only the assistant turn out of the full decoded transcript.
    pattern = r"<\|im_start\|>assistant(.*?)<\|im_end\|>"
    matches = re.findall(pattern, tokenizer.decode(outputs[0]), re.DOTALL)
    # Fix: the original indexed matches[0] unconditionally, raising
    # IndexError whenever generation was truncated at max_new_tokens and
    # no closing <|im_end|> was emitted. Return "" in that case instead.
    return matches[0] if matches else ""
# Custom CSS for the Blocks app: constrain the element with id "output"
# to a fixed, scrollable, bordered box. (No component currently sets
# elem_id="output" — presumably intended for the Markdown textbox; verify.)
css = """
#output {
height: 500px;
overflow: auto;
border: 1px solid #ccc;
}
"""
# Build the Gradio UI: one tab with an input column (model picker, HTML
# textbox, submit button) and an output column (Markdown textbox).
with gr.Blocks(css=css) as demo:
    gr.Markdown("""
    # HTML-to-Markdown
    """)
    with gr.Tab(label="Main"):
        with gr.Row():
            with gr.Column():
                # Dropdown is populated from the module-level models dict,
                # so adding a model there automatically surfaces it here.
                model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="jinaai/reader-lm-0.5b")
                html_content = gr.Textbox(label="HTML")
                submit_btn = gr.Button(value="Submit")
            with gr.Column():
                output_text = gr.Textbox(label="Markdown")
        # Wire the button to run_example(html_content, model_selector) -> output_text.
        submit_btn.click(run_example, [html_content, model_selector], [output_text])

# debug=True surfaces errors in the console/UI while the Space runs.
demo.launch(debug=True)