import os
import html
import logging

import gradio as gr
from aranizer import (
    aranizer_bpe50k, aranizer_bpe64k, aranizer_bpe86k,
    aranizer_sp32k, aranizer_sp50k, aranizer_sp64k, aranizer_sp86k,
)
from transformers import AutoTokenizer
from huggingface_hub import login
# Retrieve your Hugging Face token from the environment variable
HF_TOKEN = os.getenv('HF_TOKEN')
if HF_TOKEN:
    HF_TOKEN = HF_TOKEN.strip()  # Remove any leading or trailing whitespace/newlines
    login(token=HF_TOKEN)
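# The token matters for the gated repositories below (meta-llama/Meta-Llama-3-8B
# and the CohereForAI command-r models); the public tokenizers load without it.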
# Load additional tokenizers from transformers
gpt_13b_tokenizer = AutoTokenizer.from_pretrained("FreedomIntelligence/AceGPT-13B")
gpt_7b_tokenizer = AutoTokenizer.from_pretrained("FreedomIntelligence/AceGPT-7B")
jais_13b_tokenizer = AutoTokenizer.from_pretrained("inception-mbzuai/jais-13b")
arabert_tokenizer = AutoTokenizer.from_pretrained("aubmindlab/bert-base-arabertv2")
# Try to load the gated tokenizer
try:
    meta_llama_tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
except Exception as e:
    meta_llama_tokenizer = None
    logging.warning(f"Could not load meta-llama/Meta-Llama-3-8B tokenizer: {e}")
# The Cohere tokenizers are gated as well; guard them the same way so a
# missing access grant does not crash the Space at startup
try:
    cohere_command_r_v01_tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
    cohere_command_r_plus_tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-plus")
except Exception as e:
    cohere_command_r_v01_tokenizer = None
    cohere_command_r_plus_tokenizer = None
    logging.warning(f"Could not load the Cohere tokenizers: {e}")
# List of available tokenizers and a dictionary to load them; gated models
# are only offered if their tokenizer actually loaded
tokenizer_options = [
    "aranizer_bpe50k", "aranizer_bpe64k", "aranizer_bpe86k",
    "aranizer_sp32k", "aranizer_sp50k", "aranizer_sp64k", "aranizer_sp86k",
    "FreedomIntelligence/AceGPT-13B",
    "FreedomIntelligence/AceGPT-7B",
    "inception-mbzuai/jais-13b",
    "aubmindlab/bert-base-arabertv2",
]
if cohere_command_r_v01_tokenizer:
    tokenizer_options += ["CohereForAI/c4ai-command-r-v01", "CohereForAI/c4ai-command-r-plus"]
if meta_llama_tokenizer:
    tokenizer_options.append("meta-llama/Meta-Llama-3-8B")
tokenizers = {
    "aranizer_bpe50k": aranizer_bpe50k.get_tokenizer,
    "aranizer_bpe64k": aranizer_bpe64k.get_tokenizer,
    "aranizer_bpe86k": aranizer_bpe86k.get_tokenizer,
    "aranizer_sp32k": aranizer_sp32k.get_tokenizer,
    "aranizer_sp50k": aranizer_sp50k.get_tokenizer,
    "aranizer_sp64k": aranizer_sp64k.get_tokenizer,
    "aranizer_sp86k": aranizer_sp86k.get_tokenizer,
    "FreedomIntelligence/AceGPT-13B": lambda: gpt_13b_tokenizer,
    "FreedomIntelligence/AceGPT-7B": lambda: gpt_7b_tokenizer,
    "inception-mbzuai/jais-13b": lambda: jais_13b_tokenizer,
    "aubmindlab/bert-base-arabertv2": lambda: arabert_tokenizer,
}
if cohere_command_r_v01_tokenizer:
    tokenizers["CohereForAI/c4ai-command-r-v01"] = lambda: cohere_command_r_v01_tokenizer
    tokenizers["CohereForAI/c4ai-command-r-plus"] = lambda: cohere_command_r_plus_tokenizer
if meta_llama_tokenizer:
    tokenizers["meta-llama/Meta-Llama-3-8B"] = lambda: meta_llama_tokenizer
def compare_tokenizers(tokenizer_index, text):
    tokenizer_name = tokenizer_options[tokenizer_index]
    tokenizer = tokenizers[tokenizer_name]()
    tokens = tokenizer.tokenize(text)
    encoded_output = tokenizer.encode(text, add_special_tokens=True)
    decoded_text = tokenizer.decode(encoded_output, skip_special_tokens=True)
    # Some tokenizers return raw bytes; decode those so they render as text
    tokens_display = [token.decode('utf-8', errors='replace') if isinstance(token, bytes) else token for token in tokens]
    # Prepare the results to be displayed in HTML format, escaping the pieces
    # so markup characters in the input cannot break the page
    tokens_html = "".join([f"<span style='background-color:#f0f0f0; padding:4px; margin:2px; border-radius:3px; border:1px solid #ccc;'>{html.escape(token)}</span>" for token in tokens_display])
    encoded_html = "".join([f"<span style='background-color:#e0f7fa; padding:4px; margin:2px; border-radius:3px; border:1px solid #00acc1;'>{token}</span>" for token in encoded_output])
    decoded_html = f"<div style='background-color:#e8f5e9; padding:10px; border-radius:3px; border:1px solid #4caf50;'>{html.escape(decoded_text)}</div>"
    results_html = f"""
    <div style='font-family: Arial, sans-serif;'>
        <h3 style='color: #00796b;'>Tokenizer: {tokenizer_name}</h3>
        <p><strong>Tokens:</strong> {tokens_html}</p>
        <p><strong>Encoded:</strong> {encoded_html}</p>
        <p><strong>Decoded:</strong> {decoded_html}</p>
    </div>
    """
    return results_html
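# Illustrative call (index 0 selects "aranizer_bpe50k" in tokenizer_options):
#   compare_tokenizers(0, "مرحبا بالعالم")
# returns an HTML fragment: "Tokens" shows the subword pieces, "Encoded" the
# token ids, and "Decoded" the round-tripped text.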
# Define the Gradio interface components with a dropdown for model selection
inputs_component = [
    gr.Dropdown(choices=tokenizer_options, label="Select Tokenizer", type="index"),
    gr.Textbox(lines=2, placeholder="اكتب النص هنا...", label="Input Text")
]
outputs_component = gr.HTML(label="Results")
# Setting up the interface
iface = gr.Interface(
    fn=compare_tokenizers,
    inputs=inputs_component,
    outputs=outputs_component,
    title="Arabic Tokenizer Arena",
    live=True
)
# Launching the Gradio app
iface.launch()
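# Note: live=True re-runs compare_tokenizers on every keystroke; for the
# heavier tokenizers, dropping it in favor of a submit button may feel more
# responsive.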