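This Space runs a small Gradio app that tokenizes a piece of text with several popular open-model tokenizers (LLaMA, Mistral, GPT-2/GPT-J, GPT-NeoX, Falcon, Phi-2, and T5) and reports how many tokens each one produces, sorted from most to fewest.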
from transformers import AutoTokenizer
import gradio as gr


def tokenize(input_text):
    # Count the tokens each tokenizer produces for the same input,
    # including any special tokens (e.g. BOS/EOS) the tokenizer adds.
    llama_tokens = len(llama_tokenizer(input_text, add_special_tokens=True)["input_ids"])
    mistral_tokens = len(mistral_tokenizer(input_text, add_special_tokens=True)["input_ids"])
    gpt2_tokens = len(gpt2_tokenizer(input_text, add_special_tokens=True)["input_ids"])
    gpt_neox_tokens = len(gpt_neox_tokenizer(input_text, add_special_tokens=True)["input_ids"])
    falcon_tokens = len(falcon_tokenizer(input_text, add_special_tokens=True)["input_ids"])
    phi2_tokens = len(phi2_tokenizer(input_text, add_special_tokens=True)["input_ids"])
    t5_tokens = len(t5_tokenizer(input_text, add_special_tokens=True)["input_ids"])

    token_lengths = {
        "LLaMa": llama_tokens,
        "Mistral": mistral_tokens,
        "GPT-2/GPT-J": gpt2_tokens,
        "GPT-NeoX": gpt_neox_tokens,
        "Falcon": falcon_tokens,
        "Phi-2": phi2_tokens,
        "T5": t5_tokens,
    }

    # Report the counts from largest to smallest, one tokenizer per line.
    sorted_tokens = sorted(token_lengths.items(), key=lambda x: x[1], reverse=True)
    result = "\n".join([f"{name}: {length}" for name, length in sorted_tokens])
    return result


if __name__ == "__main__":
    # Load each tokenizer once at startup; tokenize() reads them as module-level globals.
    llama_tokenizer = AutoTokenizer.from_pretrained("TheBloke/Llama-2-7B-fp16")
    mistral_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
    gpt2_tokenizer = AutoTokenizer.from_pretrained("gpt2")
    gpt_neox_tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    falcon_tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b")
    phi2_tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
    t5_tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-xxl")

    iface = gr.Interface(fn=tokenize, inputs=gr.Textbox(lines=7), outputs="text")
    iface.launch()
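To run the demo locally, save the script under a name of your choice (app.py is only a suggested name), install gradio and transformers (downloading some of the tokenizers may also require sentencepiece and protobuf, depending on your transformers version), then start it with python, e.g. python app.py. Gradio prints a local URL once the interface is up, and the first launch will download each tokenizer from the Hugging Face Hub.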