"""Gradio demo for generating fixed-token-length prompts for LLM benchmarking."""

import gradio as gr

from prompt_generator import generate_prompt

# Tokenizers offered in the dropdown; custom HF model ids are also allowed.
tokenizers = [
    "google/gemma-7b",
    "meta-llama/Llama-2-7b",
    "mistralai/Mistral-7B-v0.1",
    "facebook/opt-2.7b",
    "microsoft/phi-2",
    "THUDM/chatglm3-6b",
    "Qwen/Qwen1.5-7B-Chat",
    "bigscience/bloom-560m",
    "ise-uiuc/Magicoder-S-DS-6.7B",
    "google/flan-t5-base",
    "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    "google-bert/bert-base-uncased",
]


def generate(model_id, num_tokens):
    """Generate a prompt of *num_tokens* tokens for *model_id*'s tokenizer.

    Args:
        model_id: Hugging Face model id whose tokenizer measures length.
        num_tokens: Desired token count (string from the UI textbox).

    Returns:
        Tuple of (prompt text, path to the JSONL file it was written to).
    """
    # Textbox input is a raw string; strip whitespace so e.g. " 128 "
    # does not raise ValueError on int().
    num_tokens = int(str(num_tokens).strip())
    output_file = f"prompt_{num_tokens}.jsonl"
    prompt = generate_prompt(model_id, num_tokens, silent=True, output_file=output_file)
    return prompt, output_file


demo = gr.Interface(
    fn=generate,
    title="Prompt Generator",
    description="Generate prompts with a given length for testing transformer models. "
    "Prompt source: https://archive.org/stream/alicesadventures19033gut/19033.txt",
    inputs=[
        gr.Dropdown(label="Tokenizer", choices=tokenizers, allow_custom_value=True),
        gr.Textbox(label="Number of Tokens"),
    ],
    outputs=[gr.Textbox(label="prompt", show_copy_button=True), gr.File(label="Json file")],
    examples=[
        ["mistralai/Mistral-7B-v0.1", 32],
        ["mistralai/Mistral-7B-v0.1", 64],
        ["mistralai/Mistral-7B-v0.1", 128],
        ["mistralai/Mistral-7B-v0.1", 512],
        ["mistralai/Mistral-7B-v0.1", 1024],
        ["mistralai/Mistral-7B-v0.1", 2048],
    ],
    cache_examples=False,
    # allow_flagging expects "never"/"auto"/"manual"; the boolean False was
    # only accepted via a deprecation shim and fails on newer Gradio versions.
    allow_flagging="never",
)

if __name__ == "__main__":
    demo.launch()