import os
from threading import Thread

import gradio as gr
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# Read the Hugging Face token from the environment (not required for this public model).
HF_TOKEN = os.environ.get("HF_TOKEN", None)

DESCRIPTION = '''

# Mistral 7B Instruct v0.3

This Space demonstrates the instruction-tuned model [mistralai/Mistral-7B-Instruct-v0.3](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3). The Mistral-7B-Instruct-v0.3 Large Language Model (LLM) is an instruct fine-tuned version of Mistral-7B-v0.3, which extends Mistral-7B-v0.2 with a larger vocabulary. Feel free to play with it, or duplicate the Space to run it privately!

šŸ”Ž For more details about the release and how to use the model with `transformers`, visit the model card linked above.

šŸ¦• The Instruct model has a vocabulary extended to 32768 tokens, supports the v3 tokenizer, and supports function calling.

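🧰 As a rough sketch (assuming a recent `transformers` with tool support in `apply_chat_template`), function calling can look like the snippet below; the `get_current_weather` tool is a hypothetical placeholder for illustration, not something this Space defines:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3", device_map="auto")

def get_current_weather(location: str, format: str):
    """
    Get the current weather.

    Args:
        location: The city and state, e.g. San Francisco, CA
        format: The temperature unit to use. (choices: ["celsius", "fahrenheit"])
    """
    pass  # placeholder: the model only sees the schema, not the body

conversation = [{"role": "user", "content": "What's the weather like in Paris?"}]

# The chat template renders the tool schema into the prompt so the model
# can emit a tool call instead of plain text.
inputs = tokenizer.apply_chat_template(
    conversation,
    tools=[get_current_weather],
    add_generation_prompt=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```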
'''

PLACEHOLDER = """

Ask me anything...

""" css = """ h1 { text-align: center; display: block; } #duplicate-button { margin: auto; color: white; background: #1565c0; border-radius: 100vh; } """ # Load the tokenizer and model tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3") model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3", device_map="auto") terminators = [ tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>") ] @spaces.GPU(duration=120) def chat_mistral7b_v0dot3(message: str, history: list, temperature: float, max_new_tokens: int ) -> str: """ Generate a streaming response using the mistralai/Mistral-7B-Instruct-v0.3 model. Args: message (str): The input message. history (list): The conversation history used by ChatInterface. temperature (float): The temperature for generating the response. max_new_tokens (int): The maximum number of new tokens to generate. Returns: str: The generated response. """ conversation = [] for user, assistant in history: conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}]) conversation.append({"role": "user", "content": message}) input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device) streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True) generate_kwargs = dict( input_ids= input_ids, streamer=streamer, max_new_tokens=max_new_tokens, do_sample=True, temperature=temperature, eos_token_id=terminators, ) # This will enforce greedy generation (do_sample=False) when the temperature is passed 0, avoiding the crash. if temperature == 0: generate_kwargs['do_sample'] = False t = Thread(target=model.generate, kwargs=generate_kwargs) t.start() outputs = [] for text in streamer: outputs.append(text) #print(outputs) yield "".join(outputs) # Gradio block chatbot=gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface') with gr.Blocks(fill_height=True, css=css) as demo: gr.Markdown(DESCRIPTION) gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button") gr.ChatInterface( fn=chat_mistral7b_v0dot3, chatbot=chatbot, fill_height=True, additional_inputs_accordion=gr.Accordion(label="āš™ļø Parameters", open=False, render=False), additional_inputs=[ gr.Slider(minimum=0, maximum=1, step=0.1, value=0.95, label="Temperature", render=False), gr.Slider(minimum=128, maximum=4096, step=1, value=512, label="Max new tokens", render=False ), ], examples=[ ['How to setup a human base on Mars? Give short answer.'], ['Explain theory of relativity to me like Iā€™m 8 years old.'], ['What is 9,000 * 9,000?'], ['Write a pun-filled happy birthday message to my friend Alex.'], ['Justify why a penguin might make a good king of the jungle.'] ], cache_examples=False, ) if __name__ == "__main__": demo.launch()