import textwrap

import torch
import gradio as gr
from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM, MistralForCausalLM
from peft import PeftModel, PeftConfig


# Helper to wrap long model outputs for readability
def wrap_text(text, width=90):
    lines = text.split('\n')
    wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
    wrapped_text = '\n'.join(wrapped_lines)
    return wrapped_text


def multimodal_prompt(input_text, system_prompt="", max_length=512):
    """
    Generates text using the loaded language model, given a prompt.

    Args:
        input_text: The input text to generate a response for.
        system_prompt: Optional system prompt.
        max_length: Maximum length of the generated text.

    Returns:
        A string containing the generated text.
    """
    # Wrap the input text in Mistral's instruction format
    formatted_input = f"[INST]{input_text}[/INST]"

    # Encode the input text
    encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
    model_inputs = encodeds.to(device)

    # Generate a response using the model
    output = peft_model.generate(
        **model_inputs,
        max_length=max_length,
        use_cache=True,
        early_stopping=True,
        bos_token_id=peft_model.config.bos_token_id,
        eos_token_id=peft_model.config.eos_token_id,
        pad_token_id=peft_model.config.eos_token_id,
        temperature=0.1,
        do_sample=True
    )

    # Decode the response
    response_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return response_text


# Define the device
device = "cuda" if torch.cuda.is_available() else "cpu"

# Use the base model's ID
base_model_id = "mistralai/Mistral-7B-v0.1"
model_directory = "Tonic/mistralmed"

# Instantiate the tokenizer
tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True, padding_side="left")
# tokenizer = AutoTokenizer.from_pretrained("Tonic/mistralmed", trust_remote_code=True, padding_side="left")
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = 'left'

# Specify the configuration class for the model
# model_config = AutoConfig.from_pretrained(base_model_id)

# Load the PEFT model with the specified configuration
# peft_model = AutoModelForCausalLM.from_pretrained(base_model_id, config=model_config)

# Load the base model, then attach the PEFT adapter on top of it
peft_config = PeftConfig.from_pretrained("Tonic/mistralmed", token="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")
peft_model = MistralForCausalLM.from_pretrained(base_model_id, trust_remote_code=True)
peft_model = PeftModel.from_pretrained(peft_model, "Tonic/mistralmed", token="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")


class ChatBot:
    def __init__(self):
        self.history = []

    def predict(self, input_text):
        # Encode user input
        user_input_ids = tokenizer.encode(input_text, return_tensors="pt")

        # Concatenate the user input with chat history
        if len(self.history) > 0:
            chat_history_ids = torch.cat([self.history, user_input_ids], dim=-1)
        else:
            chat_history_ids = user_input_ids

        # Generate a response using the PEFT model
        response = peft_model.generate(input_ids=chat_history_ids, max_length=512, pad_token_id=tokenizer.eos_token_id)

        # Update chat history
        self.history = chat_history_ids

        # Decode and return the response
        response_text = tokenizer.decode(response[0], skip_special_tokens=True)
        return response_text


bot = ChatBot()

title = "👋🏻Welcome to Tonic's MistralMed Chat🚀"
description = "You can use this Space to test out the current model (MistralMed) or duplicate this Space and use it for any other model on 🤗HuggingFace. Join me on Discord to build together."
examples = [["What is the boiling point of nitrogen?"]]

iface = gr.Interface(
    fn=bot.predict,
    title=title,
    description=description,
    examples=examples,
    inputs="text",
    outputs="text",
    theme="ParityError/Anime"
)

iface.launch()
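
# A minimal, optional sketch (not part of the original Space): exercising the same
# pipeline from the command line without the Gradio UI. It reuses only names defined
# above (bot, wrap_text); uncomment it and comment out iface.launch() above to run a
# one-off local sanity check instead of serving the app.
#
# if __name__ == "__main__":
#     reply = bot.predict("What is the boiling point of nitrogen?")
#     print(wrap_text(reply))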