"""Gradio demo for LLaVA-v1.6: a text prompt plus an optional image URL."""

import gradio as gr
from transformers import AutoProcessor, LlavaNextForConditionalGeneration
from transformers.image_utils import load_image

# NOTE(review): the original id "liuhaotian/llava-v1.6-34b" is the research
# repo, not a transformers-format checkpoint; the converted weights live under
# the llava-hf org and are what LlavaNextForConditionalGeneration can load.
MODEL_ID = "llava-hf/llava-v1.6-34b-hf"

# The processor bundles the tokenizer with the image preprocessor — both are
# required to feed a vision-language model (a bare AutoTokenizer cannot
# produce pixel_values).
processor = AutoProcessor.from_pretrained(MODEL_ID)
model = LlavaNextForConditionalGeneration.from_pretrained(MODEL_ID)


def generate_response(text, image_url=None):
    """Generate a model response for *text*, optionally grounded in an image.

    Args:
        text: The user prompt.
        image_url: Optional URL of an image the prompt refers to. When given,
            the image is downloaded and its pixels are fed to the model;
            appending the raw URL string to the prompt (as the original code
            did) never shows the model the image.

    Returns:
        The generated response as a plain string.
    """
    if image_url:
        # LLaVA expects an explicit <image> placeholder in the prompt, which
        # the processor expands into the image-patch token positions.
        image = load_image(image_url)
        inputs = processor(
            text=f"<image>\n{text}", images=image, return_tensors="pt"
        )
    else:
        inputs = processor(text=text, return_tensors="pt")

    # Beam search with a mild brevity bias, as in the original demo.
    # max_new_tokens (not max_length) so the budget excludes the prompt.
    output_ids = model.generate(
        **inputs, max_new_tokens=256, num_beams=5, length_penalty=0.8
    )

    # skip_special_tokens drops <s>, </s> and image markers from the output.
    return processor.batch_decode(output_ids, skip_special_tokens=True)[0]


# One Gradio input component per function parameter; gr.HuggingFaceTGIGenerator
# does not exist in the Gradio API, and a model object is not a component.
demo = gr.Interface(
    fn=generate_response,
    inputs=[gr.Textbox(label="Prompt"), gr.Textbox(label="Image URL (optional)")],
    outputs="text",
)

if __name__ == "__main__":
    demo.launch()