sst1 committed on
Commit
aed8a0a
1 Parent(s): b5fb165

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -1,14 +1,14 @@
1
  import gradio as gr
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
 
3
 
4
- # Load the quantized model and tokenizer from the Hub
5
- model = AutoModelForCausalLM.from_pretrained("llava-hf/llava-1.5-7b-hf")
6
  tokenizer = AutoTokenizer.from_pretrained("llava-hf/llava-1.5-7b-hf")
7
 
8
  # Define a function to generate a response given an input text and an optional image URL
9
  def generate_response(text, image_url=None):
10
  # Encode the input text and image URL as a single input_ids tensor
11
- image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/3/3a/Roadrunner_Petrochelidon_pyrrhonota.jpg/1200px-Roadrunner_Petrochelidon_pyrrhonota.jpg"
12
  if image_url:
13
  input_ids = tokenizer(f"{text} <img>{image_url}</img>", return_tensors="pt").input_ids
14
  else:
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer
3
+ from llava import LlavaForConditionalGeneration
4
 
5
+ # Load the LLaVA model and tokenizer from the Hub
6
+ model = LlavaForConditionalGeneration.from_pretrained("llava-hf/llava-1.5-7b-hf")
7
  tokenizer = AutoTokenizer.from_pretrained("llava-hf/llava-1.5-7b-hf")
8
 
9
  # Define a function to generate a response given an input text and an optional image URL
10
  def generate_response(text, image_url=None):
11
  # Encode the input text and image URL as a single input_ids tensor
 
12
  if image_url:
13
  input_ids = tokenizer(f"{text} <img>{image_url}</img>", return_tensors="pt").input_ids
14
  else: