Update README.md
README.md CHANGED
```diff
@@ -85,19 +85,20 @@ image = Image.open(requests.get(url, stream=True).raw)
 
 def run_example(prompt):
 
-    inputs = processor(text=prompt, images=image, return_tensors="pt")
+    inputs = processor(text=prompt, images=image, return_tensors="pt", add_eos_token=False)
     generated_ids = model.generate(
-
-
-
-
-
-
-
+        pixel_values=inputs["pixel_values"],
+        input_ids=inputs["input_ids"],
+        attention_mask=inputs["attention_mask"],
+        image_embeds=None,
+        image_embeds_position_mask=inputs["image_embeds_position_mask"],
+        use_cache=True,
+        max_new_tokens=128,
     )
     generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
     _processed_text = processor.post_process_generation(generated_text, cleanup_and_extract=False)
     processed_text, entities = processor.post_process_generation(generated_text)
+
     print(processed_text)
     print(entities)
     print(_processed_text)
```
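For reference, here is how the updated snippet might fit into a complete, runnable example. This is only a sketch assembled around the diff above: the checkpoint name `microsoft/kosmos-2-patch14-224`, the image URL, the `<grounding>` prompt, and the `AutoProcessor` / `AutoModelForVision2Seq` loading calls are assumptions, not part of this commit.

```python
# Minimal sketch of the updated usage. Everything outside the processor/generate
# calls shown in the diff (checkpoint name, image URL, prompt) is assumed.
import requests
from PIL import Image
from transformers import AutoModelForVision2Seq, AutoProcessor

checkpoint = "microsoft/kosmos-2-patch14-224"  # assumed checkpoint name
model = AutoModelForVision2Seq.from_pretrained(checkpoint)
processor = AutoProcessor.from_pretrained(checkpoint)

# Example image and grounding prompt (assumed, not taken from the diff).
url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = "<grounding>An image of"

# Updated call pattern from the diff: no EOS token is appended to the prompt,
# and the processor outputs are passed to generate() explicitly.
inputs = processor(text=prompt, images=image, return_tensors="pt", add_eos_token=False)
generated_ids = model.generate(
    pixel_values=inputs["pixel_values"],
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    image_embeds=None,
    image_embeds_position_mask=inputs["image_embeds_position_mask"],
    use_cache=True,
    max_new_tokens=128,
)

generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

# Raw generation (location tokens kept) vs. cleaned text plus extracted entities.
_processed_text = processor.post_process_generation(generated_text, cleanup_and_extract=False)
processed_text, entities = processor.post_process_generation(generated_text)

print(processed_text)
print(entities)
print(_processed_text)
```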