wjbmattingly committed on
Commit
43787f3
1 Parent(s): 5aa28ce

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +72 -3
README.md CHANGED
@@ -1,3 +1,72 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ datasets:
4
+ - CATMuS/medieval
5
+ base_model:
6
+ - Qwen/Qwen2-VL-2B-Instruct
7
+ ---
8
+
9
+ ```python
10
+ import torch
11
+ from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
12
+ from qwen_vl_utils import process_vision_info
13
+
14
+
15
+ device = "cuda" if torch.cuda.is_available() else "cpu"
16
+
17
+ model_dir = "medieval-data/qwen2-vl-2b-catmus-40000"
18
+
19
+
20
+
21
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
22
+ model_dir, torch_dtype="auto", device_map="auto"
23
+ )
24
+
25
+ processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
26
+ image_url ="""https://datasets-server.huggingface.co/cached-assets/CATMuS/medieval/--/76c4e4124476cced0b7b487421313450cf646ce8/--/default/test/851/im/image.jpg?Expires=1726188562&Signature=rdeGLGZfuXA0e93VngajlOGZ~4RUz3W6HYe84u27vHd~X502-O0gDiT8y39mJeYyUyQOf9wXs~mlXDaT8ugP62f4gcKEEaqikBHhhbIFHYgCy48NKzJXx4bPRCND1T6JrBotOfY3LUy6XP7PNcv7e5cAXQPeGoEHH4VcU6Bt~~mLg~oD2qYzKwKQ7PcFmIYAk-4igi0MZNUuScw6dpCe9CY2aCgvJeGb3ZZySbb~9Tn7ij7p7ouG2DMVurKCsm8tMIwLrzAAv2UEl4WE0aSVFk9Rm-zPiH3qRwzElLi7FNn6BzRYmm9WPW6wuRdTGweJxDrPjBi3Roy3B~jqk4hryg__&Key-Pair-Id=K3EI6M078Z3AC3"""
27
+ messages = [
28
+ {
29
+ "role": "user",
30
+ "content": [
31
+ {
32
+ "type": "image",
33
+ "image": image_url,
34
+ },
35
+ {"type": "text", "text": "Convert this image to text."},
36
+ ],
37
+ }
38
+ ]
39
+
40
+ # Preparation for inference
41
+ text = processor.apply_chat_template(
42
+ messages, tokenize=False, add_generation_prompt=True
43
+ )
44
+ image_inputs, video_inputs = process_vision_info(messages)
45
+ inputs = processor(
46
+ text=[text],
47
+ images=image_inputs,
48
+ videos=video_inputs,
49
+ padding=True,
50
+ return_tensors="pt",
51
+ )
52
+ inputs = inputs.to(device)
53
+
54
+ # Inference: Generation of the output
55
+ generated_ids = model.generate(**inputs, max_new_tokens=4000)
56
+ generated_ids_trimmed = [
57
+ out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
58
+ ]
59
+ output_text = processor.batch_decode(
60
+ generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
61
+ )
62
+
63
+ print(output_text)
64
+ # Import required libraries if not already imported
65
+ from IPython.display import display, Image
66
+
67
+ # Display the output text
68
+ print(output_text)
69
+
70
+ # Display the image
71
+ display(Image(url=image_url))
72
+ ```