Added Flask to Readme

#4
by JGKaaij - opened
.gitattributes CHANGED
@@ -33,6 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
-snowman.png filter=lfs diff=lfs merge=lfs -text
-pikachu.png filter=lfs diff=lfs merge=lfs -text
-pikachu_bbox.png filter=lfs diff=lfs merge=lfs -text

README.md CHANGED
@@ -5,12 +5,6 @@
 ---
 # Kosmos-2: Grounding Multimodal Large Language Models to the World
 
-**This model (remote code on the Hub) is deprecated. Please use https://huggingface.co/microsoft/kosmos-2-patch14-224**
-
-**There are some changes in terms of input formats: see the model card in https://huggingface.co/microsoft/kosmos-2-patch14-224**
-
-~~**(There is an ongoing effort to port `Kosmos-2` directly into `transformers`. This repository (remote code) might need some more bug fixes later, including breaking changes.)**~~
-
 <a href="https://huggingface.co/ydshieh/kosmos-2-patch14-224/resolve/main/annotated_snowman.jpg" target="_blank"><figure><img src="https://huggingface.co/ydshieh/kosmos-2-patch14-224/resolve/main/annotated_snowman.jpg" width="384"><figcaption><b>[An image of a snowman warming himself by a fire.]</b></figcaption></figure></a>
 
@@ -32,7 +26,7 @@ processor = AutoProcessor.from_pretrained("ydshieh/kosmos-2-patch14-224", trust_
 
 prompt = "<grounding>An image of"
 
-url = "https://huggingface.co/ydshieh/kosmos-2-patch14-224/resolve/main/snowman.png"
+url = "https://huggingface.co/ydshieh/kosmos-2-patch14-224/resolve/main/snowman.jpg"
 image = Image.open(requests.get(url, stream=True).raw)
 
 # The original Kosmos-2 demo saves the image first and then reloads it. For some images, this will give slightly different image input and change the generation outputs.
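
As an aside on the comment in the context line above: a minimal, self-contained sketch of that save-then-reload step (the temporary file name here is arbitrary):

```python
import requests
from PIL import Image

url = "https://huggingface.co/ydshieh/kosmos-2-patch14-224/resolve/main/snowman.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Save and reload, as the original demo does. JPEG re-encoding is lossy, so
# the reloaded pixels (and therefore the generated text) can differ slightly
# from the in-memory image.
image.save("reloaded_snowman.jpg")
image = Image.open("reloaded_snowman.jpg")
```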
@@ -211,215 +205,3 @@ draw_entity_boxes_on_image(image, entities, show=True)
 Here is the annotated image:
 
 <a href="https://huggingface.co/ydshieh/kosmos-2-patch14-224/resolve/main/annotated_snowman.jpg" target="_blank"><img src="https://huggingface.co/ydshieh/kosmos-2-patch14-224/resolve/main/annotated_snowman.jpg" width="500"></a>
-
-
-## Tasks
-
-This model can perform different tasks simply by changing the prompt.
-
-First, let's define a function to run a prompt.
-
-```python
-import requests
-
-from PIL import Image
-from transformers import AutoProcessor, AutoModelForVision2Seq
-
-
-model = AutoModelForVision2Seq.from_pretrained("ydshieh/kosmos-2-patch14-224", trust_remote_code=True)
-processor = AutoProcessor.from_pretrained("ydshieh/kosmos-2-patch14-224", trust_remote_code=True)
-
-url = "https://huggingface.co/ydshieh/kosmos-2-patch14-224/resolve/main/snowman.png"
-image = Image.open(requests.get(url, stream=True).raw)
-
-def run_example(prompt):
-
-    inputs = processor(text=prompt, images=image, return_tensors="pt")
-    generated_ids = model.generate(
-        pixel_values=inputs["pixel_values"],
-        input_ids=inputs["input_ids"][:, :-1],
-        attention_mask=inputs["attention_mask"][:, :-1],
-        img_features=None,
-        img_attn_mask=inputs["img_attn_mask"][:, :-1],
-        use_cache=True,
-        max_new_tokens=64,
-    )
-    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
-    _processed_text = processor.post_process_generation(generated_text, cleanup_and_extract=False)
-    processed_text, entities = processor.post_process_generation(generated_text)
-    print(processed_text)
-    print(entities)
-    print(_processed_text)
-```
-
-Here are the tasks `Kosmos-2` can perform:
-
-### Multimodal Grounding
-
-#### • Phrase Grounding
-```python
-prompt = "<grounding><phrase> a snowman</phrase>"
-run_example(prompt)
-
-# a snowman is warming himself by the fire
-# [('a snowman', (0, 9), [(0.390625, 0.046875, 0.984375, 0.828125)]), ('the fire', (32, 40), [(0.203125, 0.015625, 0.453125, 0.859375)])]
-
-# <grounding><phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> is warming himself by<phrase> the fire</phrase><object><patch_index_0006><patch_index_0878></object>
-```
-
-#### • Referring Expression Comprehension
-```python
-prompt = "<grounding><phrase> a snowman next to a fire</phrase>"
-run_example(prompt)
-
-# a snowman next to a fire
-# [('a snowman next to a fire', (0, 24), [(0.390625, 0.046875, 0.984375, 0.828125)])]
-
-# <grounding><phrase> a snowman next to a fire</phrase><object><patch_index_0044><patch_index_0863></object>
-```
-
-### Multimodal Referring
-
-#### • Referring Expression Generation
-```python
-prompt = "<grounding><phrase> It</phrase><object><patch_index_0044><patch_index_0863></object> is"
-run_example(prompt)
-
-# It is snowman in a hat and scarf
-# [('It', (0, 2), [(0.390625, 0.046875, 0.984375, 0.828125)])]
-
-# <grounding><phrase> It</phrase><object><patch_index_0044><patch_index_0863></object> is snowman in a hat and scarf
-```
-
-### Perception-Language Tasks
-
-#### • Grounded VQA
-```python
-prompt = "<grounding> Question: What is special about this image? Answer:"
-run_example(prompt)
-
-# Question: What is special about this image? Answer: The image features a snowman sitting by a campfire in the snow.
-# [('a snowman', (71, 80), [(0.390625, 0.046875, 0.984375, 0.828125)]), ('a campfire', (92, 102), [(0.109375, 0.640625, 0.546875, 0.984375)])]
-
-# <grounding> Question: What is special about this image? Answer: The image features<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> sitting by<phrase> a campfire</phrase><object><patch_index_0643><patch_index_1009></object> in the snow.
-```
-
-#### • Grounded VQA with multimodal referring via bounding boxes
-```python
-prompt = "<grounding> Question: Where is<phrase> the fire</phrase><object><patch_index_0005><patch_index_0911></object> next to? Answer:"
-run_example(prompt)
-
-# Question: Where is the fire next to? Answer: Near the snowman.
-# [('the fire', (19, 27), [(0.171875, 0.015625, 0.484375, 0.890625)]), ('the snowman', (50, 61), [(0.390625, 0.046875, 0.984375, 0.828125)])]
-
-# <grounding> Question: Where is<phrase> the fire</phrase><object><patch_index_0005><patch_index_0911></object> next to? Answer: Near<phrase> the snowman</phrase><object><patch_index_0044><patch_index_0863></object>.
-```
-
-### Grounded Image Captioning
-
-#### • Brief
-
-```python
-prompt = "<grounding> An image of"
-run_example(prompt)
-
-# An image of a snowman warming himself by a campfire.
-# [('a snowman', (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), ('a campfire', (41, 51), [(0.109375, 0.640625, 0.546875, 0.984375)])]
-
-# <grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> warming himself by<phrase> a campfire</phrase><object><patch_index_0643><patch_index_1009></object>.
-```
-
-#### • Detailed
-
-```python
-prompt = "<grounding> Describe this image in detail:"
-run_example(prompt)
-
-# Describe this image in detail: The image features a snowman sitting by a campfire in the snow. He is wearing a hat, scarf, and gloves, with a pot nearby and a cup
-# [('a campfire', (71, 81), [(0.171875, 0.015625, 0.484375, 0.984375)]), ('a hat', (109, 114), [(0.515625, 0.046875, 0.828125, 0.234375)]), ('scarf', (116, 121), [(0.515625, 0.234375, 0.890625, 0.578125)]), ('gloves', (127, 133), [(0.515625, 0.390625, 0.640625, 0.515625)]), ('a pot', (140, 145), [(0.078125, 0.609375, 0.265625, 0.859375)])]
-
-# <grounding> Describe this image in detail: The image features a snowman sitting by<phrase> a campfire</phrase><object><patch_index_0005><patch_index_1007></object> in the snow. He is wearing<phrase> a hat</phrase><object><patch_index_0048><patch_index_0250></object>,<phrase> scarf</phrase><object><patch_index_0240><patch_index_0604></object>, and<phrase> gloves</phrase><object><patch_index_0400><patch_index_0532></object>, with<phrase> a pot</phrase><object><patch_index_0610><patch_index_0872></object> nearby and<phrase> a cup</phrase><object>
-```
-
-
-## Running the Flask Server
-_flask_kosmos2.py_ implements a Flask server for the model.
-It allows the model to be accessed as a REST API.
-
-After starting the server, you can send a POST request to `http://localhost:8005/process_prompt` with the following form data:
-- `prompt`: For example `<grounding> an image of`
-- `image`: The image file as binary data
-
-This in turn produces a reply in the following JSON format:
-- `message`: The Kosmos-2 generated text
-- `entities`: The extracted entities
-
-An easy way to test this is through an application like Postman. Make sure the image field is set to `File`.
-
-```python
-
-from PIL import Image
-from transformers import AutoProcessor, AutoModelForVision2Seq
-from flask import Flask, request, jsonify
-import json
-
-app = Flask(__name__)
-
-model = AutoModelForVision2Seq.from_pretrained("ydshieh/kosmos-2-patch14-224", trust_remote_code=True)
-processor = AutoProcessor.from_pretrained("ydshieh/kosmos-2-patch14-224", trust_remote_code=True)
-
-
-@app.route('/process_prompt', methods=['POST'])
-def process_prompt():
-    try:
-        # Get the uploaded image data from the POST request
-        uploaded_file = request.files['image']
-        prompt = request.form.get('prompt')
-        image = Image.open(uploaded_file.stream)
-
-        print(image.size)
-
-        inputs = processor(text=prompt, images=image, return_tensors="pt")
-
-        generated_ids = model.generate(
-            pixel_values=inputs["pixel_values"],
-            input_ids=inputs["input_ids"][:, :-1],
-            attention_mask=inputs["attention_mask"][:, :-1],
-            img_features=None,
-            img_attn_mask=inputs["img_attn_mask"][:, :-1],
-            use_cache=True,
-            max_new_tokens=64,
-        )
-        generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
-
-        # By default, the generated text is cleaned up and the entities are extracted.
-        processed_text, entities = processor.post_process_generation(generated_text)
-        parsed_entities = entities_to_json(entities)
-        print(generated_text)
-        print(processed_text)
-        return jsonify({"message": processed_text, 'entities': parsed_entities})
-    except Exception as e:
-        return jsonify({"error": str(e)})
-
-
-def entities_to_json(entities):
-    # Each entity is (label, (start, end) character span in the text, [normalized bounding boxes]).
-    result = []
-    for e in entities:
-        label = e[0]
-        box_coords = e[1]
-        box_size = e[2][0]
-        entity_result = {
-            "label": label,
-            "boundingBoxPosition": {"x": box_coords[0], "y": box_coords[1]},
-            "boundingBox": {"x_min": box_size[0], "y_min": box_size[1], "x_max": box_size[2], "y_max": box_size[3]}
-        }
-        print(entity_result)
-        result.append(entity_result)
-
-    return result
-
-
-if __name__ == '__main__':
-    app.run(host='localhost', port=8005)
-
-```
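
For a quick smoke test of the Flask endpoint described in the README section above, a minimal Python client might look like this (a sketch: the URL, port, and field names follow the snippet above; the local file name `snowman.png` is just an example):

```python
# Hypothetical client sketch for the /process_prompt endpoint documented
# above; assumes the Flask server from the README snippet is running locally.
import requests

def query_kosmos2(image_path, prompt="<grounding> An image of"):
    # The endpoint expects multipart form data: a text `prompt` field and an
    # `image` file field, matching the handler's request.files['image'].
    with open(image_path, "rb") as f:
        response = requests.post(
            "http://localhost:8005/process_prompt",
            data={"prompt": prompt},
            files={"image": f},
        )
    response.raise_for_status()
    payload = response.json()
    # `message` is the generated text, `entities` the extracted entity list.
    return payload["message"], payload["entities"]

if __name__ == "__main__":
    message, entities = query_kosmos2("snowman.png")  # example local file
    print(message)
    for entity in entities:
        print(entity["label"], entity["boundingBox"])
```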
modeling_kosmos2.py CHANGED
@@ -22,7 +22,6 @@ from typing import List, Optional, Tuple, Union
 import torch
 import torch.utils.checkpoint
 from torch import nn
-from torch.nn import CrossEntropyLoss
 
 from transformers.activations import ACT2FN
 from transformers.modeling_outputs import (
@@ -1008,7 +1007,7 @@ class Kosmos2TextTransformer(nn.Module):
         inputs_embeds = self.embed_tokens(input_ids)
 
         if img_features is not None:
-            inputs_embeds[img_input_mask.to(dtype=torch.bool)] = img_features.view(-1, img_features.size(-1))
+            inputs_embeds[img_input_mask.to(dtype=torch.bool)] = img_features
 
         inputs_embeds = inputs_embeds * self.embed_scale
 
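A note on the embedding change above: boolean-mask assignment in PyTorch flattens the selected positions, which is what the removed `.view(-1, ...)` was matching. A small self-contained sketch with synthetic shapes (not the model's real tensors):

```python
import torch

# Synthetic stand-ins for the tensors in Kosmos2TextTransformer.
batch, seq_len, num_img_tokens, hidden = 2, 10, 4, 8
inputs_embeds = torch.zeros(batch, seq_len, hidden)
img_features = torch.randn(batch, num_img_tokens, hidden)

# True at the positions reserved for image tokens.
img_input_mask = torch.zeros(batch, seq_len, dtype=torch.bool)
img_input_mask[:, 1 : 1 + num_img_tokens] = True

# Boolean indexing selects a (mask.sum(), hidden) slab, so flattening the
# features to 2-D (the removed form) is shape-safe for any batch size.
inputs_embeds[img_input_mask] = img_features.view(-1, img_features.size(-1))
assert torch.equal(inputs_embeds[img_input_mask], img_features.view(-1, hidden))

# The new direct assignment instead relies on broadcasting img_features onto
# that slab, which only lines up when the leading batch dimension is 1.
```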
pikachu.png DELETED

Git LFS Details

  • SHA256: 7bf04b0f0b3191819ade6bd8b6c7cb388636a010b1e812e05a746564f3c9d306
  • Pointer size: 132 Bytes
  • Size of remote file: 1.18 MB
pikachu.webp DELETED
Binary file (35.4 kB)
 
pikachu_bbox.png DELETED

Git LFS Details

  • SHA256: f1c4fa11a4aea7c573949e747d11a0da3142e39ee986df40eebfd10986395ecc
  • Pointer size: 132 Bytes
  • Size of remote file: 1.01 MB
processing_kosmos2.py CHANGED
@@ -529,8 +529,7 @@ def extract_entities_with_patch_indices(text):
         phrase_tag, phrase, match_content = match.groups()
         if not phrase_tag:
             phrase = None
-            # We take the starting position of `<object>`
-            span = (match.span(0)[0], match.span(0)[0])
+            span = (None, None)
 
         # Split the match_content by the delimiter to get individual patch_index pairs
         patch_index_pairs = match_content.split('</delimiter_of_multi_objects/>')
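For context on the removed `span` line: `match.span(0)` is the `(start, end)` character range of the whole regex match, so the old code recorded the match's starting position twice, while the new code drops position information with `(None, None)`. A toy illustration (synthetic pattern, not the module's real one):

```python
import re

# Synthetic pattern standing in for the module's phrase/object regex.
m = re.search(r"<phrase>(.*?)</phrase>", "see<phrase> a snowman</phrase> here")

print(m.span(0))                     # (3, 30): start/end of the whole match
print((m.span(0)[0], m.span(0)[0]))  # (3, 3): what the removed line stored
```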
snowman.png DELETED

Git LFS Details

  • SHA256: b97825997df04bd823207fd145331ffc3c3b62ec4e3a3adaac83c93debe87bdf
  • Pointer size: 132 Bytes
  • Size of remote file: 1.36 MB
tokenization_kosmos2_fast.py CHANGED
@@ -137,6 +137,7 @@ class Kosmos2TokenizerFast(PreTrainedTokenizerFast):
         )
 
         self.vocab_file = vocab_file
+        self.can_save_slow_tokenizer = False if not self.vocab_file else True
 
         self.eod_token = "</doc>"
 
@@ -178,10 +179,6 @@ class Kosmos2TokenizerFast(PreTrainedTokenizerFast):
         # we need to set `special_tokens=False` to be the same as in the slow tokenizer.
         self.add_tokens(AddedToken(token, lstrip=True, rstrip=False), special_tokens=False)
 
-    @property
-    def can_save_slow_tokenizer(self) -> bool:
-        return os.path.isfile(self.vocab_file) if self.vocab_file else False
-
     def build_inputs_with_special_tokens(
         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
     ) -> List[int]:
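
A note on the tokenizer change above, with a toy sketch (simplified classes, not the transformers internals): the removed `@property` re-checked the vocab file on disk at every access, while the added line freezes a plain boolean at construction time and only tests that a path was given at all:

```python
import os

class WithProperty:
    """Old behaviour: recomputed on every access."""
    def __init__(self, vocab_file=None):
        self.vocab_file = vocab_file

    @property
    def can_save_slow_tokenizer(self) -> bool:
        # True only if a path was given AND the file exists right now.
        return os.path.isfile(self.vocab_file) if self.vocab_file else False

class WithAttribute:
    """New behaviour: computed once in __init__."""
    def __init__(self, vocab_file=None):
        self.vocab_file = vocab_file
        # Equivalent to bool(self.vocab_file); no longer checks the disk.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

# A path that is set but does not exist illustrates the difference.
print(WithProperty("missing.model").can_save_slow_tokenizer)   # False
print(WithAttribute("missing.model").can_save_slow_tokenizer)  # True
```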