ericlewis committed on
Commit
aa58885
1 Parent(s): f7036e6

Upload 11 files

README.md ADDED
@@ -0,0 +1,46 @@
+ ---
+ license: cc-by-nc-4.0
+ tags:
+ - vision
+ - metaclip
+ widget:
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png
+   candidate_labels: playing music, playing sports
+   example_title: Cat & Dog
+ ---
+
+ # MetaCLIP model, huge-sized version, patch resolution 14
+
+ MetaCLIP model applied to 2.5 billion data points of CommonCrawl (CC). It was introduced in the paper [Demystifying CLIP Data](https://arxiv.org/abs/2309.16671) by Xu et al. and first released in [this repository](https://github.com/facebookresearch/MetaCLIP).
+
+ Disclaimer: The team releasing MetaCLIP did not write a model card for this model, so this model card has been written by the Hugging Face team.
+
+ ## Model description
+
+ The [Demystifying CLIP Data](https://arxiv.org/abs/2309.16671) paper aims to reveal CLIP's approach to training data curation, since OpenAI never open-sourced the code for its data preparation pipeline.
+
+ <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/clip_overview.jpg"
+ alt="drawing" width="600"/>
+
+ <small> CLIP high-level overview. Taken from the <a href="https://arxiv.org/abs/2103.00020">CLIP paper</a>. </small>
+
+ ## Intended uses & limitations
+
+ You can use the raw model to link images and text in a shared embedding space. This enables tasks such as zero-shot image classification, text-based image retrieval, and image-based text retrieval.
+
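+ As an illustration of the shared embedding space, the minimal sketch below embeds an image and a few captions and ranks the captions by cosine similarity. The hub id `facebook/metaclip-h14-fullcc2.5b` is an assumption here; substitute this repository's actual id. It uses the standard `CLIPModel`/`CLIPProcessor` classes from 🤗 Transformers.
+
+ ```python
+ import requests
+ import torch
+ from PIL import Image
+ from transformers import CLIPModel, CLIPProcessor
+
+ # Hub id is an assumption; point it at this repository if the name differs.
+ model_id = "facebook/metaclip-h14-fullcc2.5b"
+ model = CLIPModel.from_pretrained(model_id)
+ processor = CLIPProcessor.from_pretrained(model_id)
+
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ image = Image.open(requests.get(url, stream=True).raw)
+ captions = ["two cats sleeping on a couch", "a dog playing fetch", "a bowl of fruit"]
+
+ with torch.no_grad():
+     image_emb = model.get_image_features(**processor(images=image, return_tensors="pt"))
+     text_emb = model.get_text_features(**processor(text=captions, padding=True, return_tensors="pt"))
+
+ # L2-normalize so the dot product is a cosine similarity.
+ image_emb = image_emb / image_emb.norm(dim=-1, keepdim=True)
+ text_emb = text_emb / text_emb.norm(dim=-1, keepdim=True)
+
+ similarity = image_emb @ text_emb.T  # shape (1, num_captions)
+ print(sorted(zip(similarity[0].tolist(), captions), reverse=True))
+ ```
+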
+ ### How to use
+
+ We refer to the [docs](https://huggingface.co/docs/transformers/main/en/model_doc/clip#usage); just replace the OpenAI model names with the name of this checkpoint on the hub. A short sketch follows.
+
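+ For concreteness, here is a minimal zero-shot image classification sketch following the general CLIP usage pattern from the docs. The hub id `facebook/metaclip-h14-fullcc2.5b` is an assumption; substitute this repository's id if it differs.
+
+ ```python
+ import requests
+ import torch
+ from PIL import Image
+ from transformers import CLIPModel, CLIPProcessor
+
+ model_id = "facebook/metaclip-h14-fullcc2.5b"  # assumed hub id
+ model = CLIPModel.from_pretrained(model_id)
+ processor = CLIPProcessor.from_pretrained(model_id)
+
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ image = Image.open(requests.get(url, stream=True).raw)
+
+ # Encode the image together with the candidate labels.
+ inputs = processor(
+     text=["a photo of a cat", "a photo of a dog"],
+     images=image,
+     return_tensors="pt",
+     padding=True,
+ )
+
+ with torch.no_grad():
+     outputs = model(**inputs)
+
+ # Image-text similarity scores, turned into probabilities over the labels.
+ probs = outputs.logits_per_image.softmax(dim=-1)
+ print(probs)
+ ```
+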
+ ### BibTeX entry and citation info
+
+ ```bibtex
+ @misc{xu2023demystifying,
+       title={Demystifying CLIP Data},
+       author={Hu Xu and Saining Xie and Xiaoqing Ellen Tan and Po-Yao Huang and Russell Howes and Vasu Sharma and Shang-Wen Li and Gargi Ghosh and Luke Zettlemoyer and Christoph Feichtenhofer},
+       year={2023},
+       eprint={2309.16671},
+       archivePrefix={arXiv},
+       primaryClass={cs.CV}
+ }
+ ```
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "<|endoftext|>": 49407,
+   "<|startoftext|>": 49406
+ }
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "architectures": [
+     "CLIPModel"
+   ],
+   "initializer_factor": 1.0,
+   "logit_scale_init_value": 2.6592,
+   "model_type": "clip",
+   "projection_dim": 1024,
+   "text_config": {
+     "hidden_size": 1024,
+     "intermediate_size": 4096,
+     "model_type": "clip_text_model",
+     "num_attention_heads": 16,
+     "num_hidden_layers": 24,
+     "projection_dim": 1024
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.0",
+   "vision_config": {
+     "hidden_size": 1280,
+     "intermediate_size": 5120,
+     "model_type": "clip_vision_model",
+     "num_attention_heads": 16,
+     "num_hidden_layers": 32,
+     "patch_size": 14,
+     "projection_dim": 1024
+   }
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
metaclip_h14_fullcc2.5b.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b790ce09d92080471aaa8718293126169f9d07b8147b9677bb2f14ddf8d9fff4
+ size 3944704310
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6173e1a29c7b449ce5ebe788936625c909b4d9059eb89bc4ec80bbdc00bc6571
+ size 3944549372
preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": true,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "CLIPImageProcessor",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "processor_class": "CLIPProcessor",
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 224
+   }
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1930613c95917df0af9d07d368a20c68a181bc9a8821ac145648f0f7fcc095e5
+ size 3944739266
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|startoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "49406": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "49407": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<|startoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": true,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 77,
+   "pad_token": "<|endoftext|>",
+   "processor_class": "CLIPProcessor",
+   "tokenizer_class": "CLIPTokenizer",
+   "tokenizer_file": "/Users/georgebredis/.cache/huggingface/hub/models--openai--clip-vit-base-patch32/snapshots/e6a30b603a447e251fdaca1c3056b2a16cdfebeb/tokenizer.json",
+   "unk_token": "<|endoftext|>"
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff