ARKseal committed
Commit ce42599
1 Parent(s): d641ef9

Upload filter_wit.py

Files changed (1):
  1. filter_wit.py +144 -0
filter_wit.py ADDED
@@ -0,0 +1,144 @@
# Filter WIT webdataset samples: keep only caption candidates whose multilingual-CLIP
# text embedding has cosine similarity > 0.26 with the CLIP image embedding, and
# re-shard the surviving samples with wds.ShardWriter.
import gc
from glob import glob
from io import BytesIO
from pathlib import Path

import clip
import pandas as pd
import torch
import ujson
import webdataset as wds
from PIL import Image
from sentence_transformers import SentenceTransformer
from torchvision.transforms import (CenterCrop, Compose, InterpolationMode,
                                    Normalize, Resize, ToTensor)
from tqdm import tqdm

torch.multiprocessing.set_sharing_strategy('file_system')


def load_image(jpg):
    # Keep the original JPEG bytes alongside the decoded PIL image.
    return jpg, Image.open(BytesIO(jpg))


def load_json(json):
    return ujson.loads(json)


load_preprocess_map = {
    'jpg': load_image,
    'json': load_json,
}


def convert_image_to_rgb(im):
    return im.convert("RGB")


# taken from https://github.com/openai/CLIP
image_transforms = Compose([
    Resize(224, interpolation=InterpolationMode.BICUBIC),
    CenterCrop(224),
    convert_image_to_rgb,
    ToTensor(),
    Normalize((0.48145466, 0.4578275, 0.40821073),
              (0.26862954, 0.26130258, 0.27577711)),
])


def image_preprocess(jpgs):
    jpg_orig, im = jpgs
    im = image_transforms(im)
    return jpg_orig, im


# WIT metadata fields that may serve as caption candidates.
texts_to_check = [
    'page_title',
    'section_title',
    'hierarchical_section_title',
    'caption',
    'caption_attribution_description',
    'caption_alt_text_description',
    'context_page_description',
    'context_section_description'
]


def meta_preprocess(meta: dict):
    # Collect every non-empty caption candidate and keep the original metadata.
    return {
        'captions': [meta[text] for text in texts_to_check if text in meta and meta[text]],
        'orig': meta
    }


mclip_preprocess_map = {
    'jpg': image_preprocess,
    'json': meta_preprocess
}


def log(msg):
    print(msg, end='\n\n\n\n')
    return msg


def func(wds_dataset_str, device=None, batch_size=4, **kwargs):
    nocap = 0
    if device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print('Loading models:')
    model, _ = clip.load('ViT-B/32', device=device, jit=False)
    mclip = SentenceTransformer(
        'sentence-transformers/clip-ViT-B-32-multilingual-v1', device=device)
    cosine_similarity = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
    print('Finished loading models')

    ds = (wds.WebDataset(wds_dataset_str, shardshuffle=False)
          .map_dict(**load_preprocess_map)
          .map_dict(**mclip_preprocess_map)
          .to_tuple('jpg', 'json')
          .batched(batch_size))
    dl = wds.WebLoader(ds, batch_size=None, shuffle=False, **kwargs)

    writer = wds.ShardWriter('%05d.tar', 10000)
    for i, batch in enumerate(tqdm(dl)):
        try:
            imss, metas = batch
            orig_jpgs, ims = zip(*imss)
            ims = torch.stack(ims)

            captionss = [meta['captions'] for meta in metas]

            with torch.no_grad():
                # CLIP image embeddings, one tensor per sample.
                image_features = torch.unbind(
                    model.encode_image(ims.to(device)).float())
                # Multilingual CLIP text embeddings for each sample's caption candidates.
                text_featuress = [mclip.encode(captions, convert_to_tensor=True).to(
                    device).float() for captions in captionss]

            similarities = [
                cosine_similarity(image_feature.repeat(
                    len(text_features), 1), text_features).tolist()
                for image_feature, text_features in zip(image_features, text_featuress)
            ]

            # Keep only captions whose similarity to the image exceeds 0.26.
            captionss = [[cap for cap, sim in zip(captions, similarity) if sim > 0.26]
                         for captions, similarity in zip(captionss, similarities)]

            for orig_jpg, captions, meta in zip(orig_jpgs, captionss, metas):
                if len(captions) == 0:
                    nocap += 1
                    tqdm.write(f'No captions: {nocap}')
                    continue

                sample = {
                    '__key__': f'{writer.count:08}',
                    'jpg': orig_jpg,
                    'txt': ''.join(captions),
                    'json': ujson.dumps(meta['orig'])
                }
                writer.write(sample)
            if i % 25 == 0:
                gc.collect()
                torch.cuda.empty_cache()
        except Exception as e:
            print(f'Error: {e}')
            raise e
    writer.close()
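
The commit does not include an entry point for running the filter, so the snippet below is a minimal usage sketch rather than part of the upload: the input shard pattern and worker count are assumptions for illustration. func accepts any brace-expandable WebDataset shard specification and forwards extra keyword arguments to wds.WebLoader; filtered samples are written as numbered shards (00000.tar, 00001.tar, ...) in the working directory, 10,000 samples per shard.

# Hypothetical driver for filter_wit.py; the shard pattern and num_workers
# value are illustrative assumptions, not part of this commit.
from filter_wit import func

if __name__ == '__main__':
    # Reads WIT webdataset shards, keeps captions with CLIP/mCLIP cosine
    # similarity > 0.26, and writes filtered shards to the current directory.
    func('wit-shards/{00000..00099}.tar', batch_size=4, num_workers=4)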