Ligeng-Zhu committed
Commit
aeb3819
1 Parent(s): 6afd575

Upload main.py with huggingface_hub

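Per the commit message, the file was pushed with huggingface_hub; a minimal sketch of such an upload call (the repo_id and repo_type below are assumptions, not taken from the commit):

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="main.py",
    path_in_repo="main.py",
    repo_id="Ligeng-Zhu/panda70m",  # hypothetical repo id
    repo_type="dataset",  # assumption; adjust to the actual repo type
)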
Files changed (1)
main.py +116 -0
main.py ADDED
@@ -0,0 +1,116 @@
import asyncio
import json
import os
import os.path as osp
import sys
from concurrent.futures import ProcessPoolExecutor

import fire
import pandas as pd
import requests

def ytb_download(uid, url, json_info, output_dir="ytb_videos/"):
    # Returns 1 on success (or already done) and 0 on failure, so the
    # caller can count successes with sum().
    os.makedirs(output_dir, exist_ok=True)
    # uid = url.split("?v=")[-1]

    video_path = osp.join(output_dir, f"{uid}.mp4")
    meta_path = osp.join(output_dir, f"{uid}.json")
    if osp.exists(video_path) and osp.exists(meta_path):
        print(f"{uid} already downloaded.")
        return 1

    if osp.exists(video_path):
        print(f"[video] {uid} already downloaded.")
    else:
        with requests.get(url) as resp:
            if resp.status_code != 200:
                print(f"{uid} failed to fetch.")
                return 0
            print(f"downloading {uid}: {url} to {output_dir}")
            with open(video_path, "wb") as fp:
                fp.write(resp.content)

    if not osp.exists(meta_path):
        with open(meta_path, "w") as fp:
            json.dump(json_info, fp, indent=2)
    return 1


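# Sketch of a streamed alternative: resp.content above buffers each whole
# video in memory, while iter_content keeps memory flat for large files.
# fetch_streamed is a hypothetical helper, not wired into ytb_download.
def fetch_streamed(url, video_path, chunk_bytes=1 << 20):
    with requests.get(url, stream=True, timeout=60) as resp:
        if resp.status_code != 200:
            return False
        with open(video_path, "wb") as fp:
            for chunk in resp.iter_content(chunk_size=chunk_bytes):
                fp.write(chunk)
    return True

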
async def main(csv_path, max_workers=2, shards=0, total=-1, limit=False):
    PPE = ProcessPoolExecutor(max_workers=max_workers)
    loop = asyncio.get_running_loop()  # get_event_loop() is deprecated inside coroutines

    df = pd.read_csv(csv_path)
    output_dir = osp.splitext(csv_path)[0]  # robust to dots elsewhere in the path

    tasks = []
    data_list = df.iterrows()

    if total > 0:
        # Keep only this worker's shard; the last shard absorbs the remainder.
        data_list = list(df.iterrows())
        chunk = len(data_list) // total
        begin_idx = shards * chunk
        end_idx = (shards + 1) * chunk
        if shards == total - 1:
            end_idx = len(data_list)
        data_list = data_list[begin_idx:end_idx]
        print(f"Downloading {len(data_list)} videos in this shard")

    for idx, (index, row) in enumerate(data_list):
        uid = row["videoid"]
        url = row["contentUrl"]

        # Expected CSV columns: name,page_idx,page_dir,duration
        try:
            json_info = {
                "name": row["name"],
                "url": url,
                # "page_idx": row["page_idx"],
                # "page_dir": row["page_dir"],
                "duration": row["duration"],
            }
        except KeyError as e:
            print(row)
            print(row.keys())
            print(e)
            sys.exit(1)  # exit non-zero on malformed rows

        tasks.append(
            loop.run_in_executor(PPE, ytb_download, uid, url, json_info, output_dir)
        )
        if limit and idx >= 20:  # debug mode: stop after ~20 rows
            break
    res = await asyncio.gather(*tasks)
    PPE.shutdown(wait=True)

    print(f"[{sum(res)} / {len(res)}]")

    output_log = f"log_{output_dir}.json"

    key = "total"
    if total > 0:  # was `total is not None`, but total defaults to -1, never None
        key = f"{shards}-of-{total}"

    json_output = {}
    if osp.exists(output_log):
        with open(output_log, "r") as fp:
            json_output = json.load(fp)

    json_output[key] = f"[{sum(res)} / {len(res)}]"

    with open(output_log, "w") as fp:
        json.dump(json_output, fp, indent=2)


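# The sharding arithmetic above, worked through: with 100 rows and total=3,
# chunk is 33, so shards 0/1/2 cover rows [0, 33), [33, 66), and [66, 100);
# the last shard absorbs the len(data_list) % total remainder.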
def entry(csv="panda70m_testing.csv", shards=0, total=-1, limit=False):
    asyncio.run(main(csv, shards=shards, total=total, limit=limit))


if __name__ == "__main__":
    fire.Fire(entry)
    # url = "https://ak.picdn.net/shutterstock/videos/1053841541/preview/stock-footage-travel-blogger-shoot-a-story-on-top-of-mountains-young-man-holds-camera-in-forest.mp4"
    # uid = "1053841541"
    # json_info = {}
    # res = ytb_download(uid, url, json_info, output_dir="test/")
    # print(res)
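# Example invocations, assuming a CSV with videoid, contentUrl, name, and
# duration columns next to this script (file names here are illustrative):
#   python main.py --csv panda70m_testing.csv                    # everything
#   python main.py --csv panda70m_testing.csv --shards 1 --total 4
#   python main.py --csv panda70m_testing.csv --limit True       # ~20 rows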