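"""Bulk video downloader for Panda-70M-style CSV files.

Each row of the CSV is expected to provide at least the columns
``videoid``, ``contentUrl``, ``name`` and ``duration``. Downloads run in a
process pool, one video per task; each video is saved as ``<uid>.mp4`` next
to a ``<uid>.json`` metadata file, and per-run success counts are appended
to ``log_<output_dir>.json``.
"""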
import asyncio
import json
import os
import os.path as osp
import sys
from concurrent.futures import ProcessPoolExecutor

import fire
import pandas as pd
import requests


def ytb_download(uid, url, json_info, output_dir="ytb_videos/"):
    """Download one video plus its JSON metadata into output_dir.

    Returns 0 on success (or if the files already exist) and -1 on failure.
    """
    os.makedirs(output_dir, exist_ok=True)

    video_path = osp.join(output_dir, f"{uid}.mp4")
    meta_path = osp.join(output_dir, f"{uid}.json")
    if osp.exists(video_path) and osp.exists(meta_path):
        print(f"{uid} already downloaded.")
        return 0

    if osp.exists(video_path):
        print(f"[video] {uid} already downloaded.")
    else:
        # Stream to disk so large videos are not held fully in memory;
        # the timeout guards against connections that hang forever.
        with requests.get(url, stream=True, timeout=60) as resp:
            if resp.status_code != 200:
                print(f"{uid} failed to fetch.")
                return -1
            print(f"downloading {uid}: {url} to {output_dir}")
            with open(video_path, "wb") as f:
                for block in resp.iter_content(chunk_size=1 << 20):
                    f.write(block)

    if not osp.exists(meta_path):
        with open(meta_path, "w") as fp:
            json.dump(json_info, fp, indent=2)
    return 0


async def main(csv_path, max_workers=2, shards=0, total=-1, limit=False):
    loop = asyncio.get_running_loop()

    df = pd.read_csv(csv_path)
    # Downloads go into a directory named after the CSV file.
    output_dir = osp.splitext(csv_path)[0]

    data_list = df.iterrows()

    # With sharding enabled (total > 0), only this shard's slice of the CSV
    # is processed; the last shard also picks up the division remainder.
    if total > 0:
        data_list = list(df.iterrows())
        chunk = len(data_list) // total
        begin_idx = shards * chunk
        end_idx = (shards + 1) * chunk
        if shards == total - 1:
            end_idx = len(data_list)
        data_list = data_list[begin_idx:end_idx]
        print(f"Downloading {len(data_list)} videos in total")

    tasks = []
    with ProcessPoolExecutor(max_workers=max_workers) as pool:
        for idx, (_, row) in enumerate(data_list):
            uid = row["videoid"]
            url = row["contentUrl"]

            # Some CSV variants also carry page_idx/page_dir columns.
            try:
                json_info = {
                    "name": row["name"],
                    "url": url,
                    # "page_idx": row["page_idx"],
                    # "page_dir": row["page_dir"],
                    "duration": row["duration"],
                }
            except KeyError as e:
                print(row)
                print(row.keys())
                print(e)
                sys.exit(1)

            tasks.append(
                loop.run_in_executor(pool, ytb_download, uid, url, json_info, output_dir)
            )
            # Debug mode: stop after submitting the first ~20 rows.
            if idx >= 20 and limit:
                break
        res = await asyncio.gather(*tasks)

    # Each worker returns 0 on success and -1 on failure.
    n_ok = sum(r == 0 for r in res)
    print(f"[{n_ok} / {len(res)}]")

    # basename keeps the log in the working directory even when the CSV
    # path contains directories.
    output_log = f"log_{osp.basename(output_dir)}.json"

    key = "total"
    if total > 0:
        key = f"{shards}-of-{total}"

    # Merge this run's stats into the existing log, if any.
    json_output = {}
    if osp.exists(output_log):
        with open(output_log, "r") as fp:
            json_output = json.load(fp)

    json_output[key] = f"[{n_ok} / {len(res)}]"

    with open(output_log, "w") as fp:
        json.dump(json_output, fp, indent=2)


def entry(csv="panda70m_testing.csv", max_workers=2, shards=0, total=-1, limit=False):
    asyncio.run(main(csv, max_workers=max_workers, shards=shards, total=total, limit=limit))


if __name__ == "__main__":
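    # Typical CLI usage via fire (the script filename below is illustrative):
    #   python download_panda70m.py --csv=panda70m_testing.csv --total=8 --shards=0
    #   python download_panda70m.py --limit=True   # quick run on ~20 rows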
    fire.Fire(entry)
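    # Manual smoke test for ytb_download():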
    # url = "https://ak.picdn.net/shutterstock/videos/1053841541/preview/stock-footage-travel-blogger-shoot-a-story-on-top-of-mountains-young-man-holds-camera-in-forest.mp4"
    # uid = "1053841541"
    # json_info = {}
    # res = ytb_download(uid, url, json_info, output_dir="test/")
    # print(res)