import json
import re
import requests
import gzip
from urlextract import URLExtract
from pathlib import Path

# Define user ID and base URLs for Hugging Face and GitHub
utid = 'fchernow'
base = {
    'model': 'https://huggingface.co/',
    'data': 'https://huggingface.co/datasets/',
    'source': 'https://'
}
post_hf = '/raw/main/README.md'
post_gh = '/raw/main/README.md'  # or '/raw/master/README.md'; '/blob/...' returns the rendered HTML page rather than the raw file

# Initialize URL extractor and DOI pattern
extU = URLExtract()
DOIpattern = r'\b(10\.\d{4,9}/[-._;()/:A-Z0-9]+)\b'
# BibTeX pattern to match entries like "@article{...}"
BibTeXPattern = r'@\w+\{[^}]+\}'
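# Illustrative matches: the DOI pattern catches e.g. '10.18653/v1/2020.acl-main.1';
# the BibTeX pattern matches '@article{...}' only up to the first closing brace.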

# Set a timeout for requests (in seconds)
REQUEST_TIMEOUT = 10

# Functions to extract URLs, DOIs, and BibTeX entries
def extractURLs(content):
    return extU.find_urls(content)

def extractDOIs(content):
    return re.findall(DOIpattern, content, re.IGNORECASE)

def extractBibTeX(content):
    return re.findall(BibTeXPattern, content, re.DOTALL)

# Output file for compressed JSON data
output_path = f"output_data/{utid}.json.gz"
Path("output_data").mkdir(parents=True, exist_ok=True)  # Ensure the directory exists

# Variable to track the number of successful entries
successful_entries = 0

# Open output file for writing
with gzip.open(output_path, 'wt', encoding='utf-8') as fo:
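    # Assumed input format (inferred from the parsing below): each list is named
    # {utid}_{tp}.txt with one repository identifier per line; 'source' lines look like
    # "<paper count>;github.com/<owner>/<repo>".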

    def run(tp):
        global successful_entries  # Reference the successful_entries variable in the global scope
        # Determine which post suffix to use
        post_suffix = post_hf if tp != 'source' else post_gh

        # Open the input file based on type (model, data, or source)
        with open(f"{utid}_{tp}.txt", 'r') as f:
            line_count = 0  # Track the number of lines processed
            for line in f:
                line = line.strip()
                if not line:
                    continue
                
                # Handle GitHub entries in 'source'
                if tp == 'source':
                    if ';' in line:
                        npapers, line = line.split(';', 1)  # split only on the first ';' in case the repo path contains more
                    else:
                        continue

                # Construct the full URL for README retrieval
                url = base[tp] + f"{line}{post_suffix}"
                
                # Fetch README content with timeout
                try:
                    r = requests.get(url, timeout=REQUEST_TIMEOUT)
                    r.raise_for_status()
                    content = r.text
                    status = "success"
                except requests.RequestException:
                    content = ""
                    status = "failed"

                # Extract URLs, DOIs, and BibTeX entries from the README content
                urls = extractURLs(content)
                dois = extractDOIs(content)
                bibs = extractBibTeX(content)

                # Write the entry to output file, regardless of status
                res = {
                    'id': line,
                    'type': tp,
                    'url': url,
                    'content': content.replace("\n", " ") if content else "",
                    'links': urls,
                    'dois': dois,
                    'bibs': bibs,
                    'status': status
                }
                out = json.dumps(res, ensure_ascii=False)
                fo.write(out + "\n")

                # If the entry was successful, increment the counter
                if status == "success":
                    successful_entries += 1

                line_count += 1
            
            print(f"Processed {line_count} lines in {tp} file.")

    # Run for each type: model, data, and source
    run('model')
    run('data')
    run('source')

print(f"Data successfully saved to {output_path} with {successful_entries} successful entries.")