fchernow committed
Commit 8711427
1 Parent(s): 1dc6036

Resubmitting MP3 - deleting fork on Github

Files changed (1)
  1. fchernow.py +112 -0
fchernow.py ADDED
@@ -0,0 +1,112 @@
+ import json
+ import re
+ import requests
+ import gzip
+ from urlextract import URLExtract
+ from pathlib import Path
+
+ # Define user ID and base URLs for Hugging Face and GitHub
+ utid = 'fchernow'
+ base = {
+     'model': 'https://huggingface.co/',
+     'data': 'https://huggingface.co/datasets/',
+     'source': 'https://'
+ }
+ post_hf = '/raw/main/README.md'
+ post_gh = '/blob/main/README.md'  # or '/blob/master/README.md' if necessary
+
+ # Initialize URL extractor and DOI pattern
+ extU = URLExtract()
+ DOIpattern = r'\b(10\.\d{4,9}/[-._;()/:A-Z0-9]+)\b'
+ # BibTeX pattern to match entries like "@article{...}"
+ BibTeXPattern = r'@\w+\{[^}]+\}'
+
+ # Set a timeout for requests (in seconds)
+ REQUEST_TIMEOUT = 10
+
+ # Functions to extract URLs, DOIs, and BibTeX entries
+ def extractURLs(content):
+     return extU.find_urls(content)
+
+ def extractDOIs(content):
+     return re.findall(DOIpattern, content, re.IGNORECASE)
+
+ def extractBibTeX(content):
+     return re.findall(BibTeXPattern, content, re.DOTALL)
+
+ # Output file for compressed JSON data
+ output_path = f"output_data/{utid}.json.gz"
+ Path("output_data").mkdir(parents=True, exist_ok=True)  # Ensure the directory exists
+
+ # Variable to track the number of successful entries
+ successful_entries = 0
+
+ # Open output file for writing
+ with gzip.open(output_path, 'wt', encoding='utf-8') as fo:
+
+     def run(tp):
+         global successful_entries  # Reference the successful_entries variable in the global scope
+         # Determine which post suffix to use
+         post_suffix = post_hf if tp != 'source' else post_gh
+
+         # Open the input file based on type (model, data, or source)
+         with open(f"{utid}_{tp}.txt", 'r') as f:
+             line_count = 0  # Track the number of lines processed
+             for line in f:
+                 line = line.strip()
+                 if not line:
+                     continue
+
+                 # Handle GitHub entries in 'source'
+                 if tp == 'source':
+                     if ';' in line:
+                         npapers, line = line.split(';')
+                     else:
+                         continue
+
+                 # Construct the full URL for README retrieval
+                 url = base[tp] + f"{line}{post_suffix}"
+
+                 # Fetch README content with timeout
+                 try:
+                     r = requests.get(url, timeout=REQUEST_TIMEOUT)
+                     r.raise_for_status()
+                     content = r.text
+                     status = "success"
+                 except requests.RequestException:
+                     content = ""
+                     status = "failed"
+
+                 # Extract URLs, DOIs, and BibTeX entries from the README content
+                 urls = extractURLs(content)
+                 dois = extractDOIs(content)
+                 bibs = extractBibTeX(content)
+
+                 # Write the entry to output file, regardless of status
+                 res = {
+                     'id': line,
+                     'type': tp,
+                     'url': url,
+                     'content': content.replace("\n", " ") if content else "",
+                     'links': urls,
+                     'dois': dois,
+                     'bibs': bibs,
+                     'status': status
+                 }
+                 out = json.dumps(res, ensure_ascii=False)
+                 fo.write(out + "\n")
+
+                 # If the entry was successful, increment the counter
+                 if status == "success":
+                     successful_entries += 1
+
+                 line_count += 1
+
+         print(f"Processed {line_count} lines in {tp} file.")
+
+     # Run for each type: model, data, and source
+     run('model')
+     run('data')
+     run('source')
+
+ print(f"Data successfully saved to {output_path} with {successful_entries} successful entries.")
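The script writes one JSON object per line into a gzip-compressed file at output_data/fchernow.json.gz. The snippet below is not part of the committed file; it is a minimal sketch of how that output could be read back for spot-checking, assuming the file was produced by the script above and that each line is a self-contained JSON record.

import gzip
import json

# Hypothetical verification snippet: iterate over the JSON-lines output
# from fchernow.py and tally entries by their recorded fetch status.
counts = {}
with gzip.open("output_data/fchernow.json.gz", "rt", encoding="utf-8") as fi:
    for raw in fi:
        entry = json.loads(raw)
        counts[entry["status"]] = counts.get(entry["status"], 0) + 1

print(counts)  # prints a dict like {'success': N, 'failed': M}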