ColinC5 committed
Commit c669fd1
Parent: 713557d

Move MP3 submission here to avoid LFS quota on GitHub

Files changed (2)
  1. ccanonac.json.gz +3 -0
  2. ccanonac.py +70 -0
ccanonac.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b1af2337a0d31771ae8aac3cbbb4704dd0826dcd73a6430ca92120156e4e0f4
+ size 3588428
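
The three lines above are a Git LFS pointer, not the archive itself: the 'version' line names the pointer spec, 'oid' is the SHA-256 of the real blob, and 'size' is its length in bytes (about 3.6 MB here). As a minimal sketch (not part of the commit; the helper name and paths are hypothetical), a separately downloaded blob could be checked against such a pointer like this:

import hashlib

def verify_lfs_pointer(pointer_path, blob_path):
    # Pointer files are 'key value' lines; build a small dict from them.
    fields = dict(line.strip().split(' ', 1) for line in open(pointer_path) if ' ' in line)
    expected_oid = fields['oid'].split(':', 1)[1]  # drop the 'sha256:' prefix
    data = open(blob_path, 'rb').read()
    return len(data) == int(fields['size']) and hashlib.sha256(data).hexdigest() == expected_oid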
ccanonac.py ADDED
@@ -0,0 +1,70 @@
+ from urlextract import URLExtract
+ import requests
+ import json
+ import gzip
+ import re
+
+ utid = 'ccanonac'
+ # Base URLs per repository type; 'source' entries already include their host (e.g. github.com/...).
+ base = {'model': 'https://huggingface.co/', 'data': 'https://huggingface.co/datasets/', 'source': 'https://'}
+ # Raw README paths; Hugging Face and GitHub happen to share the same /raw/<branch>/ layout.
+ post_main = '/raw/main/README.md'
+ post_master = '/raw/master/README.md'
+ postGH_master = '/raw/master/README.md'
+ postGH_main = '/raw/main/README.md'
+
+ extU = URLExtract()
+ DOIpattern = r'\b10\.\d{4,9}/[-.;()/:\w]+'
+ BIBpattern = r'@\w+\{(,|[^[},]|},)+}(,*\s*)\n}'
+
+ def extractURLs(c):
+     return extU.find_urls(c)
+
+ def extractDOIs(c):
+     return re.findall(DOIpattern, c)
+
+ def extractBIBs(c):
+     it = re.finditer(BIBpattern, c, re.IGNORECASE)
+     return [i.group(0) for i in it]
+
+ fo = gzip.open(f"output/{utid}.json.gz", 'w')
+
+ def run(tp):
+     with open(f"input/{utid}_{tp}", 'r') as f:
+         for line in f:
+             # Drop any '#' comment first, then surrounding whitespace; skip lines left empty.
+             line = line.partition('#')[0].strip()
+             if not line:
+                 continue
+             if tp == 'source':
+                 # Source lines look like '<npapers>;<host/owner/repo>'.
+                 (npapers, line) = line.split(';')
+                 post0 = postGH_master
+             else:
+                 post0 = post_main
+             url = f"{base[tp]}{line}{post0}"
+             print(url)
+             response = requests.get(url)
+
+             # On an error, retry with the other default branch name (main vs. master).
+             if response.status_code != 200:
+                 print(response.status_code)
+                 post0 = postGH_main if tp == 'source' else post_master
+                 url = f"{base[tp]}{line}{post0}"
+                 response = requests.get(url)
+                 if response.status_code != 200:
+                     print(response.status_code)
+                     continue
+
+             content = response.text
+             urls = extractURLs(content)
+             dois = extractDOIs(content)
+             bibs = extractBIBs(content)
+             res = {'ID': line, 'type': tp, 'url': url, 'content': content.replace("\n", " "), 'links': urls, 'dois': dois, 'bibs': bibs}
+             out = json.dumps(res, ensure_ascii=False)
+             fo.write((out + "\n").encode())
+
+ run('model')
+ run('data')
+ run('source')
+ fo.close()  # flush the gzip trailer
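
The script expects plain-text ID lists at input/ccanonac_model, input/ccanonac_data, and input/ccanonac_source, one repository per line, with '#' starting a comment; judging from the split(';') above, source lines carry a paper count before the full repository path. An illustrative (hypothetical) pair of entries:

# input/ccanonac_model
bert-base-uncased
# input/ccanonac_source  -- format: <npapers>;<host/owner/repo>
2;github.com/huggingface/transformers

Each successfully fetched README becomes one JSON line in output/ccanonac.json.gz, with the keys built in the script ('ID', 'type', 'url', 'content', 'links', 'dois', 'bibs'). A short sketch for reading it back, assuming the same paths:

import gzip, json

with gzip.open('output/ccanonac.json.gz', 'rt', encoding='utf-8') as f:
    for rec in map(json.loads, f):
        print(rec['ID'], rec['type'], len(rec['links']), 'links', len(rec['dois']), 'DOIs')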