"""Fetch README.md files for the Hugging Face models/datasets and GitHub
repositories listed in the input files, extract URLs, DOIs, and BibTeX
entries from each, and write one JSON record per README to a gzipped
JSONL output file."""

from urlextract import URLExtract
import requests
import json
import gzip
import re

utid = 'ccanonac'

# Base URLs per input type; 'source' entries carry their own host (e.g. github.com).
base = {'model': 'https://huggingface.co/', 'data': 'https://huggingface.co/datasets/', 'source': 'https://'}

# Raw-README path suffixes; Hugging Face repos default to 'main', GitHub repos often still use 'master'.
post_main = '/raw/main/README.md'
post_master = '/raw/master/README.md'
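
# Illustrative URL composition (repo names below are made-up examples, not
# taken from the input files):
#   model  'bert-base-uncased'     -> https://huggingface.co/bert-base-uncased/raw/main/README.md
#   data   'squad'                 -> https://huggingface.co/datasets/squad/raw/main/README.md
#   source 'github.com/user/repo'  -> https://github.com/user/repo/raw/master/README.md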

extU = URLExtract()
# DOIs: '10.' + 4-9 digit registrant code + '/' + suffix.
DOIpattern = r'\b10\.\d{4,9}/[-.;()/:\w]+'
# Heuristic for BibTeX entries of the form '@type{...}' ending in '}\n}';
# not a full BibTeX parser, but good enough for README citation blocks.
BIBpattern = r'@\w+\{(,|[^[},]|},)+}(,*\s*)\n}'
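
# A hypothetical entry of the kind the BibTeX pattern targets (the key and
# fields below are illustrative, not from any input file):
#   @inproceedings{devlin2019bert,
#     author = {Devlin, Jacob and others},
#     year = {2019}
#   }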

def extractURLs(c):
  res = extU.find_urls(c)
  return res

def extractDOIs(c):
  res = re.findall(DOIpattern, c)
  return res

def extractBIBs(c):
  it = re.finditer(BIBpattern, c, re.IGNORECASE)
  res = [i.group(0) for i in it]
  return res
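
# Illustrative behavior of the extractors (inputs are made-up examples):
#   extractURLs("see https://arxiv.org/abs/1810.04805 for details")
#     -> ['https://arxiv.org/abs/1810.04805']
#   extractDOIs("published as doi:10.18653/v1/N19-1423")
#     -> ['10.18653/v1/N19-1423']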

# Output: one gzip-compressed JSON object per line (JSONL), one record per README.
fo = gzip.open(f"output/{utid}.json.gz", 'w')

def run(tp):
  with open(f"input/{utid}_{tp}", 'r') as f:
    for line in f:
      # drop inline comments first, then surrounding whitespace; skip empty lines
      line = line.partition('#')[0].strip()
      if not line:
        continue
      if tp == 'source':
        # source lines look like '<npapers>;<host/owner/repo>'
        (npapers, line) = line.split(';', 1)
        post0 = post_master
      else:
        post0 = post_main
      url = f"{base[tp]}{line}{post0}"
      print(url)
      response = requests.get(url, timeout=30)

      # on failure, retry with the other default branch name
      if response.status_code != 200:
        print(response.status_code)
        post0 = post_main if tp == 'source' else post_master
        url = f"{base[tp]}{line}{post0}"
        response = requests.get(url, timeout=30)
        if response.status_code != 200:
          print(response.status_code)
          continue

      content = response.text
      urls = extractURLs(content)
      dois = extractDOIs(content)
      bibs = extractBIBs(content)
      res = {'ID': line, 'type': tp, 'url': url, 'content': content.replace("\n", " "),
             'links': urls, 'dois': dois, 'bibs': bibs}
      out = json.dumps(res, ensure_ascii=False)
      fo.write((out + "\n").encode())

run('model')
run('data')
run('source')
fo.close()
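
# Optional sanity check (an illustrative sketch, not part of the original
# pipeline): re-open the gzipped JSONL output and report what was collected.
with gzip.open(f"output/{utid}.json.gz", 'rt') as f:
  for rec in map(json.loads, f):
    print(rec['ID'], len(rec['links']), len(rec['dois']), len(rec['bibs']))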