taisazero committed
Commit f875f18
1 Parent(s): 0b4fbbd

added data loader

Files changed (1):
  1. code_clippy_github.py +213 -0
code_clippy_github.py ADDED
@@ -0,0 +1,213 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Special thanks to @lvwerra -- we reference his repository here: https://huggingface.co/datasets/lvwerra/github-code/
"""Code Clippy Github Code dataset."""

import gzip
import json

import datasets
from huggingface_hub import HfApi
from datasets.data_files import DataFilesDict

_REPO_NAME = "CodedotAI/code_clippy_github"

_LANG_TO_EXTENSION = {
    "C": [".c"],
    "C#": [".cs"],
    "C++": [".cpp"],
    "CSS": [".css"],
    "Dart": [".dart"],
    "GO": [".go"],
    "HTML": [".html"],
    "Java": [".java"],
    "JavaScript": [".js"],
    "Jupyter Notebooks (Python)": [".ipynb"],
    "Kotlin": [".kt"],
    "Lisp": [".lisp"],
    "Matlab": [".m"],
    "PHP": [".php"],
    "Perl": [".pl"],
    "Python": [".py"],
    "R": [".r"],
    "Ruby": [".rb"],
    "Rust": [".rs"],
    "SQL": [".sql"],
    "Shell": [".sh"],
    "Swift": [".swift"],
    "TypeScript": [".ts"],
}

_LICENSES = [
    "mit",
    "apache-2.0",
    "gpl-2.0",
    "gpl-3.0",
    "bsd-3-clause",
    "bsd-2-clause",
    "unlicense",
    "agpl-3.0",
    "lgpl-3.0",
    "cc0-1.0",
    "epl-1.0",
    "lgpl-2.1",
    "mpl-2.0",
    "isc",
    "artistic-2.0",
]

_DESCRIPTION = """\
The Code Clippy dataset consists of various public codebases from GitHub in 22 programming languages with 23 extensions \
totalling about 16 TB of data when uncompressed. The dataset was created from the public GitHub dataset on Google BigQuery.
"""

_HOMEPAGE = "https://cloud.google.com/blog/topics/public-datasets/github-on-bigquery-analyze-all-the-open-source-code/"

# Invert the language-to-extension map so a file extension resolves to its language.
_EXTENSION_TO_LANG = {}
for lang in _LANG_TO_EXTENSION:
    for extension in _LANG_TO_EXTENSION[lang]:
        _EXTENSION_TO_LANG[extension] = lang

_LANG_CONFIGS = ["all"] + list(_LANG_TO_EXTENSION.keys())
_LICENSE_CONFIGS = ["all"] + _LICENSES


def lang_from_name(name):
    """Return the language for a file path based on its extension, or None if unknown."""
    for extension in _EXTENSION_TO_LANG:
        if name.endswith(extension):
            return _EXTENSION_TO_LANG[extension]
    return None


class CodeClippyGithubConfig(datasets.BuilderConfig):
    """BuilderConfig for the Code Clippy Github dataset."""

    def __init__(self, *args, languages=["all"], licenses=["all"], **kwargs):
        """BuilderConfig for the Code Clippy Github dataset.

        Args:
            languages (:obj:`List[str]`): List of languages to load.
            licenses (:obj:`List[str]`): List of licenses to load.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name="+".join(languages) + "-" + "+".join(licenses),
            **kwargs,
        )

        languages = set(languages)
        licenses = set(licenses)

        assert all(language in _LANG_CONFIGS for language in languages), f"Language not in {_LANG_CONFIGS}."
        assert all(license in _LICENSE_CONFIGS for license in licenses), f"License not in {_LICENSE_CONFIGS}."

        # "all" disables filtering and must not be combined with specific values.
        if "all" in languages:
            assert len(languages) == 1, "Passed 'all' together with other languages."
            self.filter_languages = False
        else:
            self.filter_languages = True

        if "all" in licenses:
            assert len(licenses) == 1, "Passed 'all' together with other licenses."
            self.filter_licenses = False
        else:
            self.filter_licenses = True

        self.languages = languages
        self.licenses = licenses


class CodeClippyGithub(datasets.GeneratorBasedBuilder):
    """Code Clippy Github dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = CodeClippyGithubConfig
    BUILDER_CONFIGS = [
        CodeClippyGithubConfig(languages=[lang], licenses=[license])
        for lang in _LANG_CONFIGS
        for license in _LICENSE_CONFIGS
    ]
    DEFAULT_CONFIG_NAME = "all-all"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "code_text": datasets.Value("string"),
                    "repo_name": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "language": datasets.Value("string"),
                    "license": datasets.Value("string"),
                    "size": datasets.Value("int32"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license="Multiple: see the 'license' field of each sample.",
        )

    def _split_generators(self, dl_manager):
        # Resolve the data files hosted in this dataset repository on the Hub.
        hfh_dataset_info = HfApi(datasets.config.HF_ENDPOINT).dataset_info(
            _REPO_NAME,
            timeout=100.0,
        )

        patterns = datasets.data_files.get_patterns_in_dataset_repository(hfh_dataset_info)
        data_files = DataFilesDict.from_hf_repo(
            patterns,
            dataset_info=hfh_dataset_info,
        )

        files = dl_manager.download_and_extract(data_files["train"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": files,
                },
            ),
        ]

    def _generate_examples(self, files):
        key = 0
        for file_idx, file in enumerate(files):
            # Each data file is a gzipped JSON-lines file with one record per source file.
            with gzip.open(file, "rb") as f:
                uncompressed_data = f.readlines()

            for batch_idx, code_base in enumerate(uncompressed_data):
                j_dict = json.loads(code_base.decode("utf-8"))

                lang = lang_from_name(j_dict["path"])
                license = j_dict["license"]

                if self.config.filter_languages and lang not in self.config.languages:
                    continue
                if self.config.filter_licenses and license not in self.config.licenses:
                    continue

                # The key must match the feature name declared in _info() ("code_text").
                yield key, {
                    "code_text": j_dict["content"],
                    "repo_name": j_dict["repo_name"],
                    "path": j_dict["path"],
                    "license": license,
                    "language": lang,
                    "size": int(j_dict["f0_"]),
                }
                key += 1
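
For reference, a minimal usage sketch (my illustration, not part of the commit): `load_dataset` forwards extra keyword arguments such as `languages` and `licenses` to `CodeClippyGithubConfig`, so the language and license filters can be set from the call site. Exact behavior depends on your `datasets` version, and note that loading the unfiltered dataset downloads the full underlying data.

from datasets import load_dataset

# Load only MIT-licensed Python files; the config name resolves to "Python-mit".
ds = load_dataset(
    "CodedotAI/code_clippy_github",
    languages=["Python"],
    licenses=["mit"],
    split="train",
)

print(ds[0]["repo_name"], ds[0]["path"], ds[0]["license"])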