albertvillanova HF staff committed on
Commit
c9f4dd8
1 Parent(s): 92f6b6f

Convert dataset to Parquet (#4)

Browse files

- Convert dataset to Parquet (0c6f27ab3a54d9923a8951116e451770f5a2b6d7)
- Delete loading script (8f3ac6e2f1ef58f0392afdc29a1867005dd3e13f)
- Delete loading script auxiliary file (a2f20bce3c6ef88461b3e07a1f37be7ba053d0aa)
- Delete loading script auxiliary file (94cfda1afa272e7264633790ebd09e026aa2c096)

README.md CHANGED
@@ -30,16 +30,25 @@ dataset_info:
30
  dtype: string
31
  splits:
32
  - name: train
33
- num_bytes: 96225611
34
  num_examples: 100000
35
  - name: validation
36
- num_bytes: 1749751
37
  num_examples: 2000
38
  - name: test
39
- num_bytes: 1609306
40
  num_examples: 2000
41
- download_size: 100769638
42
- dataset_size: 99584668
 
 
 
 
 
 
 
 
 
43
  ---
44
  # Dataset Card for "code_x_glue_tc_text_to_code"
45
 
 
30
  dtype: string
31
  splits:
32
  - name: train
33
+ num_bytes: 96225531
34
  num_examples: 100000
35
  - name: validation
36
+ num_bytes: 1749743
37
  num_examples: 2000
38
  - name: test
39
+ num_bytes: 1609298
40
  num_examples: 2000
41
+ download_size: 34258354
42
+ dataset_size: 99584572
43
+ configs:
44
+ - config_name: default
45
+ data_files:
46
+ - split: train
47
+ path: data/train-*
48
+ - split: validation
49
+ path: data/validation-*
50
+ - split: test
51
+ path: data/test-*
52
  ---
53
  # Dataset Card for "code_x_glue_tc_text_to_code"
54
 
code_x_glue_tc_text_to_code.py DELETED
@@ -1,69 +0,0 @@
1
- import json
2
- from typing import List
3
-
4
- import datasets
5
-
6
- from .common import Child
7
- from .generated_definitions import DEFINITIONS
8
-
9
-
10
- _DESCRIPTION = """We use concode dataset which is a widely used code generation dataset from Iyer's EMNLP 2018 paper Mapping Language to Code in Programmatic Context. See paper for details."""
11
- _CITATION = """@article{iyer2018mapping,
12
- title={Mapping language to code in programmatic context},
13
- author={Iyer, Srinivasan and Konstas, Ioannis and Cheung, Alvin and Zettlemoyer, Luke},
14
- journal={arXiv preprint arXiv:1808.09588},
15
- year={2018}
16
- }"""
17
-
18
-
19
- class CodeXGlueTcTextToCodeImpl(Child):
20
- _DESCRIPTION = _DESCRIPTION
21
- _CITATION = _CITATION
22
-
23
- _FEATURES = {
24
- "id": datasets.Value("int32"), # Index of the sample
25
- "nl": datasets.Value("string"), # The natural language description of the task
26
- "code": datasets.Value("string"), # The programming source code for the task
27
- }
28
-
29
- _SUPERVISED_KEYS = ["code"]
30
-
31
- SPLITS = {"train": datasets.Split.TRAIN, "dev": datasets.Split.VALIDATION, "test": datasets.Split.TEST}
32
-
33
- def generate_urls(self, split_name):
34
- yield "data", f"concode/{split_name}.json"
35
-
36
- def _generate_examples(self, split_name, file_paths):
37
- with open(file_paths["data"], encoding="utf-8") as f:
38
- for idx, line in enumerate(f):
39
- entry = json.loads(line)
40
- entry["id"] = idx
41
- yield idx, entry
42
-
43
-
44
- CLASS_MAPPING = {
45
- "CodeXGlueTcTextToCode": CodeXGlueTcTextToCodeImpl,
46
- }
47
-
48
-
49
- class CodeXGlueTcTextToCode(datasets.GeneratorBasedBuilder):
50
- BUILDER_CONFIG_CLASS = datasets.BuilderConfig
51
- BUILDER_CONFIGS = [
52
- datasets.BuilderConfig(name=name, description=info["description"]) for name, info in DEFINITIONS.items()
53
- ]
54
-
55
- def _info(self):
56
- name = self.config.name
57
- info = DEFINITIONS[name]
58
- if info["class_name"] in CLASS_MAPPING:
59
- self.child = CLASS_MAPPING[info["class_name"]](info)
60
- else:
61
- raise RuntimeError(f"Unknown python class for dataset configuration {name}")
62
- ret = self.child._info()
63
- return ret
64
-
65
- def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
66
- return self.child._split_generators(dl_manager=dl_manager)
67
-
68
- def _generate_examples(self, split_name, file_paths):
69
- return self.child._generate_examples(split_name, file_paths)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
common.py DELETED
@@ -1,75 +0,0 @@
1
- from typing import List
2
-
3
- import datasets
4
-
5
-
6
- # Citation, taken from https://github.com/microsoft/CodeXGLUE
7
- _DEFAULT_CITATION = """@article{CodeXGLUE,
8
- title={CodeXGLUE: A Benchmark Dataset and Open Challenge for Code Intelligence},
9
- year={2020},}"""
10
-
11
-
12
- class Child:
13
- _DESCRIPTION = None
14
- _FEATURES = None
15
- _CITATION = None
16
- SPLITS = {"train": datasets.Split.TRAIN}
17
- _SUPERVISED_KEYS = None
18
-
19
- def __init__(self, info):
20
- self.info = info
21
-
22
- def homepage(self):
23
- return self.info["project_url"]
24
-
25
- def _info(self):
26
- # This is the description that will appear on the datasets page.
27
- return datasets.DatasetInfo(
28
- description=self.info["description"] + "\n\n" + self._DESCRIPTION,
29
- features=datasets.Features(self._FEATURES),
30
- homepage=self.homepage(),
31
- citation=self._CITATION or _DEFAULT_CITATION,
32
- supervised_keys=self._SUPERVISED_KEYS,
33
- )
34
-
35
- def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
36
- SPLITS = self.SPLITS
37
- _URL = self.info["raw_url"]
38
- urls_to_download = {}
39
- for split in SPLITS:
40
- if split not in urls_to_download:
41
- urls_to_download[split] = {}
42
-
43
- for key, url in self.generate_urls(split):
44
- if not url.startswith("http"):
45
- url = _URL + "/" + url
46
- urls_to_download[split][key] = url
47
-
48
- downloaded_files = {}
49
- for k, v in urls_to_download.items():
50
- downloaded_files[k] = dl_manager.download_and_extract(v)
51
-
52
- return [
53
- datasets.SplitGenerator(
54
- name=SPLITS[k],
55
- gen_kwargs={"split_name": k, "file_paths": downloaded_files[k]},
56
- )
57
- for k in SPLITS
58
- ]
59
-
60
- def check_empty(self, entries):
61
- all_empty = all([v == "" for v in entries.values()])
62
- all_non_empty = all([v != "" for v in entries.values()])
63
-
64
- if not all_non_empty and not all_empty:
65
- raise RuntimeError("Parallel data files should have the same number of lines.")
66
-
67
- return all_empty
68
-
69
-
70
- class TrainValidTestChild(Child):
71
- SPLITS = {
72
- "train": datasets.Split.TRAIN,
73
- "valid": datasets.Split.VALIDATION,
74
- "test": datasets.Split.TEST,
75
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3da8b3957ef33fa28cdffd5878ce6f1d35d2b88e02efe622008673e459e3ca73
3
+ size 526047
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52922bd26d5a10042cce4b716310c5b34807cdf0328c8727d2384ba5d64001c0
3
+ size 33097826
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:27fb25c6d2cbd2e86726b8093d472086059979866c9e6f02b2896cd60cd4b167
3
+ size 634481
generated_definitions.py DELETED
@@ -1,12 +0,0 @@
1
- DEFINITIONS = {
2
- "default": {
3
- "class_name": "CodeXGlueTcTextToCode",
4
- "dataset_type": "Text-Code",
5
- "description": "CodeXGLUE text-to-code dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Text-Code/text-to-code",
6
- "dir_name": "text-to-code",
7
- "name": "default",
8
- "project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Text-Code/text-to-code",
9
- "raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Text-Code/text-to-code/dataset",
10
- "sizes": {"test": 2000, "train": 100000, "validation": 2000},
11
- }
12
- }