parquet-converter committed on
Commit
b5a687a
1 Parent(s): 68f25c6

Update parquet files

tldr-docs.jsonl → data/tldr-test.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f88829c99c75fc2e7ed7ff2181410189fb21243728f6a5062b79553f9b0aa78d
-size 105837490
+oid sha256:6eee946721fbb5b064cf730f66c47766fd0f0d939329d1d78d7296ee5f5fcc87
+size 119876
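
The hunk above changes a Git LFS pointer file, not the data itself: LFS keeps a three-line stub in the repository (spec version, SHA-256 object id, byte size) and stores the real blob in object storage. A minimal sketch of parsing and verifying such a pointer, assuming the standard key-value-per-line format shown above (parse_lfs_pointer and verify_blob are illustrative names, not part of any LFS library):

import hashlib

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value", e.g. "size 119876".
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(pointer: dict, blob: bytes) -> bool:
    # Check a downloaded object against the pointer's oid and size.
    digest = hashlib.sha256(blob).hexdigest()
    return (pointer["oid"] == f"sha256:{digest}"
            and int(pointer["size"]) == len(blob))
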
data/tldr-train.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99aeb1b9f08e38ef9e2402aea7b0ebc38d92b4c46d9ee6823d1f344635bc6eda
+size 1099909
data/tldr-validation.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2258024105323a402c06d2e4b2bdb2f1afb9c60c77b88a40d8a9a4f527574d2
+size 296552
docs/tldr-train.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:709a1c37ea78618bcf0f2378f2742676a69ec283e2ff3081cabe39dd0f54f465
+size 28536438
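
With the three split files in place under data/, the dataset can be read straight from Parquet with the datasets library's built-in parquet loader. A hedged sketch: the relative paths are the ones added in this commit, and the repository is assumed to be checked out locally with LFS objects pulled.

from datasets import load_dataset

ds = load_dataset(
    "parquet",
    data_files={
        "train": "data/tldr-train.parquet",
        "validation": "data/tldr-validation.parquet",
        "test": "data/tldr-test.parquet",
    },
)
# "nl" and "cmd" are fields defined by the script's "data" config below.
print(ds["train"][0]["nl"], "->", ds["train"][0]["cmd"])
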
tldr-dev.jsonl DELETED
The diff for this file is too large to render.
 
tldr-test.jsonl DELETED
The diff for this file is too large to render.
 
tldr-train.jsonl DELETED
The diff for this file is too large to render.
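
These deleted JSONL splits are what the parquet-converter turned into the files added above. A rough sketch of that conversion, assuming pandas with pyarrow installed (the bot's actual pipeline is not part of this commit); the split mapping follows the old script, where tldr-dev.jsonl served as the validation split:

import pathlib
import pandas as pd

pathlib.Path("data").mkdir(exist_ok=True)
for src, dst in [
    ("tldr-train.jsonl", "data/tldr-train.parquet"),
    ("tldr-dev.jsonl", "data/tldr-validation.parquet"),
    ("tldr-test.jsonl", "data/tldr-test.parquet"),
]:
    df = pd.read_json(src, lines=True)  # one JSON object per line
    df.to_parquet(dst, index=False)     # columnar Parquet output
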
 
tldr.py DELETED
@@ -1,117 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""TLDR dataset."""
-
-import json
-import datasets
-
-
-_CITATION = """\
-@article{zhou2022doccoder,
-  title={DocCoder: Generating Code by Retrieving and Reading Docs},
-  author={Zhou, Shuyan and Alon, Uri and Xu, Frank F and Jiang, Zhengbao and Neubig, Graham},
-  journal={arXiv preprint arXiv:2207.05987},
-  year={2022}
-}
-"""
-
-_DESCRIPTION = """This is a re-split of the CoNaLa dataset. For each code snippet in the dev and test sets, at least one function is held out from the training set. This split aims at testing a code generation model's capacity to generate unseen functions.
-We further make sure that examples from the same StackOverflow post (same question_id before the "-") are in the same split."""
-
-_HOMEPAGE = "https://github.com/shuyanzhou/docprompting"
-
-_URLs = {
-    "docs": "tldr-docs.jsonl",
-    "data": {"train": "tldr-train.jsonl", "validation": "tldr-dev.jsonl", "test": "tldr-test.jsonl"},
-}
-
-
-class DocPromptingConala(datasets.GeneratorBasedBuilder):
-    """TLDR natural language to bash generation dataset."""
-
-    VERSION = datasets.Version("1.1.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="data",
-            version=datasets.Version("1.1.0"),
-            description=_DESCRIPTION,
-        ),
-        datasets.BuilderConfig(name="docs", version=datasets.Version("1.1.0"), description=_DESCRIPTION),
-    ]
-
-    DEFAULT_CONFIG_NAME = "data"
-
-    def _info(self):
-        if self.config.name == "data":
-            features = datasets.Features(
-                {
-                    "question_id": datasets.Value("string"),
-                    "nl": datasets.Value("string"),
-                    "cmd": datasets.Value("string"),
-                    "oracle_man": datasets.Sequence(feature=datasets.Value("string")),
-                    "cmd_name": datasets.Value("string"),
-                    "tldr_cmd_name": datasets.Value("string"),
-                    "manual_exist": datasets.Value("bool"),
-                    "matching_info": datasets.Sequence(
-                        {
-                            "token": datasets.Value("string"),
-                            "oracle_man": datasets.Sequence(feature=datasets.Value("string")),
-                        }
-                    ),
-                }
-            )
-        else:
-            features = datasets.Features(
-                {
-                    "doc_id": datasets.Value("string"),
-                    "doc_content": datasets.Value("string"),
-                }
-            )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            citation=_CITATION,
-            homepage=_HOMEPAGE,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        config_urls = _URLs[self.config.name]
-        data_dir = dl_manager.download_and_extract(config_urls)
-        if self.config.name == "data":
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    gen_kwargs={"filepath": data_dir["train"], "split": "train"},
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    gen_kwargs={"filepath": data_dir["test"], "split": "test"},
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.VALIDATION,
-                    gen_kwargs={"filepath": data_dir["validation"], "split": "validation"},
-                ),
-            ]
-        else:
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    gen_kwargs={"filepath": data_dir, "split": "train"},
-                ),
-            ]
-
-    def _generate_examples(self, filepath, split):
-        """Yields (key, example) pairs, one per JSONL line."""
-        with open(filepath, encoding="utf-8") as f:
-            for key, line in enumerate(f):
-                yield key, json.loads(line)
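
Before this conversion, the deleted script above was what load_dataset executed on the raw JSONL files. A sketch of how it was typically invoked with the two builder configs it defines; "user/tldr" is a placeholder repo id, not taken from this commit:

from datasets import load_dataset

# "data" yields the train / validation / test command-generation splits;
# "docs" yields a single train split of documentation pages.
data = load_dataset("user/tldr", "data")
docs = load_dataset("user/tldr", "docs")

print(data)                         # DatasetDict with three splits
print(docs["train"][0]["doc_id"])   # fields per the "docs" config schema
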