sbmaruf commited on
Commit
cc74a7c
1 parent: 58b891d

Data loader updated: add a `validation_small` split for the `code_translation` task (new shard directory entries, URL construction/download for the new split, and an extra `SplitGenerator` appended to the returned split list).

Browse files
Files changed (1) hide show
  1. xCodeEval.py +35 -4
xCodeEval.py CHANGED
@@ -560,7 +560,20 @@ _SHARDS = {
560
  {"type":"file","name":"Ruby.jsonl"},
561
  {"type":"file","name":"Rust.jsonl"}
562
  ]},
563
- {"type":"report","directories":0,"files":133}
 
 
 
 
 
 
 
 
 
 
 
 
 
564
  ],
565
  "program_synthesis":[
566
  {"type":"directory","name":"program_synthesis/test","contents":[
@@ -1927,7 +1940,6 @@ def get_file_name(task_name, split_name):
1927
  BASE_URL = "https://huggingface.co/datasets/NTU-NLP-sg/xCodeEval/resolve/main/{task_name}/{split}/{file_name}"
1928
  PROBLEM_DESC_URL = "https://huggingface.co/datasets/NTU-NLP-sg/xCodeEval/resolve/main/problem_descriptions.jsonl"
1929
  UNIT_TEST_DB_URL = "https://huggingface.co/datasets/NTU-NLP-sg/xCodeEval/resolve/main/unittest_db.json"
1930
-
1931
 
1932
  class xCodeEvalConfig(datasets.BuilderConfig):
1933
  """BuilderConfig"""
@@ -2005,7 +2017,7 @@ class xCodeEval(datasets.GeneratorBasedBuilder):
2005
  train_urls = [ BASE_URL.format(task_name=task_name, split="train", file_name=urllib.parse.quote(file_name)) for file_name in TRAIN_FILE_NAMES]
2006
  validation_urls = [ BASE_URL.format(task_name=task_name, split="validation", file_name=urllib.parse.quote(file_name)) for file_name in VALIDATION_FILE_NAMES]
2007
  test_urls = [ BASE_URL.format(task_name=task_name, split="test", file_name=urllib.parse.quote(file_name)) for file_name in TEST_FILE_NAMES]
2008
-
2009
  # train_urls = [ BASE_URL.format(task_name=task_name, split="train", file_name=file_name) for file_name in TRAIN_FILE_NAMES]
2010
  # validation_urls = [ BASE_URL.format(task_name=task_name, split="validation", file_name=file_name) for file_name in VALIDATION_FILE_NAMES]
2011
  # test_urls = [ BASE_URL.format(task_name=task_name, split="test", file_name=file_name) for file_name in TEST_FILE_NAMES]
@@ -2018,13 +2030,20 @@ class xCodeEval(datasets.GeneratorBasedBuilder):
2018
  # validation_downloaded_files = validation_urls
2019
  # test_downloaded_files = test_urls
2020
 
 
 
 
 
 
 
 
2021
  prob_desc_file, unit_test_db_file = None, None
2022
  if task_name in _PROBLEM_DESC_REQ_TASK:
2023
  prob_desc_file = dl_manager.download(PROBLEM_DESC_URL)
2024
  if task_name in _UNIT_TEST_REQ_TASK:
2025
  unit_test_db_file = dl_manager.download(UNIT_TEST_DB_URL)
2026
 
2027
- return [
2028
  datasets.SplitGenerator(
2029
  name=datasets.Split.TRAIN,
2030
  gen_kwargs={
@@ -2048,6 +2067,18 @@ class xCodeEval(datasets.GeneratorBasedBuilder):
2048
  }
2049
  ),
2050
  ]
 
 
 
 
 
 
 
 
 
 
 
 
2051
 
2052
  def _generate_examples(self, filepaths, problem_description_file = None, unit_test_db_file = None):
2053
  """This function returns the examples"""
 
560
  {"type":"file","name":"Ruby.jsonl"},
561
  {"type":"file","name":"Rust.jsonl"}
562
  ]},
563
+ {"type":"directory","name":"code_translation/validation_small","contents":[
564
+ {"type":"file","name":"C#.jsonl"},
565
+ {"type":"file","name":"C++.jsonl"},
566
+ {"type":"file","name":"C.jsonl"},
567
+ {"type":"file","name":"Go.jsonl"},
568
+ {"type":"file","name":"Java.jsonl"},
569
+ {"type":"file","name":"Javascript.jsonl"},
570
+ {"type":"file","name":"Kotlin.jsonl"},
571
+ {"type":"file","name":"PHP.jsonl"},
572
+ {"type":"file","name":"Python.jsonl"},
573
+ {"type":"file","name":"Ruby.jsonl"},
574
+ {"type":"file","name":"Rust.jsonl"}
575
+ ]},
576
+ {"type":"report","directories":0,"files":144}
577
  ],
578
  "program_synthesis":[
579
  {"type":"directory","name":"program_synthesis/test","contents":[
 
1940
  BASE_URL = "https://huggingface.co/datasets/NTU-NLP-sg/xCodeEval/resolve/main/{task_name}/{split}/{file_name}"
1941
  PROBLEM_DESC_URL = "https://huggingface.co/datasets/NTU-NLP-sg/xCodeEval/resolve/main/problem_descriptions.jsonl"
1942
  UNIT_TEST_DB_URL = "https://huggingface.co/datasets/NTU-NLP-sg/xCodeEval/resolve/main/unittest_db.json"
 
1943
 
1944
  class xCodeEvalConfig(datasets.BuilderConfig):
1945
  """BuilderConfig"""
 
2017
  train_urls = [ BASE_URL.format(task_name=task_name, split="train", file_name=urllib.parse.quote(file_name)) for file_name in TRAIN_FILE_NAMES]
2018
  validation_urls = [ BASE_URL.format(task_name=task_name, split="validation", file_name=urllib.parse.quote(file_name)) for file_name in VALIDATION_FILE_NAMES]
2019
  test_urls = [ BASE_URL.format(task_name=task_name, split="test", file_name=urllib.parse.quote(file_name)) for file_name in TEST_FILE_NAMES]
2020
+
2021
  # train_urls = [ BASE_URL.format(task_name=task_name, split="train", file_name=file_name) for file_name in TRAIN_FILE_NAMES]
2022
  # validation_urls = [ BASE_URL.format(task_name=task_name, split="validation", file_name=file_name) for file_name in VALIDATION_FILE_NAMES]
2023
  # test_urls = [ BASE_URL.format(task_name=task_name, split="test", file_name=file_name) for file_name in TEST_FILE_NAMES]
 
2030
  # validation_downloaded_files = validation_urls
2031
  # test_downloaded_files = test_urls
2032
 
2033
+ if task_name == "code_translation":
2034
+ VALIDATION_SMALL_FILE_NAMES = get_file_name(task_name, "validation_small")
2035
+ validation_small_urls = [ BASE_URL.format(task_name=task_name, split="validation_small", file_name=urllib.parse.quote(file_name)) for file_name in VALIDATION_SMALL_FILE_NAMES]
2036
+ validation_small_downloaded_files = dl_manager.download(validation_small_urls)
2037
+ # validation_small_urls = [ BASE_URL.format(task_name=task_name, split="validation_small", file_name=file_name) for file_name in VALIDATION_SMALL_FILE_NAMES]
2038
+ # validation_small_downloaded_files = validation_small_urls
2039
+
2040
  prob_desc_file, unit_test_db_file = None, None
2041
  if task_name in _PROBLEM_DESC_REQ_TASK:
2042
  prob_desc_file = dl_manager.download(PROBLEM_DESC_URL)
2043
  if task_name in _UNIT_TEST_REQ_TASK:
2044
  unit_test_db_file = dl_manager.download(UNIT_TEST_DB_URL)
2045
 
2046
+ split_info = [
2047
  datasets.SplitGenerator(
2048
  name=datasets.Split.TRAIN,
2049
  gen_kwargs={
 
2067
  }
2068
  ),
2069
  ]
2070
+ if task_name == "code_translation":
2071
+ split_info.append(
2072
+ datasets.SplitGenerator(
2073
+ name="validation_small",
2074
+ gen_kwargs={
2075
+ "filepaths": validation_small_downloaded_files,
2076
+ "problem_description_file": prob_desc_file,
2077
+ "unit_test_db_file": unit_test_db_file,
2078
+ }
2079
+ ),
2080
+ )
2081
+ return split_info
2082
 
2083
  def _generate_examples(self, filepaths, problem_description_file = None, unit_test_db_file = None):
2084
  """This function returns the examples"""