holylovenia committed on
Commit 6dbbf64
1 Parent(s): 06cdd3a

Upload indosum.py with huggingface_hub

Files changed (1)
  1. indosum.py +12 -12
indosum.py CHANGED
@@ -4,9 +4,9 @@ from typing import Dict, List, Tuple
 
 import datasets
 
-from nusacrowd.utils.configs import NusantaraConfig
-from nusacrowd.utils.constants import Tasks
-from nusacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Tasks
+from seacrowd.utils import schemas
 import jsonlines
 from nltk.tokenize.treebank import TreebankWordDetokenizer
 
@@ -43,18 +43,18 @@ _SUPPORTED_TASKS = [Tasks.SUMMARIZATION]
 
 _SOURCE_VERSION = "1.0.0"
 
-_NUSANTARA_VERSION = "1.0.0"
+_SEACROWD_VERSION = "2024.06.20"
 
 
 class IndoSUM(datasets.GeneratorBasedBuilder):
     """INDOSUM is a new benchmark dataset for Indonesian text summarization. The dataset consists of news articles and manually constructed summaries."""
 
     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
-    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
 
     BUILDER_CONFIGS = (
         [
-            NusantaraConfig(
+            SEACrowdConfig(
                 name="indosum_fold{fold_number}_source".format(fold_number=i),
                 version=_SOURCE_VERSION,
                 description="indosum source schema",
@@ -64,11 +64,11 @@ class IndoSUM(datasets.GeneratorBasedBuilder):
         ]
         +
         [
-            NusantaraConfig(
-                name="indosum_fold{fold_number}_nusantara_t2t".format(fold_number=i),
-                version=_NUSANTARA_VERSION,
+            SEACrowdConfig(
+                name="indosum_fold{fold_number}_seacrowd_t2t".format(fold_number=i),
+                version=_SEACROWD_VERSION,
                 description="indosum Nusantara schema",
-                schema="nusantara_t2t",
+                schema="seacrowd_t2t",
                 subset_id="indosum_fold{fold_number}".format(fold_number=i),
             ) for i in range(5)
         ]
@@ -88,7 +88,7 @@ class IndoSUM(datasets.GeneratorBasedBuilder):
                 }
             )
 
-        elif self.config.schema == "nusantara_t2t":
+        elif self.config.schema == "seacrowd_t2t":
             features = schemas.text2text_features
 
         return datasets.DatasetInfo(
@@ -189,7 +189,7 @@ class IndoSUM(datasets.GeneratorBasedBuilder):
                 yield i, ex
                 i+=1
 
-        elif self.config.schema == "nusantara_t2t":
+        elif self.config.schema == "seacrowd_t2t":
            i = 0
            with jsonlines.open(filepath) as f:
                for each_data in f.iter():
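
For anyone updating downstream code, a minimal sketch of loading one fold under the renamed config is shown below. It assumes a local copy of this indosum.py, an installed seacrowd package (which provides SEACrowdConfig and schemas), and the trust_remote_code flag that newer datasets releases require for script-based loaders; none of this is part of the commit itself.

import datasets

# Hedged sketch, not part of this commit. "indosum_fold0_seacrowd_t2t" is one of the
# five seacrowd_t2t configs defined in BUILDER_CONFIGS above; the script path is an
# assumption about where the file lives locally.
indosum = datasets.load_dataset(
    "indosum.py",
    name="indosum_fold0_seacrowd_t2t",
    trust_remote_code=True,
)
print(indosum)  # splits carry the text2text features from schemas.text2text_features

Note that the old indosum_fold{n}_nusantara_t2t config names no longer exist after this commit, so callers pinned to them need to switch to the seacrowd_t2t equivalents.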