sh110495 committed on
Commit
ffa90f5
1 Parent(s): 690e45a

Make optimization data

Browse files
.gitattributes CHANGED
@@ -25,3 +25,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
25
  *.zip filter=lfs diff=lfs merge=lfs -text
26
  *.zstandard filter=lfs diff=lfs merge=lfs -text
27
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
25
  *.zip filter=lfs diff=lfs merge=lfs -text
26
  *.zstandard filter=lfs diff=lfs merge=lfs -text
27
  *tfevents* filter=lfs diff=lfs merge=lfs -text
28
+ train_data.csv filter=lfs diff=lfs merge=lfs -text
29
+ validation_data.csv filter=lfs diff=lfs merge=lfs -text
summarization_optimization.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import pandas as pd
3
+ import datasets
4
+ import csv
5
+ from datasets.tasks import Summarization
6
+
7
+ logger = datasets.logging.get_logger(__name__)
8
+
9
+
10
# Human-readable description shown alongside the dataset.
_DESCRIPTION = """\
Aihub Document summarization data
"""
# Base URL of the dataset repository; split CSVs are resolved relative to it.
_URL = "https://huggingface.co/datasets/metamong1/summarization_optimization/resolve/main/"
# Split key -> remote CSV location, consumed by the download manager
# in SummarizationOptimization._split_generators.
_URLS = {
    "train_data": _URL + "train_data.csv",
    "validation_data": _URL + "validation_data.csv",
}
18
+
19
class SummarizationOptimization(datasets.GeneratorBasedBuilder):
    """Loading script for the Aihub document-summarization dataset.

    Downloads train/validation CSVs and yields one example per row with the
    columns ``doc_id``, ``title``, ``text``, ``doc_type`` and ``file``.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="Summarization Part Data",
            version=datasets.Version("1.0.0", ""),
            description="Text Summarization & Generation Title for optimization",
        ),
    ]

    def _info(self):
        """Return dataset metadata: description, feature schema and homepage."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "doc_id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "doc_type": datasets.Value("string"),
                    "file": datasets.Value("string"),
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/metamong1/summarization_optimization",
        )

    def _split_generators(self, dl_manager):
        """Download the split CSVs and map each to a datasets.SplitGenerator."""
        downloaded_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train_data"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation_data"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(index, example_dict)`` pairs from the CSV at *filepath*.

        The first row is treated as the header and skipped; every following
        row must provide at least the five columns declared in ``_info``.
        """
        logger.info("generating examples from = %s", filepath)
        with open(filepath, newline="", encoding="utf-8") as csvfile:
            reader = csv.reader(csvfile, delimiter=",")
            # Skip the header row. The ``None`` default avoids an unhandled
            # StopIteration on an empty file — the loop then yields nothing.
            next(reader, None)

            # enumerate replaces the original hand-maintained ``idx`` counter.
            for idx, row in enumerate(reader):
                yield idx, {
                    "doc_id": row[0],
                    "title": row[1],
                    "text": row[2],
                    "doc_type": row[3],
                    "file": row[4],
                }
train_data.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:63aa4cabc7c14d9c42586f06b12b095beb3469ab2bd7154abd97d3f6bf09d9e2
3
+ size 178145961
validation_data.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b8f3ebe76e1f6f25406a0c951bede30fe1ce738ad83a239b2a08256cba54a964
3
+ size 44623623