moussaKam committed on
Commit
ed7409d
1 Parent(s): 1e5fcfa

Upload json data card

Files changed (1)
  1. OrangeSum.json +67 -0
OrangeSum.json ADDED
@@ -0,0 +1,67 @@
+{
+  "gem": {
+    "rationale": {
+      "sole-task-dataset": "no",
+      "sole-language-task-dataset": "N/A",
+      "distinction-description": "N/A"
+    },
+    "curation": {
+      "has-additional-curation": "no",
+      "modification-types": [],
+      "modification-description": "N/A",
+      "has-additional-splits": "no",
+      "additional-splits-description": "N/A",
+      "additional-splits-capacicites": "N/A"
+    },
+    "starting": {
+      "research-pointers": "Papers about abstractive summarization using seq2seq models:\nhttps://aclanthology.org/K16-1028/\nhttps://aclanthology.org/P17-1099/\nhttps://aclanthology.org/2020.acl-main.703\nhttps://aclanthology.org/2021.emnlp-main.740/\n\nPapers about (pretrained) Transformers:\nhttps://papers.nips.cc/paper/2017/hash/3f5ee243547dee91fbd053c1c4a845aa-Abstract.html\nhttps://aclanthology.org/N19-1423/\nhttps://aclanthology.org/2020.acl-main.703",
+      "technical-terms": "No unique technical words in this data card."
+    }
+  },
+  "results": {
+    "results": {
+      "other-metrics-definitions": "N/A",
+      "has-previous-results": "no",
+      "current-evaluation": "N/A",
+      "previous-results": "N/A",
+      "metrics": [
+        "ROUGE",
+        "BERT-Score"
+      ],
+ "model-abilities": "The ability of the model to generate human like titles and abstracts for given news articles.",
32
+ "original-evaluation": "Automatic Evaluation: Rouge-1, Rouge-2, RougeL and BERTScore were used.\n\nHuman evalutaion: a human evaluation study was conducted with 11 French native speakers. The evaluators were PhD students from the computer science department of the university of the authors, working in NLP and other fields of AI. They volunteered after receiving an email announcement. the best-Worst Scaling (Louviere et al.,2015) was used. Two summaries from two different systems, along with their input document, were presented to a human annotator who had to decide which one was better. The evaluators were asked to base their judgments on accuracy (does the summary contain accurate facts?), informativeness (is important in-formation captured?) and fluency (is the summary written in well-formed French?)."
+    }
+  },
+  "considerations": {
+    "pii": {
+      "risks-description": "N/A"
+    },
+    "licenses": {
+      "dataset-restrictions-other": "N/A",
+      "data-copyright-other": "N/A",
+      "dataset-restrictions": [
+        "open license - commercial use allowed"
+      ],
+      "data-copyright": [
+        "open license - commercial use allowed"
+      ]
+    },
+    "limitations": {}
+  },
+  "context": {
+    "previous": {
+      "is-deployed": "no",
+      "described-risks": "N/A",
+      "changes-from-observation": "N/A"
+    },
+    "underserved": {
+      "helps-underserved": "no",
+      "underserved-description": "N/A"
+    },
+    "biases": {
+      "has-biases": "no",
+      "bias-analyses": "N/A",
+      "speaker-distibution": "The dataset contains news articles written by professional authors."
+    }
+  }
+}
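
The card lists ROUGE and BERT-Score as the automatic metrics. Below is a minimal sketch of that evaluation, assuming the `datasets` and `evaluate` libraries and assuming OrangeSum is available on the Hugging Face Hub under the id "orange_sum" with an "abstract" config exposing "text" and "summary" columns; the lead-character baseline stands in for real model outputs.

# Sketch of the automatic evaluation named in the card: ROUGE-1/2/L and BERTScore.
# Dataset id, config name, and column names are assumptions about the Hub release.
from datasets import load_dataset
import evaluate

dataset = load_dataset("orange_sum", "abstract", split="test")
references = dataset["summary"]

# Stand-in predictions; in practice these come from a summarization model.
predictions = [text[:200] for text in dataset["text"]]  # naive lead baseline

rouge = evaluate.load("rouge")
bertscore = evaluate.load("bertscore")

rouge_scores = rouge.compute(predictions=predictions, references=references)
bert_scores = bertscore.compute(predictions=predictions, references=references, lang="fr")

print({k: rouge_scores[k] for k in ("rouge1", "rouge2", "rougeL")})
print("BERTScore F1:", sum(bert_scores["f1"]) / len(bert_scores["f1"]))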
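
For the human evaluation, Best-Worst Scaling judgments are usually aggregated into a per-system score as the fraction of comparisons a system won minus the fraction it lost. The card does not specify the aggregation, so the sketch below uses that common convention; the judgment format and the system names are hypothetical.

from collections import Counter

# Sketch of Best-Worst Scaling aggregation: score = (wins - losses) / appearances,
# giving each system a value in [-1, 1]. The (winner, loser) pair format is hypothetical.
def bws_scores(judgments: list[tuple[str, str]]) -> dict[str, float]:
    wins, losses, appearances = Counter(), Counter(), Counter()
    for winner, loser in judgments:
        wins[winner] += 1
        losses[loser] += 1
        appearances[winner] += 1
        appearances[loser] += 1
    return {s: (wins[s] - losses[s]) / appearances[s] for s in appearances}

# Example: three annotator decisions between two hypothetical systems.
print(bws_scores([("system_a", "system_b"), ("system_a", "system_b"), ("system_b", "system_a")]))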