"""Generics KB: A Knowledge Base of Generic Statements"""

import ast
import csv

import datasets


_CITATION = """\
@InProceedings{huggingface:dataset,
title = {GenericsKB: A Knowledge Base of Generic Statements},
authors={Sumithra Bhakthavatsalam, Chloe Anastasiades, Peter Clark},
year={2020},
publisher = {Allen Institute for AI},
}
"""

_DESCRIPTION = """\
The GenericsKB contains 3.4M+ generic sentences about the world, i.e., sentences expressing general truths such as "Dogs bark," and "Trees remove carbon dioxide from the atmosphere." Generics are potentially useful as a knowledge source for AI systems requiring general world knowledge. The GenericsKB is the first large-scale resource containing naturally occurring generic sentences (as opposed to extracted or crowdsourced triples), and is rich in high-quality, general, semantically complete statements. Generics were primarily extracted from three large text sources, namely the Waterloo Corpus, selected parts of Simple Wikipedia, and the ARC Corpus. A filtered, high-quality subset is also available in GenericsKB-Best, containing 1,020,868 sentences. We recommend you start with GenericsKB-Best.
"""

_HOMEPAGE = "https://allenai.org/data/genericskb"

_LICENSE = "cc-by-4.0"

_BASE_URL = "data/{0}"

_URLS = {
    "generics_kb_best": _BASE_URL.format("GenericsKB-Best.tsv.gz"),
    "generics_kb": _BASE_URL.format("GenericsKB.tsv.gz"),
    "generics_kb_simplewiki": _BASE_URL.format("GenericsKB-SimpleWiki-With-Context.jsonl.gz"),
    "generics_kb_waterloo": _BASE_URL.format("GenericsKB-Waterloo-With-Context.jsonl.gz"),
}
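
# Minimal usage sketch (not executed by this script). It assumes the dataset is
# reachable by `datasets.load_dataset` under the name "generics_kb"; the exact
# repo id, and whether `trust_remote_code=True` is needed for this script-based
# dataset, depend on where the script is hosted.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("generics_kb", "generics_kb_best")  # default config
#   print(ds["train"][0]["generic_sentence"])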


class GenericsKb(datasets.GeneratorBasedBuilder):
    """The GenericsKB is the first large-scale resource containing naturally occurring generic sentences, and is rich in high-quality, general, semantically complete statements."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="generics_kb_best",
            version=VERSION,
            description="This is the default and recommended config. It comprises GENERICSKB generics with a score > 0.234.",
        ),
        datasets.BuilderConfig(
            name="generics_kb",
            version=VERSION,
            description="The full GENERICSKB, containing 3,433,000 sentences.",
        ),
        datasets.BuilderConfig(
            name="generics_kb_simplewiki",
            version=VERSION,
            description="SimpleWikipedia is a filtered scrape of SimpleWikipedia pages (simple.wikipedia.org).",
        ),
        datasets.BuilderConfig(
            name="generics_kb_waterloo",
            version=VERSION,
            description="The Waterloo corpus is 280GB of English plain text, gathered by Charles Clarke (Univ. Waterloo) using a webcrawler in 2001 from .edu domains.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "generics_kb_best"

    def _info(self):
        if self.config.name == "generics_kb_waterloo" or self.config.name == "generics_kb_simplewiki":
            # The Waterloo and SimpleWiki configs carry the surrounding sentence context.
            featuredict = {
                "source_name": datasets.Value("string"),
                "sentence": datasets.Value("string"),
                "sentences_before": datasets.Sequence(datasets.Value("string")),
                "sentences_after": datasets.Sequence(datasets.Value("string")),
                "concept_name": datasets.Value("string"),
                "quantifiers": datasets.Sequence(datasets.Value("string")),
                "id": datasets.Value("string"),
                "bert_score": datasets.Value("float64"),
            }
            if self.config.name == "generics_kb_simplewiki":
                # SimpleWiki records additionally carry page headings and categories.
                featuredict["headings"] = datasets.Sequence(datasets.Value("string"))
                featuredict["categories"] = datasets.Sequence(datasets.Value("string"))

            features = datasets.Features(featuredict)
        else:
            # The "generics_kb" and "generics_kb_best" configs are flat TSV files.
            features = datasets.Features(
                {
                    "source": datasets.Value("string"),
                    "term": datasets.Value("string"),
                    "quantifier_frequency": datasets.Value("string"),
                    "quantifier_number": datasets.Value("string"),
                    "generic_sentence": datasets.Value("string"),
                    "score": datasets.Value("float64"),
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
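
    # Illustrative shape of a single example under the default "generics_kb_best"
    # config. The field values below are made up for illustration (the sentence is
    # taken from the dataset description); they are not actual rows from the TSV.
    #
    #   {
    #       "source": "ARC",
    #       "term": "dog",
    #       "quantifier_frequency": "",
    #       "quantifier_number": "",
    #       "generic_sentence": "Dogs bark.",
    #       "score": 0.5,
    #   }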

    def _split_generators(self, dl_manager):
        # Each config maps to a single archive; everything goes into the train split.
        filepath = dl_manager.download_and_extract(_URLS[self.config.name])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": filepath,
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""

        if self.config.name == "generics_kb_waterloo" or self.config.name == "generics_kb_simplewiki":
            with open(filepath, encoding="utf-8") as f:
                for id_, row in enumerate(f):
                    # Each line is a dict serialized as a Python literal, not strict JSON.
                    data = ast.literal_eval(row)

                    result = {
                        "source_name": data["source"]["name"],
                        "sentence": data["knowledge"]["sentence"],
                        "sentences_before": data["knowledge"]["context"]["sentences_before"],
                        "sentences_after": data["knowledge"]["context"]["sentences_after"],
                        "concept_name": data["knowledge"]["key_concepts"][0]["concept_name"],
                        "quantifiers": data["knowledge"]["key_concepts"][0]["quantifiers"],
                        "id": data["id"],
                        "bert_score": data["bert_score"],
                    }
                    if self.config.name == "generics_kb_simplewiki":
                        result["headings"] = data["knowledge"]["context"]["headings"]
                        result["categories"] = data["knowledge"]["context"]["categories"]

                    yield id_, result
        else:
            with open(filepath, encoding="utf-8") as f:
                # Skip the TSV header row.
                next(f)
                read_tsv = csv.reader(f, delimiter="\t")

                for id_, row in enumerate(read_tsv):
                    # Column 2 holds an optional quantifier dict serialized as a Python
                    # literal, with "frequency" and/or "number" keys when present.
                    quantifier = row[2]
                    quantifier_frequency = ""
                    quantifier_number = ""

                    if quantifier != "":
                        quantifier = ast.literal_eval(quantifier)
                        if "frequency" in quantifier:
                            quantifier_frequency = quantifier["frequency"]
                        if "number" in quantifier:
                            quantifier_number = quantifier["number"]
                    yield id_, {
                        "source": row[0],
                        "term": row[1],
                        "quantifier_frequency": quantifier_frequency,
                        "quantifier_number": quantifier_number,
                        "generic_sentence": row[3],
                        "score": row[4],
                    }
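
    # Downstream sketch (hypothetical usage, not part of the builder): the
    # "generics_kb" and "generics_kb_best" configs expose a float "score", so a
    # stricter subset can be taken with `Dataset.filter`; the 0.5 threshold here
    # is an arbitrary example value.
    #
    #   ds = datasets.load_dataset("generics_kb", "generics_kb_best")
    #   high_conf = ds["train"].filter(lambda ex: ex["score"] > 0.5)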