# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import datasets
from datasets.tasks import TextClassification
_CITATION = """\
@author tianjie
fdRE
Chinese
}
"""
_DESCRIPTION = """\
fdRE是一个中文的轴承故障诊断领域的关系抽取数据集
该数据集主要包含正向从属、反向从属以及无关三类标签
"""
_URL = "https://huggingface.co/datasets/leonadase/fdRE/resolve/main/fdRE.zip"
class FdRE(datasets.GeneratorBasedBuilder):
    """fdRE: a Chinese relation extraction dataset for bearing fault diagnosis.
    Each example pairs a sentence with one of three relation labels between the two
    marked entities: Part_Of(E1,E2), Part_Of(E2,E1), or Other."""
VERSION = datasets.Version("1.0.0")
def _info(self):
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=datasets.Features(
{
"sentence": datasets.Value("string"),
"relation": datasets.ClassLabel(
names=[
"Part_Of(E1,E2)",
"Part_Of(E2,E1)",
"Other",
]
),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=datasets.info.SupervisedKeysData(input="sentence", output="relation"),
            # Homepage of the dataset for documentation
            homepage="https://huggingface.co/datasets/leonadase/fdRE",
            citation=_CITATION,
task_templates=[TextClassification(text_column="sentence", label_column="relation")],
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# dl_manager is a datasets.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
# data_dir = os.path.join(dl_dir, "fdRE")
data_dir = dl_dir
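        # Assumption: the extracted fdRE.zip contains train.txt and test.txt directly at
        # the root of the extraction directory, so data_dir is the extraction directory
        # itself rather than a subfolder.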
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "train.txt"),
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(data_dir, "test.txt"),
},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
        with open(filepath, "r", encoding="utf-8") as file:
lines = file.readlines()
num_lines_per_sample = 4
for i in range(0, len(lines), num_lines_per_sample):
idx = int(lines[i].split("\t")[0])
sentence = lines[i].split("\t")[1][1:-2] # remove " at the start and "\n at the end
relation = lines[i + 1][:-1] # remove \n at the end
yield idx, {
"sentence": sentence,
"relation": relation,
}
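
# A minimal usage sketch (assuming the dataset is published as leonadase/fdRE on the
# Hugging Face Hub; the repo id is taken from _URL above):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("leonadase/fdRE")
#     print(ds["train"][0])  # {"sentence": "...", "relation": <label id>}
#     print(ds["train"].features["relation"].names)
#     # ["Part_Of(E1,E2)", "Part_Of(E2,E1)", "Other"]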