|
import datasets |
|
import pyarrow.parquet as pq |
|
|
|
# Dataset-card metadata. Intentionally empty in this script; the rendered
# card on the Hub carries the human-readable text.
_CITATION = ''

_DESCRIPTION = ''

_HOMEPAGE = ''

_LICENSE = ''

# Root of the Hub repository that hosts one parquet file per config,
# named '<config_name>.parquet' (see _split_generators below).
_BASE_URL = 'https://huggingface.co/datasets/AresEkb/test/resolve/main/'

# Arrow schema for each builder config, keyed by config name. The keys here
# must match BUILDER_CONFIGS and the parquet file names under _BASE_URL.
_FEATURES = {
    'domains': datasets.Features({
        'reg_number': datasets.Value('string'),
        'standard_name': datasets.Value('string'),
        'name': datasets.Value('string'),
        'purpose': datasets.Value('string'),
        # Per-row embedding vector; length is not fixed by the schema.
        'embeddings': datasets.Sequence(datasets.Value('float32')),
    }),
}
|
|
|
class ProfStandardsDatasetBuilder(datasets.ArrowBasedBuilder):
    """Builder for the professional-standards dataset hosted on the HF Hub.

    Each config maps to a single parquet file at ``_BASE_URL`` named
    ``<config_name>.parquet``; its Arrow schema is declared in ``_FEATURES``.
    The whole file is exposed as one ``train`` split.
    """

    VERSION = datasets.Version('0.0.1')

    # One config per parquet file / feature schema. Keyword arguments guard
    # against positional-signature changes in datasets.BuilderConfig.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name='domains', version=VERSION),
    ]

    # Explicit default so `load_dataset(path)` works without a config name.
    DEFAULT_CONFIG_NAME = 'domains'

    def _info(self):
        """Return dataset metadata carrying the active config's schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES[self.config.name],
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download this config's parquet file and expose it as the train split."""
        url = _BASE_URL + self.config.name + '.parquet'
        file_path = dl_manager.download(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'file_path': file_path},
            ),
        ]

    def _generate_tables(self, file_path):
        """Yield ``(key, pyarrow.Table)`` pairs from the downloaded parquet file.

        Tables are yielded one row group at a time so a large parquet file is
        never fully materialized in memory (the original read the whole file
        with ``pq.read_table``).
        """
        # NOTE(review): dl_manager.download may hand back the original URL
        # (e.g. in streaming mode); stripping _BASE_URL leaves a repo-relative
        # path that presumably resolves against a local checkout — confirm
        # this fallback is intentional before removing it.
        if file_path.startswith(_BASE_URL):
            file_path = file_path[len(_BASE_URL):]
        parquet_file = pq.ParquetFile(file_path)
        for group_index in range(parquet_file.num_row_groups):
            yield (
                f'{self.config.name}-{group_index}',
                parquet_file.read_row_group(group_index),
            )
|
|