albertvillanova committed commit daafc0f
1 parent: b7aaebf

Delete legacy JSON metadata


Delete legacy `dataset_infos.json`.

Files changed (1)
  1. dataset_infos.json +0 -1
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"default": {"description": "Corpus of domain names scraped from Common Crawl and manually annotated to add word boundaries (e.g. \"commoncrawl\" to \"common crawl\"). Breaking domain names such as \"openresearch\" into component words \"open\" and \"research\" is important for applications such as Text-to-Speech synthesis and web search. Common Crawl is an open repository of web crawl data that can be accessed and analyzed by anyone. Specifically, we scraped the plaintext (WET) extracts for domain names from URLs that contained diverse letter casing (e.g. \"OpenBSD\"). Although in the previous example, segmentation is trivial using letter casing, this was not always the case (e.g. \"NASA\"), so we had to manually annotate the data. The dataset is stored as plaintext file where each line is an example of space separated segments of a domain name. The examples are stored in their original letter casing, but harder and more interesting examples can be generated by lowercasing the input first.", "citation": "@inproceedings{zrs2020urlsegmentation,\n title={Semi-supervised URL Segmentation with Recurrent Neural Networks Pre-trained on Knowledge Graph Entities},\n author={Hao Zhang and Jae Ro and Richard William Sproat},\n booktitle={The 28th International Conference on Computational Linguistics (COLING 2020)},\n year={2020}\n}\n", "homepage": "https://github.com/google-research-datasets/common-crawl-domain-names", "license": "MIT License", "features": {"example": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "crawl_domain", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 321134, "num_examples": 17572, "dataset_name": "crawl_domain"}, "test": {"name": "test", "num_bytes": 39712, "num_examples": 2170, "dataset_name": "crawl_domain"}, "validation": {"name": "validation", "num_bytes": 36018, "num_examples": 1953, "dataset_name": "crawl_domain"}}, "download_checksums": {"https://raw.githubusercontent.com/google-research-datasets/common-crawl-domain-names/master/data/train.txt": {"num_bytes": 268410, "checksum": "b365c298088374e5d0b59f2092f7a750f5d393f1728e465e7ecd6db0ac0a70a9"}, "https://raw.githubusercontent.com/google-research-datasets/common-crawl-domain-names/master/data/test.txt": {"num_bytes": 33198, "checksum": "5413bd66e817fb5e84b4ef10121eddf7ee3b51922d84f71027cf1b3be66fb290"}, "https://raw.githubusercontent.com/google-research-datasets/common-crawl-domain-names/master/data/eval.txt": {"num_bytes": 30155, "checksum": "c3b4e500a57159f18310b2ad52297b6d56577894f25666c639c018fb91992b9a"}}, "download_size": 331763, "post_processing_size": null, "dataset_size": 396864, "size_in_bytes": 728627}}