parquet-converter committed
Commit e669434
1 Parent(s): 79987d1

Update parquet files

.gitattributes DELETED
@@ -1,27 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bin.* filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
README.md DELETED
@@ -1,76 +0,0 @@
- ---
- language:
- - tr
- thumbnail:
- tags:
- - dataset
- - turkish
- - ted-multi
- - cleaned
-
- license: apache-2.0
- datasets:
- - ted-multi
-
- ---
-
- # Turkish TED talk translations
- # Created from the ted-multi dataset
-
- The processing steps are included below in case you want to build the same dataset for another language.
-
-
- ```python
- # using Turkish as the target language
- target_lang = "tr"  # change to your target language
-
-
- from datasets import load_dataset
- # ted_multi is a multilingual translation dataset
- # it fits our case: not too big and curated, but it needs some simple processing
-
- dataset = load_dataset("ted_multi")
- dataset.cleanup_cache_files()
-
- # original regex from Patrick's:
- # chars_to_ignore_regex = '[,?.!\-\;\:\"“%‘”�—’…–]'  # change to the ignored characters of your fine-tuned model
-
- # we will use cahya/wav2vec2-base-turkish-artificial-cv
- # checked inside the model repository to find which chars are removed (no run.sh)
- chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\‘\”\'\`…\’»«]'
-
-
- import re
-
- def extract_target_lang_entries(batch):
-     # mapping specific to the ted_multi dataset:
-     # we need to find the index of the target language in each translation, as it can shift
-     try:
-         target_index_for_lang = batch["translations"]["language"].index(target_lang)
-     except ValueError:
-         # target language not in the list; set None so it can be filtered out later
-         batch["text"] = None
-         return batch
-
-     text = batch["translations"]["translation"][target_index_for_lang]
-     batch["text"] = re.sub(chars_to_ignore_regex, "", text.lower())
-     return batch
-
-
- # this dataset has additional columns that need to be dropped explicitly
- cols_to_remove = ['translations', 'talk_name']
- dataset = dataset.map(extract_target_lang_entries, remove_columns=cols_to_remove)
-
-
- # during preprocessing we set text to None for entries without the target language
- dataset_cleaned = dataset.filter(lambda x: x['text'] is not None)
- dataset_cleaned  # inspect the cleaned splits
-
- from huggingface_hub import notebook_login
-
- notebook_login()
-
- dataset_cleaned.push_to_hub(f"{target_lang}_ted_talk_translated")
-
- ```
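Once the cleaned dataset has been pushed as in the deleted README above, it can be loaded back from the Hub like any other dataset. A minimal sketch, assuming the repository id matches the one recorded in `dataset_infos.json` below (`gorkemgoknar/tr_ted_talk_translated`):

```python
from datasets import load_dataset

# load the cleaned dataset back from the Hub
# (repo id taken from dataset_infos.json: gorkemgoknar/tr_ted_talk_translated)
ds = load_dataset("gorkemgoknar/tr_ted_talk_translated")

# expected splits per dataset_infos.json: train 182470, validation 4045, test 5029
print({split: ds[split].num_rows for split in ds})
print(ds["train"][0]["text"])
```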
 
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"gorkemgoknar--tr_ted_talk_translated": {"description": "Massively multilingual (60 language) data set derived from TED Talk transcripts.\nEach record consists of parallel arrays of language and text. Missing and\nincomplete translations will be filtered out.\n", "citation": "@InProceedings{qi-EtAl:2018:N18-2,\n author = {Qi, Ye and Sachan, Devendra and Felix, Matthieu and Padmanabhan, Sarguna and Neubig, Graham},\n title = {When and Why Are Pre-Trained Word Embeddings Useful for Neural Machine Translation?},\n booktitle = {Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers)},\n month = {June},\n year = {2018},\n address = {New Orleans, Louisiana},\n publisher = {Association for Computational Linguistics},\n pages = {529--535},\n abstract = {The performance of Neural Machine Translation (NMT) systems often suffers in low-resource scenarios where sufficiently large-scale parallel corpora cannot be obtained. Pre-trained word embeddings have proven to be invaluable for improving performance in natural language analysis tasks, which often suffer from paucity of data. However, their utility for NMT has not been extensively explored. In this work, we perform five sets of experiments that analyze when we can expect pre-trained word embeddings to help in NMT tasks. We show that such embeddings can be surprisingly effective in some cases -- providing gains of up to 20 BLEU points in the most favorable setting.},\n url = {http://www.aclweb.org/anthology/N18-2084}\n}\n", "homepage": "https://github.com/neulab/word-embeddings-for-nmt", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "ted_multi_translate", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 14008706.562739734, "num_examples": 182470, "dataset_name": "tr_ted_talk_translated"}, "validation": {"name": "validation", "num_bytes": 288426.0869565217, "num_examples": 4045, "dataset_name": "tr_ted_talk_translated"}, "test": {"name": "test", "num_bytes": 369201.66796062666, "num_examples": 5029, "dataset_name": "tr_ted_talk_translated"}}, "download_checksums": null, "download_size": 12232993, "post_processing_size": null, "dataset_size": 14666334.317656882, "size_in_bytes": 26899327.317656882}}
 
 
data/validation-00000-of-00001.parquet → gorkemgoknar--tr_ted_talk_translated/parquet-test.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:450aff4333150972dc2e667f256543ced54b10666400df565c4f8f76c0e762c8
- size 253130
+ oid sha256:aa117e6bd2ef1d87343b51e2a2ee17b106ef5ca6a1a1b432c0024f1eebb74745
+ size 315041
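The three-line bodies in these rename diffs are Git LFS pointer files: each tracked parquet file is stored in the repository as a small text stub recording the object's sha256 and byte size, while the actual data lives in LFS storage. A minimal sketch of reading such a pointer, purely for illustration and assuming the file is checked out as a raw pointer (not smudged by LFS):

```python
# parse a Git LFS pointer file like the ones shown in this diff
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = parse_lfs_pointer("gorkemgoknar--tr_ted_talk_translated/parquet-train.parquet")
print(pointer["version"])  # https://git-lfs.github.com/spec/v1
print(pointer["oid"])      # sha256:<hash of the real parquet file>
print(pointer["size"])     # size in bytes of the real parquet file
```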
data/train-00000-of-00001.parquet → gorkemgoknar--tr_ted_talk_translated/parquet-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:454bdfa9325d33b7ac3d447bb26e23bedfed5b6b24ca20e7a5156b653ddf392e
- size 11669146
+ oid sha256:b7da98efe4e4db95d489633cbf057ff89e348950b2d80a0143f46114040b485b
+ size 11849587
data/test-00000-of-00001.parquet → gorkemgoknar--tr_ted_talk_translated/parquet-validation.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f4426e4e79fe10a29aa01af871bfddeb7f21ab5e53ca58fe0c774c350061be97
- size 310717
+ oid sha256:50e4a8df27045fae40b2362037f61e7062f575e40ff2c853b044bda69d428e09
+ size 258345
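After conversion, each split is an ordinary parquet file, so it can also be inspected directly with any parquet reader. A minimal sketch using pandas, assuming the files above have been downloaded locally with their LFS objects resolved (the path just mirrors the renamed file in this commit):

```python
import pandas as pd

# the converted splits are plain parquet files with a single "text" column
train = pd.read_parquet("gorkemgoknar--tr_ted_talk_translated/parquet-train.parquet")
print(len(train))            # expected 182470 rows per dataset_infos.json
print(train["text"].head())  # a few cleaned Turkish transcript lines
```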