ivangtorre committed
Commit 9e91ea9
1 Parent(s): fde05c0

changing and deleting files

Generate_metadata.ipynb ADDED
@@ -0,0 +1,590 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "91b21cf6",
+ "metadata": {},
+ "source": [
+ "## Generate the datasets for uploading"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e1a3d25b",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "id": "aa925968",
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[]\n",
+ "[]\n",
+ "['kotiria000263.wav', 'kotiria000265.wav', 'kotiria000273.wav', 'kotiria000285.wav', 'kotiria000289.wav', 'kotiria000291.wav', 'kotiria000294.wav', 'kotiria000295.wav', 'kotiria000297.wav', 'kotiria000300.wav', 'kotiria000306.wav', 'kotiria000308.wav']\n",
+ "[]\n",
+ "['waikhana000740.wav', 'waikhana000745.wav', 'waikhana000746.wav']\n"
+ ]
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "15adf9d48a44440dac871ce9f432294c",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Uploading the dataset shards: 0%| | 0/3 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "aa491992d4fa43688c71ea1e09b25ca0",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Map: 0%| | 0/1583 [00:00<?, ? examples/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "0f560606c9094daf92d9f5328f18b2dd",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Creating parquet from Arrow format: 0%| | 0/16 [00:00<?, ?ba/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "538b15ad0bbe4684a09ae610fce7ab8c",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Map: 0%| | 0/1583 [00:00<?, ? examples/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "391fb889ea15447ca8ec509a04de2ebe",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Creating parquet from Arrow format: 0%| | 0/16 [00:00<?, ?ba/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "960a0088fc564383a32d2f6f0816b215",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Map: 0%| | 0/1583 [00:00<?, ? examples/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "3355cb1c65d84a24b1a146a17e43b1c4",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Creating parquet from Arrow format: 0%| | 0/16 [00:00<?, ?ba/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "ename": "ValueError",
+ "evalue": "Features of the new split don't match the features of the existing splits on the hub: {'audio': Audio(sampling_rate=None, mono=True, decode=True, id=None), 'source_processed': Value(dtype='string', id=None), 'source_raw': Value(dtype='string', id=None), 'target_raw': Value(dtype='string', id=None)} != {'audio': Audio(sampling_rate=None, mono=True, decode=True, id=None)}",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
+ "Input \u001b[0;32mIn [14]\u001b[0m, in \u001b[0;36m<cell line: 38>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 31\u001b[0m a \u001b[38;5;241m=\u001b[39m flatten(a)\n\u001b[1;32m 32\u001b[0m audio_dataset \u001b[38;5;241m=\u001b[39m Dataset\u001b[38;5;241m.\u001b[39mfrom_dict({\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124maudio\u001b[39m\u001b[38;5;124m\"\u001b[39m: flatten(df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfile_name\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mvalues\u001b[38;5;241m.\u001b[39mtolist()),\n\u001b[1;32m 33\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msource_processed\u001b[39m\u001b[38;5;124m\"\u001b[39m: flatten(df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msource_processed\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mvalues\u001b[38;5;241m.\u001b[39mtolist()),\n\u001b[1;32m 34\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msource_raw\u001b[39m\u001b[38;5;124m\"\u001b[39m: flatten(df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msource_raw\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mvalues\u001b[38;5;241m.\u001b[39mtolist()),\n\u001b[1;32m 35\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtarget_raw\u001b[39m\u001b[38;5;124m\"\u001b[39m: flatten(df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtarget_raw\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mvalues\u001b[38;5;241m.\u001b[39mtolist()),\n\u001b[1;32m 36\u001b[0m },\n\u001b[1;32m 37\u001b[0m )\u001b[38;5;241m.\u001b[39mcast_column(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124maudio\u001b[39m\u001b[38;5;124m\"\u001b[39m, Audio())\n\u001b[0;32m---> 38\u001b[0m \u001b[43maudio_dataset\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpush_to_hub\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mivangtorre/second_americas_nlp_2022\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msplit\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtrain\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 40\u001b[0m df\u001b[38;5;241m.\u001b[39mto_csv(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtrain.csv\u001b[39m\u001b[38;5;124m\"\u001b[39m, sep\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;130;01m\\t\u001b[39;00m\u001b[38;5;124m'\u001b[39m, index\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m)\n\u001b[1;32m 42\u001b[0m df \u001b[38;5;241m=\u001b[39m generate_df(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mquechua\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdev\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
+ "File \u001b[0;32m~/.local/lib/python3.10/site-packages/datasets/arrow_dataset.py:5707\u001b[0m, in \u001b[0;36mDataset.push_to_hub\u001b[0;34m(self, repo_id, config_name, set_default, split, data_dir, commit_message, commit_description, private, token, revision, branch, create_pr, max_shard_size, num_shards, embed_external_files)\u001b[0m\n\u001b[1;32m 5705\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m repo_info\u001b[38;5;241m.\u001b[39msplits \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mlist\u001b[39m(repo_info\u001b[38;5;241m.\u001b[39msplits) \u001b[38;5;241m!=\u001b[39m [split]:\n\u001b[1;32m 5706\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_info\u001b[38;5;241m.\u001b[39mfeatures \u001b[38;5;241m!=\u001b[39m repo_info\u001b[38;5;241m.\u001b[39mfeatures:\n\u001b[0;32m-> 5707\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 5708\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFeatures of the new split don\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mt match the features of the existing splits on the hub: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_info\u001b[38;5;241m.\u001b[39mfeatures\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m != \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mrepo_info\u001b[38;5;241m.\u001b[39mfeatures\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 5709\u001b[0m )\n\u001b[1;32m 5711\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m split \u001b[38;5;129;01min\u001b[39;00m repo_info\u001b[38;5;241m.\u001b[39msplits:\n\u001b[1;32m 5712\u001b[0m repo_info\u001b[38;5;241m.\u001b[39mdownload_size \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m=\u001b[39m deleted_size\n",
+ "\u001b[0;31mValueError\u001b[0m: Features of the new split don't match the features of the existing splits on the hub: {'audio': Audio(sampling_rate=None, mono=True, decode=True, id=None), 'source_processed': Value(dtype='string', id=None), 'source_raw': Value(dtype='string', id=None), 'target_raw': Value(dtype='string', id=None)} != {'audio': Audio(sampling_rate=None, mono=True, decode=True, id=None)}"
+ ]
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "from datasets import Dataset, Audio\n",
+ "\n",
+ "def generate_df(language, split):\n",
+ " # QUECHUA TRAIN\n",
+ " with open(\"./../\"+language +\"_\"+split+\".tsv\") as f:\n",
+ " lines = f.read().splitlines()\n",
+ " lines2 = [l.split(\"\\t\") for l in lines if len(l.split(\"\\t\"))==4]\n",
+ " asd = [l.split(\"\\t\")[0] for l in lines if len(l.split(\"\\t\"))>4]\n",
+ " print(asd)\n",
+ " df1 = pd.DataFrame(lines2[1::], columns =lines2[0:1])\n",
+ " df1 = df1.assign(split=[split]*df1.shape[0])\n",
+ " df1 = df1.assign(subset=[language]*df1.shape[0])\n",
+ " df1 = df1.rename(columns={'wav': 'file_name'})\n",
+ " df1['file_name'] = 'data/' + language + '/' + split +'/' + df1['file_name'].astype(str)\n",
+ " return df1\n",
+ "\n",
+ "df = generate_df(\"quechua\", \"train\")\n",
+ "df = pd.concat([df, generate_df(\"guarani\", \"train\")])\n",
+ "df = pd.concat([df, generate_df(\"kotiria\", \"train\")])\n",
+ "df = pd.concat([df, generate_df(\"bribri\", \"train\")])\n",
+ "df = pd.concat([df, generate_df(\"waikhana\", \"train\")])\n",
+ "cols = df.columns.tolist()\n",
+ "cols = cols[-1:] + cols[:-1]\n",
+ "df = df[cols]\n",
+ "\n",
+ "def flatten(xss):\n",
+ " return [x for xs in xss for x in xs]\n",
+ "\n",
+ "a = flatten(df[\"file_name\"].values.tolist())\n",
+ "a = flatten(a)\n",
+ "audio_dataset = Dataset.from_dict({\"audio\": flatten(df[\"file_name\"].values.tolist()),\n",
+ " \"source_processed\": flatten(df[\"source_processed\"].values.tolist()),\n",
+ " \"source_raw\": flatten(df[\"source_raw\"].values.tolist()),\n",
+ " \"target_raw\": flatten(df[\"target_raw\"].values.tolist()),\n",
+ " },\n",
+ " ).cast_column(\"audio\", Audio())\n",
+ "audio_dataset.push_to_hub(\"ivangtorre/second_americas_nlp_2022\", split=\"train\")\n",
+ "\n",
+ "df.to_csv(\"train.csv\", sep='\\t', index=None)\n",
+ "\n",
+ "df = generate_df(\"quechua\", \"dev\")\n",
+ "df = pd.concat([df, generate_df(\"guarani\", \"dev\")])\n",
+ "df = pd.concat([df, generate_df(\"kotiria\", \"dev\")])\n",
+ "df = pd.concat([df, generate_df(\"bribri\", \"dev\")])\n",
+ "df = pd.concat([df, generate_df(\"waikhana\", \"dev\")])\n",
+ "cols = df.columns.tolist()\n",
+ "cols = cols[-1:] + cols[:-1]\n",
+ "df = df[cols]\n",
+ "df.to_csv(\"dev.csv\", sep='\\t', index=None)\n",
+ "\n",
+ "a = df[\"file_name\"].values.tolist()\n",
+ "a = flatten(a)\n",
+ "#audio_dataset = Dataset.from_dict({\"audio\": a}).cast_column(\"audio\", Audio())\n",
+ "#audio_dataset.push_to_hub(\"ivangtorre/second_americas_nlp_2022\", split=\"dev\")\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "4ce2eeb3",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'audio': {'path': 'data/quechua/train/quechua000000.wav',\n",
+ " 'array': array([0.00045776, 0.00042725, 0.00018311, ..., 0.00286865, 0.00186157,\n",
+ " 0.00253296]),\n",
+ " 'sampling_rate': 16000}}"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "audio_dataset[0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "bd39f2f4",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<div>\n",
+ "<style scoped>\n",
+ " .dataframe tbody tr th:only-of-type {\n",
+ " vertical-align: middle;\n",
+ " }\n",
+ "\n",
+ " .dataframe tbody tr th {\n",
+ " vertical-align: top;\n",
+ " }\n",
+ "\n",
+ " .dataframe thead tr th {\n",
+ " text-align: left;\n",
+ " }\n",
+ "</style>\n",
+ "<table border=\"1\" class=\"dataframe\">\n",
+ " <thead>\n",
+ " <tr>\n",
+ " <th></th>\n",
+ " <th>subset</th>\n",
+ " <th>file_name</th>\n",
+ " <th>source_processed</th>\n",
+ " <th>source_raw</th>\n",
+ " <th>target_raw</th>\n",
+ " <th>split</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th>0</th>\n",
+ " <td>quechua</td>\n",
+ " <td>data/quechua/train/quechua000000.wav</td>\n",
+ " <td>wañuchisunchu kay suwakunata</td>\n",
+ " <td>wañuchisunchu kay suwakunata</td>\n",
+ " <td>matemos a esos ladrones</td>\n",
+ " <td>train</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>1</th>\n",
+ " <td>quechua</td>\n",
+ " <td>data/quechua/train/quechua000001.wav</td>\n",
+ " <td>imaninkichikmi qamkuna</td>\n",
+ " <td>imaninkichikmi qamkuna</td>\n",
+ " <td>que dicen ustedes</td>\n",
+ " <td>train</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2</th>\n",
+ " <td>quechua</td>\n",
+ " <td>data/quechua/train/quechua000002.wav</td>\n",
+ " <td>hatun urqukunapi kunturkunapas uyarirqan</td>\n",
+ " <td>hatun urqukunapi kunturkunapas uyarirqan</td>\n",
+ " <td>en grandes montañas hasta los condores escuchaban</td>\n",
+ " <td>train</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>3</th>\n",
+ " <td>quechua</td>\n",
+ " <td>data/quechua/train/quechua000003.wav</td>\n",
+ " <td>ninsi winsislaw maqtaqa tumpa machasqaña</td>\n",
+ " <td>ninsi winsislaw maqtaqa tumpa machasqaña</td>\n",
+ " <td>dice el joven wessceslao cuando ya estaba borr...</td>\n",
+ " <td>train</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>4</th>\n",
+ " <td>quechua</td>\n",
+ " <td>data/quechua/train/quechua000004.wav</td>\n",
+ " <td>huk qilli chuspi chuspi misapi kimsantin suwak...</td>\n",
+ " <td>huk qilli chuspi chuspi misapi kimsantin suwak...</td>\n",
+ " <td>una sucia mosca en la mesa con los tres ladron...</td>\n",
+ " <td>train</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>...</th>\n",
+ " <td>...</td>\n",
+ " <td>...</td>\n",
+ " <td>...</td>\n",
+ " <td>...</td>\n",
+ " <td>...</td>\n",
+ " <td>...</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>1411</th>\n",
+ " <td>waikhana</td>\n",
+ " <td>data/waikhana/train/waikhana001414.wav</td>\n",
+ " <td>masiaha malia masinapea</td>\n",
+ " <td>masiaha malia masinapea, ()</td>\n",
+ " <td>Nos tambem sabemos (as historias antigas)</td>\n",
+ " <td>train</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>1412</th>\n",
+ " <td>waikhana</td>\n",
+ " <td>data/waikhana/train/waikhana001415.wav</td>\n",
+ " <td>a'lide mu:sale ya'uaha yu:'u:</td>\n",
+ " <td>a'lide mu:sale ya'uaha yu:'u:</td>\n",
+ " <td>Tudo isso estou explicando para voces.</td>\n",
+ " <td>train</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>1413</th>\n",
+ " <td>waikhana</td>\n",
+ " <td>data/waikhana/train/waikhana001416.wav</td>\n",
+ " <td>a'lide tina a'likodo pekasonoko a'li gravaka'a...</td>\n",
+ " <td>a'lide tina a'likodo pekasonoko a'li gravaka'a...</td>\n",
+ " <td>Tudo isso essa branca vai gravar.</td>\n",
+ " <td>train</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>1414</th>\n",
+ " <td>waikhana</td>\n",
+ " <td>data/waikhana/train/waikhana001417.wav</td>\n",
+ " <td>sayeotha ninokata mipe</td>\n",
+ " <td>sayeotha ninokata mipe</td>\n",
+ " <td>Ela disse que vai fazer tudo isso,</td>\n",
+ " <td>train</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>1415</th>\n",
+ " <td>waikhana</td>\n",
+ " <td>data/waikhana/train/waikhana001418.wav</td>\n",
+ " <td>yu:'u:le ~o'o ihide yu:'u: akaye</td>\n",
+ " <td>yu:'u:le ~o'o ihide yu:'u: akaye</td>\n",
+ " <td>Para mim, e' ate aqui, meus irmaos.</td>\n",
+ " <td>train</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "<p>4749 rows × 6 columns</p>\n",
+ "</div>"
+ ],
+ "text/plain": [
+ " subset file_name \\\n",
+ "0 quechua data/quechua/train/quechua000000.wav \n",
+ "1 quechua data/quechua/train/quechua000001.wav \n",
+ "2 quechua data/quechua/train/quechua000002.wav \n",
+ "3 quechua data/quechua/train/quechua000003.wav \n",
+ "4 quechua data/quechua/train/quechua000004.wav \n",
+ "... ... ... \n",
+ "1411 waikhana data/waikhana/train/waikhana001414.wav \n",
+ "1412 waikhana data/waikhana/train/waikhana001415.wav \n",
+ "1413 waikhana data/waikhana/train/waikhana001416.wav \n",
+ "1414 waikhana data/waikhana/train/waikhana001417.wav \n",
+ "1415 waikhana data/waikhana/train/waikhana001418.wav \n",
+ "\n",
+ " source_processed \\\n",
+ "0 wañuchisunchu kay suwakunata \n",
+ "1 imaninkichikmi qamkuna \n",
+ "2 hatun urqukunapi kunturkunapas uyarirqan \n",
+ "3 ninsi winsislaw maqtaqa tumpa machasqaña \n",
+ "4 huk qilli chuspi chuspi misapi kimsantin suwak... \n",
+ "... ... \n",
+ "1411 masiaha malia masinapea \n",
+ "1412 a'lide mu:sale ya'uaha yu:'u: \n",
+ "1413 a'lide tina a'likodo pekasonoko a'li gravaka'a... \n",
+ "1414 sayeotha ninokata mipe \n",
+ "1415 yu:'u:le ~o'o ihide yu:'u: akaye \n",
+ "\n",
+ " source_raw \\\n",
+ "0 wañuchisunchu kay suwakunata \n",
+ "1 imaninkichikmi qamkuna \n",
+ "2 hatun urqukunapi kunturkunapas uyarirqan \n",
+ "3 ninsi winsislaw maqtaqa tumpa machasqaña \n",
+ "4 huk qilli chuspi chuspi misapi kimsantin suwak... \n",
+ "... ... \n",
+ "1411 masiaha malia masinapea, () \n",
+ "1412 a'lide mu:sale ya'uaha yu:'u: \n",
+ "1413 a'lide tina a'likodo pekasonoko a'li gravaka'a... \n",
+ "1414 sayeotha ninokata mipe \n",
+ "1415 yu:'u:le ~o'o ihide yu:'u: akaye \n",
+ "\n",
+ " target_raw split \n",
+ "0 matemos a esos ladrones train \n",
+ "1 que dicen ustedes train \n",
+ "2 en grandes montañas hasta los condores escuchaban train \n",
+ "3 dice el joven wessceslao cuando ya estaba borr... train \n",
+ "4 una sucia mosca en la mesa con los tres ladron... train \n",
+ "... ... ... \n",
+ "1411 Nos tambem sabemos (as historias antigas) train \n",
+ "1412 Tudo isso estou explicando para voces. train \n",
+ "1413 Tudo isso essa branca vai gravar. train \n",
+ "1414 Ela disse que vai fazer tudo isso, train \n",
+ "1415 Para mim, e' ate aqui, meus irmaos. train \n",
+ "\n",
+ "[4749 rows x 6 columns]"
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "a1f02703",
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "#from datasets import load_dataset\n",
+ "#dataset = load_dataset(\"audiofolder\", data_dir=\"second_americas_nlp_2022\")\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "5eaa7c93",
+ "metadata": {},
+ "source": [
+ "# EVALUATE MODELS\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2e4e15c9",
+ "metadata": {},
+ "source": [
+ "## QUECHUA"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "e165f4bf",
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "9c96f2ce38474bc990e57387acd56fc8",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Map: 0%| | 0/250 [00:00<?, ? examples/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "ename": "LibsndfileError",
+ "evalue": "Error opening 'data/quechua/dev/quechua000573.wav': System error.",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mLibsndfileError\u001b[0m Traceback (most recent call last)",
+ "Input \u001b[0;32mIn [8]\u001b[0m, in \u001b[0;36m<cell line: 25>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 22\u001b[0m batch[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtranscription\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m processor\u001b[38;5;241m.\u001b[39mbatch_decode(predicted_ids)\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m batch\n\u001b[0;32m---> 25\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43mquechua\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmap\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmap_to_pred\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbatched\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbatch_size\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 27\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCER:\u001b[39m\u001b[38;5;124m\"\u001b[39m, cer(result[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msource_processed\u001b[39m\u001b[38;5;124m\"\u001b[39m], result[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtranscription\u001b[39m\u001b[38;5;124m\"\u001b[39m]))\n",
+ "File \u001b[0;32m~/.local/lib/python3.10/site-packages/datasets/arrow_dataset.py:602\u001b[0m, in \u001b[0;36mtransmit_tasks.<locals>.wrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 600\u001b[0m \u001b[38;5;28mself\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDataset\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m=\u001b[39m kwargs\u001b[38;5;241m.\u001b[39mpop(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mself\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 601\u001b[0m \u001b[38;5;66;03m# apply actual function\u001b[39;00m\n\u001b[0;32m--> 602\u001b[0m out: Union[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDataset\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDatasetDict\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 603\u001b[0m datasets: List[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDataset\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlist\u001b[39m(out\u001b[38;5;241m.\u001b[39mvalues()) \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(out, \u001b[38;5;28mdict\u001b[39m) \u001b[38;5;28;01melse\u001b[39;00m [out]\n\u001b[1;32m 604\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m dataset \u001b[38;5;129;01min\u001b[39;00m datasets:\n\u001b[1;32m 605\u001b[0m \u001b[38;5;66;03m# Remove task templates if a column mapping of the template is no longer valid\u001b[39;00m\n",
+ "File \u001b[0;32m~/.local/lib/python3.10/site-packages/datasets/arrow_dataset.py:567\u001b[0m, in \u001b[0;36mtransmit_format.<locals>.wrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 560\u001b[0m self_format \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 561\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtype\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_format_type,\n\u001b[1;32m 562\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mformat_kwargs\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_format_kwargs,\n\u001b[1;32m 563\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcolumns\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_format_columns,\n\u001b[1;32m 564\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124moutput_all_columns\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_output_all_columns,\n\u001b[1;32m 565\u001b[0m }\n\u001b[1;32m 566\u001b[0m \u001b[38;5;66;03m# apply actual function\u001b[39;00m\n\u001b[0;32m--> 567\u001b[0m out: Union[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDataset\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDatasetDict\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 568\u001b[0m datasets: List[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDataset\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlist\u001b[39m(out\u001b[38;5;241m.\u001b[39mvalues()) \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(out, \u001b[38;5;28mdict\u001b[39m) \u001b[38;5;28;01melse\u001b[39;00m [out]\n\u001b[1;32m 569\u001b[0m \u001b[38;5;66;03m# re-apply format to the output\u001b[39;00m\n",
+ "File \u001b[0;32m~/.local/lib/python3.10/site-packages/datasets/arrow_dataset.py:3156\u001b[0m, in \u001b[0;36mDataset.map\u001b[0;34m(self, function, with_indices, with_rank, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc, suffix_template, new_fingerprint, desc)\u001b[0m\n\u001b[1;32m 3150\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m transformed_dataset \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 3151\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m hf_tqdm(\n\u001b[1;32m 3152\u001b[0m unit\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m examples\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 3153\u001b[0m total\u001b[38;5;241m=\u001b[39mpbar_total,\n\u001b[1;32m 3154\u001b[0m desc\u001b[38;5;241m=\u001b[39mdesc \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mMap\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 3155\u001b[0m ) \u001b[38;5;28;01mas\u001b[39;00m pbar:\n\u001b[0;32m-> 3156\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m rank, done, content \u001b[38;5;129;01min\u001b[39;00m Dataset\u001b[38;5;241m.\u001b[39m_map_single(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mdataset_kwargs):\n\u001b[1;32m 3157\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m done:\n\u001b[1;32m 3158\u001b[0m shards_done \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n",
+ "File \u001b[0;32m~/.local/lib/python3.10/site-packages/datasets/arrow_dataset.py:3547\u001b[0m, in \u001b[0;36mDataset._map_single\u001b[0;34m(shard, function, with_indices, with_rank, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, new_fingerprint, rank, offset)\u001b[0m\n\u001b[1;32m 3543\u001b[0m indices \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlist\u001b[39m(\n\u001b[1;32m 3544\u001b[0m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;241m*\u001b[39m(\u001b[38;5;28mslice\u001b[39m(i, i \u001b[38;5;241m+\u001b[39m batch_size)\u001b[38;5;241m.\u001b[39mindices(shard\u001b[38;5;241m.\u001b[39mnum_rows)))\n\u001b[1;32m 3545\u001b[0m ) \u001b[38;5;66;03m# Something simpler?\u001b[39;00m\n\u001b[1;32m 3546\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m-> 3547\u001b[0m batch \u001b[38;5;241m=\u001b[39m \u001b[43mapply_function_on_filtered_inputs\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 3548\u001b[0m \u001b[43m \u001b[49m\u001b[43mbatch\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3549\u001b[0m \u001b[43m \u001b[49m\u001b[43mindices\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3550\u001b[0m \u001b[43m \u001b[49m\u001b[43mcheck_same_num_examples\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mshard\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlist_indexes\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m>\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3551\u001b[0m \u001b[43m \u001b[49m\u001b[43moffset\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moffset\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3552\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3553\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m NumExamplesMismatchError:\n\u001b[1;32m 3554\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m DatasetTransformationNotAllowedError(\n\u001b[1;32m 3555\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUsing `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mt create or remove existing examples. You can first run `.drop_index() to remove your index and then re-add it.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3556\u001b[0m ) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n",
+ "File \u001b[0;32m~/.local/lib/python3.10/site-packages/datasets/arrow_dataset.py:3416\u001b[0m, in \u001b[0;36mDataset._map_single.<locals>.apply_function_on_filtered_inputs\u001b[0;34m(pa_inputs, indices, check_same_num_examples, offset)\u001b[0m\n\u001b[1;32m 3414\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m with_rank:\n\u001b[1;32m 3415\u001b[0m additional_args \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m (rank,)\n\u001b[0;32m-> 3416\u001b[0m processed_inputs \u001b[38;5;241m=\u001b[39m \u001b[43mfunction\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mfn_args\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43madditional_args\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mfn_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3417\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(processed_inputs, LazyDict):\n\u001b[1;32m 3418\u001b[0m processed_inputs \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 3419\u001b[0m k: v \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m processed_inputs\u001b[38;5;241m.\u001b[39mdata\u001b[38;5;241m.\u001b[39mitems() \u001b[38;5;28;01mif\u001b[39;00m k \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m processed_inputs\u001b[38;5;241m.\u001b[39mkeys_to_format\n\u001b[1;32m 3420\u001b[0m }\n",
+ "Input \u001b[0;32mIn [8]\u001b[0m, in \u001b[0;36mmap_to_pred\u001b[0;34m(batch)\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mmap_to_pred\u001b[39m(batch):\n\u001b[0;32m---> 16\u001b[0m wav, curr_sample_rate \u001b[38;5;241m=\u001b[39m \u001b[43msf\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43mbatch\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfile_name\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfloat32\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 17\u001b[0m feats \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mfrom_numpy(wav)\u001b[38;5;241m.\u001b[39mfloat()\n\u001b[1;32m 18\u001b[0m feats \u001b[38;5;241m=\u001b[39m F\u001b[38;5;241m.\u001b[39mlayer_norm(feats, feats\u001b[38;5;241m.\u001b[39mshape) \u001b[38;5;66;03m# Normalization performed during finetuning\u001b[39;00m\n",
+ "File \u001b[0;32m~/.local/lib/python3.10/site-packages/soundfile.py:285\u001b[0m, in \u001b[0;36mread\u001b[0;34m(file, frames, start, stop, dtype, always_2d, fill_value, out, samplerate, channels, format, subtype, endian, closefd)\u001b[0m\n\u001b[1;32m 199\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mread\u001b[39m(file, frames\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m, start\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m, stop\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, dtype\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mfloat64\u001b[39m\u001b[38;5;124m'\u001b[39m, always_2d\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[1;32m 200\u001b[0m fill_value\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, out\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, samplerate\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, channels\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 201\u001b[0m \u001b[38;5;28mformat\u001b[39m\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, subtype\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, endian\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, closefd\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m):\n\u001b[1;32m 202\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Provide audio data from a sound file as NumPy array.\u001b[39;00m\n\u001b[1;32m 203\u001b[0m \n\u001b[1;32m 204\u001b[0m \u001b[38;5;124;03m By default, the whole file is read from the beginning, but the\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 283\u001b[0m \n\u001b[1;32m 284\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m--> 285\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[43mSoundFile\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfile\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mr\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msamplerate\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mchannels\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 286\u001b[0m \u001b[43m \u001b[49m\u001b[43msubtype\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mendian\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mformat\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mclosefd\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m f:\n\u001b[1;32m 287\u001b[0m frames \u001b[38;5;241m=\u001b[39m f\u001b[38;5;241m.\u001b[39m_prepare_read(start, stop, frames)\n\u001b[1;32m 288\u001b[0m data \u001b[38;5;241m=\u001b[39m f\u001b[38;5;241m.\u001b[39mread(frames, dtype, always_2d, fill_value, out)\n",
+ "File \u001b[0;32m~/.local/lib/python3.10/site-packages/soundfile.py:658\u001b[0m, in \u001b[0;36mSoundFile.__init__\u001b[0;34m(self, file, mode, samplerate, channels, subtype, endian, format, closefd)\u001b[0m\n\u001b[1;32m 655\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_mode \u001b[38;5;241m=\u001b[39m mode\n\u001b[1;32m 656\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_info \u001b[38;5;241m=\u001b[39m _create_info_struct(file, mode, samplerate, channels,\n\u001b[1;32m 657\u001b[0m \u001b[38;5;28mformat\u001b[39m, subtype, endian)\n\u001b[0;32m--> 658\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_file \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_open\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfile\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmode_int\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mclosefd\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 659\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mset\u001b[39m(mode)\u001b[38;5;241m.\u001b[39missuperset(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mr+\u001b[39m\u001b[38;5;124m'\u001b[39m) \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mseekable():\n\u001b[1;32m 660\u001b[0m \u001b[38;5;66;03m# Move write position to 0 (like in Python file objects)\u001b[39;00m\n\u001b[1;32m 661\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mseek(\u001b[38;5;241m0\u001b[39m)\n",
+ "File \u001b[0;32m~/.local/lib/python3.10/site-packages/soundfile.py:1216\u001b[0m, in \u001b[0;36mSoundFile._open\u001b[0;34m(self, file, mode_int, closefd)\u001b[0m\n\u001b[1;32m 1213\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m file_ptr \u001b[38;5;241m==\u001b[39m _ffi\u001b[38;5;241m.\u001b[39mNULL:\n\u001b[1;32m 1214\u001b[0m \u001b[38;5;66;03m# get the actual error code\u001b[39;00m\n\u001b[1;32m 1215\u001b[0m err \u001b[38;5;241m=\u001b[39m _snd\u001b[38;5;241m.\u001b[39msf_error(file_ptr)\n\u001b[0;32m-> 1216\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m LibsndfileError(err, prefix\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mError opening \u001b[39m\u001b[38;5;132;01m{0!r}\u001b[39;00m\u001b[38;5;124m: \u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mformat(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mname))\n\u001b[1;32m 1217\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m mode_int \u001b[38;5;241m==\u001b[39m _snd\u001b[38;5;241m.\u001b[39mSFM_WRITE:\n\u001b[1;32m 1218\u001b[0m \u001b[38;5;66;03m# Due to a bug in libsndfile version <= 1.0.25, frames != 0\u001b[39;00m\n\u001b[1;32m 1219\u001b[0m \u001b[38;5;66;03m# when opening a named pipe in SFM_WRITE mode.\u001b[39;00m\n\u001b[1;32m 1220\u001b[0m \u001b[38;5;66;03m# See http://github.com/erikd/libsndfile/issues/77.\u001b[39;00m\n\u001b[1;32m 1221\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_info\u001b[38;5;241m.\u001b[39mframes \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m\n",
+ "\u001b[0;31mLibsndfileError\u001b[0m: Error opening 'data/quechua/dev/quechua000573.wav': System error."
+ ]
+ }
+ ],
+ "source": [
+ "from datasets import load_dataset\n",
+ "from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor\n",
+ "import torch\n",
+ "from jiwer import cer\n",
+ "import torch.nn.functional as F\n",
+ "from datasets import load_dataset\n",
+ "import soundfile as sf\n",
+ "\n",
+ "americasnlp = load_dataset(\"ivangtorre/second_americas_nlp_2022\", split=\"dev\")\n",
+ "quechua = americasnlp.filter(lambda language: language['subset']=='quechua')\n",
+ "\n",
+ "model = Wav2Vec2ForCTC.from_pretrained(\"ivangtorre/wav2vec2-xlsr-300m-quechua\")\n",
+ "processor = Wav2Vec2Processor.from_pretrained(\"ivangtorre/wav2vec2-xlsr-300m-quechua\")\n",
+ "\n",
+ "def map_to_pred(batch):\n",
+ " wav, curr_sample_rate = sf.read(batch[\"file_name\"][0], dtype=\"float32\")\n",
+ " feats = torch.from_numpy(wav).float()\n",
+ " feats = F.layer_norm(feats, feats.shape) # Normalization performed during finetuning\n",
+ " feats = torch.unsqueeze(feats, 0)\n",
+ " logits = model(feats).logits\n",
+ " predicted_ids = torch.argmax(logits, dim=-1)\n",
+ " batch[\"transcription\"] = processor.batch_decode(predicted_ids)\n",
+ " return batch\n",
+ "\n",
+ "result = quechua.map(map_to_pred, batched=True, batch_size=1)\n",
+ "\n",
+ "print(\"CER:\", cer(result[\"source_processed\"], result[\"transcription\"]))\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8e29bc13",
+ "metadata": {},
+ "source": [
+ "## BRIBRI\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "7cdec414",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'data/quechua/dev/quechua000573.wav'"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "quechua[0:1][\"file_name\"][0]"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
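
A note on the LibsndfileError captured in the evaluation cell above: map_to_pred re-reads batch["file_name"] from a local path, which fails once the audio only exists on the Hub. A minimal sketch of a workaround, assuming the Hub dataset exposes a decoded "audio" column shaped like the audio_dataset[0] output earlier in the notebook ({'path', 'array', 'sampling_rate'}); the helper name map_to_pred_from_audio is ours, everything else mirrors the notebook:

import torch
import torch.nn.functional as F
from datasets import load_dataset
from jiwer import cer
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

americasnlp = load_dataset("ivangtorre/second_americas_nlp_2022", split="dev")
quechua = americasnlp.filter(lambda row: row["subset"] == "quechua")

model = Wav2Vec2ForCTC.from_pretrained("ivangtorre/wav2vec2-xlsr-300m-quechua")
processor = Wav2Vec2Processor.from_pretrained("ivangtorre/wav2vec2-xlsr-300m-quechua")

def map_to_pred_from_audio(batch):
    # Assumption: the decoded Audio feature yields {"path", "array", "sampling_rate"},
    # so no file has to exist on the local disk.
    wav = batch["audio"][0]["array"]
    feats = torch.tensor(wav, dtype=torch.float32)
    feats = F.layer_norm(feats, feats.shape)  # normalization performed during finetuning
    with torch.no_grad():
        logits = model(feats.unsqueeze(0)).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    batch["transcription"] = processor.batch_decode(predicted_ids)
    return batch

result = quechua.map(map_to_pred_from_audio, batched=True, batch_size=1)
print("CER:", cer(result["source_processed"], result["transcription"]))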
data/dev-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a2bb0b949ff9e10341892b1219e62bca1da4098e78364e311b3bd4fe4f483b68
- size 331374610

data/train-00000-of-00003.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a73d6e92b350d872facbc0ef895ffb4d71f822423e72ea933a3125404641e085
- size 328567068

data/train-00001-of-00003.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:952fdf0e45f0147e763996373506cd4ca5e20ee7e77091705f8cc3ebf4dcbd39
- size 237976428

data/train-00002-of-00003.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7d7a0f461ed17dd77747671a86793e370b976818e918fef0fddb41b9c9eba3e8
- size 198370386
- size 198370386
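
The four deleted entries above are Git LFS pointers for the Parquet shards that an earlier, audio-only push_to_hub had written; deleting them presumably clears the way for a re-upload in which every split carries the text columns, the mismatch the ValueError in the notebook complains about. A minimal sketch of a features-consistent upload, assuming flat DataFrames train_df and dev_df (hypothetical names, one string per cell) built like the notebook's df:

from datasets import Audio, Dataset

def to_hub_dataset(df):
    # Same column set for every split, so features always match on the Hub.
    ds = Dataset.from_dict({
        "audio": df["file_name"].tolist(),
        "source_processed": df["source_processed"].tolist(),
        "source_raw": df["source_raw"].tolist(),
        "target_raw": df["target_raw"].tolist(),
    })
    return ds.cast_column("audio", Audio())

to_hub_dataset(train_df).push_to_hub("ivangtorre/second_americas_nlp_2022", split="train")
to_hub_dataset(dev_df).push_to_hub("ivangtorre/second_americas_nlp_2022", split="dev")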