diff --git "a/src/laboratory.ipynb" "b/src/laboratory.ipynb" new file mode 100644--- /dev/null +++ "b/src/laboratory.ipynb" @@ -0,0 +1,2576 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "63594f228ab14d9796bbf24112269a52", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(HTML(value='
Dict[str, torch.Tensor]:\n", + " # split inputs and labels since they have to be of different lengths and need different padding methods\n", + " # first treat the audio inputs by simply returning torch tensors\n", + " input_features = [{\"input_features\": feature[\"input_features\"]} for feature in features]\n", + " batch = self.processor.feature_extractor.pad(input_features, return_tensors=\"pt\")\n", + "\n", + " # get the tokenized label sequences\n", + " label_features = [{\"input_ids\": feature[\"labels\"]} for feature in features]\n", + "\n", + " # pad the labels to max length\n", + " labels_batch = self.processor.tokenizer.pad(label_features, return_tensors=\"pt\")\n", + "\n", + " # replace padding with -100 to ignore loss correctly\n", + " labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100)\n", + "\n", + " # if bos token is appended in previous tokenization step,\n", + " # cut bos token here as it's append later anyways\n", + " if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item():\n", + " labels = labels[:, 1:]\n", + "\n", + " batch[\"labels\"] = labels\n", + "\n", + " return batch\n", + "data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor)" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "metadata": {}, + "outputs": [], + "source": [ + "from transformers import WhisperForConditionalGeneration\n", + "\n", + "\n", + "model = WhisperForConditionalGeneration.from_pretrained(\n", + " model_name_or_path, device_map=\"auto\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "DatasetDict({\n", + " test: Dataset({\n", + " features: ['input_features', 'labels'],\n", + " num_rows: 857\n", + " })\n", + "})" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "evaluation_dataset" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 108/108 [09:19<00:00, 5.18s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "wer=24.938214396045723\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "from torch.utils.data import DataLoader\n", + "from tqdm import tqdm\n", + "import numpy as np\n", + "import gc\n", + "import evaluate\n", + "metric = evaluate.load(\"wer\")\n", + "eval_dataloader = DataLoader(evaluation_dataset['test'], batch_size=8, collate_fn=data_collator)\n", + "\n", + "model.eval()\n", + "for step, batch in enumerate(tqdm(eval_dataloader)):\n", + " with torch.cuda.amp.autocast():\n", + " with torch.no_grad():\n", + " generated_tokens = (\n", + " model.generate(\n", + " input_features=batch[\"input_features\"].to(\"cuda\"),\n", + " decoder_input_ids=batch[\"labels\"][:, :4].to(\"cuda\"),\n", + " max_new_tokens=255,\n", + " )\n", + " .cpu()\n", + " .numpy()\n", + " )\n", + " labels = batch[\"labels\"].cpu().numpy()\n", + " labels = np.where(labels != -100, labels, tokenizer.pad_token_id)\n", + " decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)\n", + " decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n", + " metric.add_batch(\n", + " predictions=decoded_preds,\n", + " references=decoded_labels,\n", + " )\n", + " del generated_tokens, labels, batch\n", + " gc.collect()\n", + "wer = 100 
* metric.compute()\n", + "print(f\"{wer=}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 54/54 [07:20<00:00, 8.15s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "wer=24.934352795798578 and normalized_wer=13.639508070714834\n", + "{'eval/wer': 24.934352795798578, 'eval/normalized_wer': 13.639508070714834}\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "import gc\n", + "import numpy as np\n", + "from tqdm import tqdm\n", + "from torch.utils.data import DataLoader\n", + "from transformers.models.whisper.english_normalizer import BasicTextNormalizer\n", + "\n", + "eval_dataloader = DataLoader(evaluation_dataset['test'], batch_size=16, collate_fn=data_collator)\n", + "forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task='transcribe')\n", + "normalizer = BasicTextNormalizer()\n", + "\n", + "predictions = []\n", + "references = []\n", + "normalized_predictions = []\n", + "normalized_references = []\n", + "import evaluate\n", + "metric = evaluate.load(\"wer\")\n", + "model.eval()\n", + "for step, batch in enumerate(tqdm(eval_dataloader)):\n", + " with torch.cuda.amp.autocast():\n", + " with torch.no_grad():\n", + " generated_tokens = (\n", + " model.generate(\n", + " input_features=batch[\"input_features\"].to(\"cuda\"),\n", + " forced_decoder_ids=forced_decoder_ids,\n", + " max_new_tokens=255,\n", + " )\n", + " .cpu()\n", + " .numpy()\n", + " )\n", + " labels = batch[\"labels\"].cpu().numpy()\n", + " labels = np.where(labels != -100, labels, processor.tokenizer.pad_token_id)\n", + " decoded_preds = processor.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)\n", + " decoded_labels = processor.tokenizer.batch_decode(labels, skip_special_tokens=True)\n", + " predictions.extend(decoded_preds)\n", + " references.extend(decoded_labels)\n", + " normalized_predictions.extend([normalizer(pred).strip() for pred in decoded_preds])\n", + " normalized_references.extend([normalizer(label).strip() for label in decoded_labels])\n", + " del generated_tokens, labels, batch\n", + " gc.collect()\n", + "wer = 100 * metric.compute(predictions=predictions, references=references)\n", + "normalized_wer = 100 * metric.compute(predictions=normalized_predictions, references=normalized_references)\n", + "eval_metrics = {\"eval/wer\": wer, \"eval/normalized_wer\": normalized_wer}\n", + "\n", + "print(f\"{wer=} and {normalized_wer=}\")\n", + "print(eval_metrics)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Lora\n" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "\n", + "from dataclasses import dataclass\n", + "from typing import Any, Dict, List, Union\n", + "\n", + "@dataclass\n", + "class DataCollatorSpeechSeq2SeqWithPadding:\n", + " processor: Any\n", + "\n", + " def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:\n", + " # split inputs and labels since they have to be of different lengths and need different padding methods\n", + " # first treat the audio inputs by simply returning torch tensors\n", + " input_features = [{\"input_features\": feature[\"input_features\"]} for feature in features]\n", + " batch = self.processor.feature_extractor.pad(input_features, return_tensors=\"pt\")\n", 
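+ " # added note: Whisper's feature extractor emits fixed 30 s log-Mel windows,\n", + " # so this pad() call effectively just stacks the features into one batch\n", + " # tensor; only the token label sequences below need true dynamic padding\n",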
+ "\n", + " # get the tokenized label sequences\n", + " label_features = [{\"input_ids\": feature[\"labels\"]} for feature in features]\n", + "\n", + " # pad the labels to max length\n", + " labels_batch = self.processor.tokenizer.pad(label_features, return_tensors=\"pt\")\n", + "\n", + " # replace padding with -100 to ignore loss correctly\n", + " labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100)\n", + "\n", + " # if bos token is appended in previous tokenization step,\n", + " # cut bos token here as it's append later anyways\n", + " if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item():\n", + " labels = labels[:, 1:]\n", + "\n", + " batch[\"labels\"] = labels\n", + "\n", + " return batch" + ] + }, + { + "cell_type": "code", + "execution_count": 48, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from transformers import (\n", + " AutomaticSpeechRecognitionPipeline,\n", + " WhisperForConditionalGeneration,\n", + " WhisperTokenizer,\n", + " WhisperProcessor,\n", + ")\n", + "from peft import PeftModel, PeftConfig\n", + "\n", + "peft_model_id = \"DuyTa/vi-whisper-medium-Lora\"\n", + "\n", + "language = \"Vietnamese\"\n", + "task = \"transcribe\"\n", + "\n", + "peft_config = PeftConfig.from_pretrained(peft_model_id)\n", + "model = WhisperForConditionalGeneration.from_pretrained(\n", + " peft_config.base_model_name_or_path,\n", + ")\n", + "model = PeftModel.from_pretrained(model, peft_model_id)\n", + "model.to(\"cuda\").half()\n", + "\n", + "processor = WhisperProcessor.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "metadata": {}, + "outputs": [], + "source": [ + "data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor)" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 108/108 [12:31<00:00, 6.96s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "wer_lora=24.934352795798578\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "from torch.utils.data import DataLoader\n", + "from tqdm import tqdm\n", + "import numpy as np\n", + "import gc\n", + "import evaluate\n", + "metric = evaluate.load(\"wer\")\n", + "eval_dataloader = DataLoader(evaluation_dataset['test'], batch_size=8, collate_fn=data_collator)\n", + "\n", + "model.eval()\n", + "for step, batch in enumerate(tqdm(eval_dataloader)):\n", + " with torch.cuda.amp.autocast():\n", + " with torch.no_grad():\n", + " generated_tokens = (\n", + " model.generate(\n", + " input_features=batch[\"input_features\"].to(\"cuda\"),\n", + " decoder_input_ids=batch[\"labels\"][:, :4].to(\"cuda\"),\n", + " max_new_tokens=255,\n", + " )\n", + " .cpu()\n", + " .numpy()\n", + " )\n", + " labels = batch[\"labels\"].cpu().numpy()\n", + " labels = np.where(labels != -100, labels, tokenizer.pad_token_id)\n", + " decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)\n", + " decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n", + " metric.add_batch(\n", + " predictions=decoded_preds,\n", + " references=decoded_labels,\n", + " )\n", + " del generated_tokens, labels, batch\n", + " gc.collect()\n", + "wer_lora = 100 * metric.compute()\n", + "print(f\"{wer_lora=}\")" + ] + }, + { + "cell_type": 
"code", + "execution_count": 50, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 54/54 [09:20<00:00, 10.39s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "wer=24.934352795798578 and normalized_wer=13.624135280553421\n", + "{'eval/wer': 24.934352795798578, 'eval/normalized_wer': 13.624135280553421}\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "import gc\n", + "import numpy as np\n", + "from tqdm import tqdm\n", + "from torch.utils.data import DataLoader\n", + "from transformers.models.whisper.english_normalizer import BasicTextNormalizer\n", + "\n", + "eval_dataloader = DataLoader(evaluation_dataset['test'], batch_size=16, collate_fn=data_collator)\n", + "forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task='transcribe')\n", + "normalizer = BasicTextNormalizer()\n", + "\n", + "predictions = []\n", + "references = []\n", + "normalized_predictions = []\n", + "normalized_references = []\n", + "import evaluate\n", + "metric = evaluate.load(\"wer\")\n", + "model.eval()\n", + "for step, batch in enumerate(tqdm(eval_dataloader)):\n", + " with torch.cuda.amp.autocast():\n", + " with torch.no_grad():\n", + " generated_tokens = (\n", + " model.generate(\n", + " input_features=batch[\"input_features\"].to(\"cuda\"),\n", + " forced_decoder_ids=forced_decoder_ids,\n", + " max_new_tokens=255,\n", + " )\n", + " .cpu()\n", + " .numpy()\n", + " )\n", + " labels = batch[\"labels\"].cpu().numpy()\n", + " labels = np.where(labels != -100, labels, processor.tokenizer.pad_token_id)\n", + " decoded_preds = processor.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)\n", + " decoded_labels = processor.tokenizer.batch_decode(labels, skip_special_tokens=True)\n", + " predictions.extend(decoded_preds)\n", + " references.extend(decoded_labels)\n", + " normalized_predictions.extend([normalizer(pred).strip() for pred in decoded_preds])\n", + " normalized_references.extend([normalizer(label).strip() for label in decoded_labels])\n", + " del generated_tokens, labels, batch\n", + " gc.collect()\n", + "wer = 100 * metric.compute(predictions=predictions, references=references)\n", + "normalized_wer = 100 * metric.compute(predictions=normalized_predictions, references=normalized_references)\n", + "eval_metrics = {\"eval/wer\": wer, \"eval/normalized_wer\": normalized_wer}\n", + "\n", + "print(f\"{wer=} and {normalized_wer=}\")\n", + "print(eval_metrics)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Quantization Whisper Lora" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "from whisper_quant import WhisperModel\n", + "\n", + "model_size = \"medium\"\n", + "\n", + "# Run on GPU with FP16\n", + "model = WhisperModel(model_size, device=\"cuda\", compute_type=\"float16\")\n", + "\n", + "# or run on GPU with INT8\n", + "# model = WhisperModel(model_size, device=\"cuda\", compute_type=\"int8_float16\")\n", + "# or run on CPU with INT8\n", + "# model = WhisperModel(model_size, device=\"cpu\", compute_type=\"int8\")\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "segments, info = model.transcribe(\"audio.wav\", beam_size=1, language ='vi', temperature= 0)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + 
"name": "stdout", + "output_type": "stream", + "text": [ + "[0.00s -> 7.10s] Hai, đây tức là một kẻ ăn mày vậy, anh ta chưa kịp quay đi thì đã thấy mấy con chó vàng chạy sồng sộc ra cứ nhảy sổ vào chân anh.\n" + ] + } + ], + "source": [ + "for segment in segments:\n", + " print(\"[%.2fs -> %.2fs] %s\" % (segment.start, segment.end, segment.text))" + ] + }, + { + "cell_type": "code", + "execution_count": 56, + "metadata": {}, + "outputs": [], + "source": [ + "normalizer = BasicTextNormalizer()\n", + "norm = normalizer(segment.text)" + ] + }, + { + "cell_type": "code", + "execution_count": 57, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "' hai đây tức là một kẻ ăn mầy vậy anh ta chưa kịp quay đi thì đã thấy mấy con chó vàng chạy sồng sộc ra cứ nhảy sổ vào chân anh '" + ] + }, + "execution_count": 57, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "norm" + ] + }, + { + "cell_type": "code", + "execution_count": 119, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from dataclasses import dataclass\n", + "\n", + "@dataclass\n", + "class DataCollatorSpeechSeq2SeqWithPadding:\n", + " processor: Any\n", + "\n", + " def __call__(self, features):\n", + " audios = []\n", + " for feature in features:\n", + " audios.append(feature[\"audio\"])\n", + " batch = {\n", + " \"audio\": [feature[\"audio\"]['array'] for feature in features],\n", + " \"transcription\": [feature[\"transcription\"] for feature in features]\n", + " }\n", + " return batch\n", + "data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor='No')" + ] + }, + { + "cell_type": "code", + "execution_count": 120, + "metadata": {}, + "outputs": [], + "source": [ + "from torch.utils.data import DataLoader\n", + "from tqdm import tqdm\n", + "import numpy as np\n", + "import gc\n", + "import evaluate\n", + "metric = evaluate.load(\"wer\")\n", + "eval_dataloader = DataLoader(fleurs['test'], batch_size=16, collate_fn=data_collator)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import re\n", + "for data in eval_dataloader:\n", + " audios = data['audio']\n", + " transcriptions = data['transcription']\n", + " final = []\n", + " for audio in data['audio']:\n", + " print(\"-\" * 20)\n", + " segments, info = model.transcribe(audio, beam_size=1, language='vi')\n", + " out = [out.text for out in segments]\n", + " pred = ''.join(out)\n", + " norm_pred = normalizer(pred)\n", + " final.append(norm_pred)\n", + "cleaned_text_list = [re.sub(r'\\s+', ' ', text.strip()) for text in final]\n", + " \n", + "\n", + " print(cleaned_text_list)\n", + " print(transcriptions)\n", + " break\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for step, batch in enumerate(tqdm(eval_dataloader)):\n", + " with torch.cuda.amp.autocast():\n", + " with torch.no_grad():\n", + "\n", + " labels = batch[\"transcription\"]\n", + " print(labels)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for step, batch in enumerate(tqdm(eval_dataloader)):\n", + " with torch.cuda.amp.autocast():\n", + " with torch.no_grad():\n", + " final = []\n", + " labels = batch[\"transcription\"]\n", + " for audio in batch[\"audio\"]:\n", + " \n", + " segments, _ = model.transcribe(audio, beam_size=1, language='vi')\n", + " out = [out.text for out in segments]\n", + " pred = ''.join(out)\n", + " norm_pred = 
normalizer(pred)\n", + " final.append(norm_pred)\n", + " cleaned_text_list = [re.sub(r'\\s+', ' ', text.strip()) for text in final]\n", + " print(cleaned_text_list)\n", + " print(labels)\n", + " metric.add_batch(\n", + " predictions=cleaned_text_list,\n", + " references=labels,\n", + " )\n", + " del labels, batch, final\n", + " gc.collect()\n", + "wer_lora = 100 * metric.compute()\n", + "print(f\"{wer_lora=}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjIAAAHHCAYAAACle7JuAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAABDf0lEQVR4nO3dd3gU5d7/8c+SwKaQQgKBREMCSJci5SBFEhAPXbCAYiFwEPARRIoe5aiUgxKKKJwDYkETDyJ2UFHaQYIQigHERwVBkBJQCCokIUCCyf37w1/mYUkhgYTdkffruvbSuad9Z3eyfHbmnhmHMcYIAADAhiq4uwAAAIBLRZABAAC2RZABAAC2RZABAAC2RZABAAC2RZABAAC2RZABAAC2RZABAAC2RZABAAC2RZABLlFSUpIcDoeSkpLcXQr+v3fffVchISE6deqUu0u5qJdeekk1a9ZUdnZ2mS97xYoVat68uXx8fORwOHTy5MkyX8flcjgcmjRpUqnnO3DggBwOhxITE8u8JtgTQQYe7d1335XD4dCSJUsKjGvWrJkcDofWrl1bYFzNmjXVrl27K1HiRSUmJsrhcBT52rx5s7tLLNSgQYNc6nQ6napXr54mTJigs2fPXtIyd+7cqUmTJunAgQNlW6yk3NxcTZw4UQ8//LAqV64sSWrUqJGaNWtWYNolS5bI4XAoJiamwLjXX39dDodDq1atklS6z+/CcYGBgYqJidGnn35aYD2DBg1STk6OXn755bJ6CyRJv/76q/r37y9fX1/NmzdPCxculL+/f6HTnr9tGzZsKDDeGKPIyEg5HA716tWrTOsEyoq3uwsAitOhQwdJ0oYNG3TbbbdZ7RkZGfr222/l7e2t5ORkderUyRqXmpqq1NRU3X333Ve83uL885//VK1atQq0X3fddW6opmScTqcWLFggSUpPT9dHH32kKVOmaN++fVq0aFGpl7dz505NnjxZsbGxio6OLtNaP/nkE+3evVvDhg2z2jp06KDXXntN6enpCgoKstqTk5Pl7e2tlJQUnTt3ThUrVnQZ5+XlpbZt27osv6Sf3y233KKBAwfKGKODBw9q/vz56t27t5YvX66uXbta0/n4+CguLk7PP/+8Hn74YTkcjst+DyQpJSVFmZmZmjJlirp06VKieXx8fPTWW29Zf2/51q1bp8OHD8vpdJZJbUB5IMjAo0VERKhWrVoFfi1u2rRJxhj169evwLj84Qu/lEvLGKOzZ8/K19f3spaTr3v37mrVqlWp5vn999+Vl5enSpUqFRiXlZVV5C/tkijJ9nl7e+u+++6zhh966CG1a9dOixcv1vPPP6/q1atf8vrLWkJCgtq3b69rrrnGauvQoYNeffVVbdy4Ud27d7fak5OT1b9/f7311lvatm2bbrzxRmvchg0b1LRpUwUEBLgsv6SfX7169VzeszvuuEONGjXSnDlzXIKMJPXv318zZszQ2rVr1blz51Jvc2HS0tIkScHBwSWep0ePHnrvvff0r3/9S97e//fPwltvvaWWLVvql19+KZPagPLAqSV4vA4dOuirr77SmTNnrLbk5GQ1btxY3bt31+bNm5WXl+cyzuFwqH379pL+CANTpkxRnTp15HQ6FR0drX/84x8F+iZER0erV69eWrlypVq1aiVfX1/rsP/hw4fVt29f+fv7KywsTGPGjCnzvg355/6fe+45zZ4926o3/3SMw+HQzp07dc8996hKlSpWUCuL7Ssph8OhDh06yBijH3/80Wo/ePCgHnroIdWvX1++vr4KDQ1Vv379XE4hJSYmql+/fpKkTp06Wac0zu9jtHz5ct10003y9/dXQECAevbsqe++++6idZ09e1YrVqwocAQi/z1KTk52mXb79u26/fbbVbt2bZdxx48f1549ey47BJ+vYcOGqlq1qvbt21dgXMuWLRUSEqKPPvqoRMt677331LJlS/n6+qpq1aq67777dOTIEWt8bGys4uLiJEmtW7eWw+HQoEGDLrrcAQMG6Ndff9Xq1auttpycHL3//vu65557Cp0nKytL48aNU2RkpJxOp+rXr6/nnntOxhiX6bKzszVmzBhVq1ZNAQEBuvXWW3X48OFCl3nkyBH97W9/U/Xq1eV0OtW4cWO9/vrrF63/6NGjGjx4sK699lo5nU6Fh4erT58+5XIKE56HIzLweB06dNDChQu1ZcsWxcbGSvrjH6Z27dqpXbt2Sk9P17fffqumTZta4xo0aKDQ0FBJ0gMPPKA33nhDd955p8aNG6ctW7YoPj5eu3btKtD3Zvfu3RowYICGDx+uoUOHqn79+jpz5oxuvvlmHTp0SKNGjVJERIQWLlyozz//vFTbkZ6eXuCXrcPhsOrMl5CQoLNnz2rYsGFyOp0KCQmxxvXr109169bV1KlTrX8wLnf7Siv/H4cqVapYbSkpKdq4caPuvvtuXXvttTpw4IDmz5+v2NhY7dy5U35+furYsaNGjRqlf/3rX/rHP/6hhg0bSpL134ULFyouLk5du3bV9OnTdfr0ac2fP98KssWditq2bZtycnLUokULl/batWsrIiLC5ahdSkqKcnJyrP0nOTlZ48aNkyRt3LhRUuFH80r6+RU234kTJ1SnTp1Cx7do0cIlTBUlMTFRgwcPVuvWrRUfH69jx45pzpw5Sk5O1ldffaXg4GA9+eSTql+/vl555RXrVFhR6z1fdHS02rZtq8WLF1tHrpYvX6709HTdfffd+te//uUyvTFGt956q9auXashQ4aoefPmWrlypR577DEdOXJEL7zwgjXtAw88oDfffFP33HOP2rVrp88//1w9e/YsUMOxY8d04403yuFwaOTIkapWrZqWL1+uIUOGKCMjQ6NHjy6y/jvuuEPfffedHn74YUVHRystLU2rV6/WoUOHyvwUJjyQATzcd999ZySZKVOmGGOMOXfunPH39zdvvPGGMcaY6tWrm3nz5hljjMnIyDBeXl5m6NChxhhjduzYYSSZBx54wGWZjz76qJFkP
...(base64-encoded PNG data omitted)", + "text/plain": [ + "<Figure size 640x480 with 1 Axes>
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "\n", + "\n", + "model_names = [\"whisper-medium\", \"whisper-lora\",\"quantz-whisper-lora\"]\n", + "wer_values = [13.639508070714834,13.624135280553421,14.044640098856966]\n", + "wer_values = [round(value, 2) for value in wer_values]\n", + "\n", + "bar_colors = ['blue', 'green',\"red\"]\n", + "# Create a bar chart\n", + "plt.bar(model_names, wer_values,color=bar_colors)\n", + "plt.xlabel(\"Fleurs\")\n", + "plt.ylabel(\"WER (%)\")\n", + "plt.title(\"Word Error Rate (WER) of Models\")\n", + "plt.ylim(0, 15) # Set y-axis range to 0-100\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Vin100h" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## medium" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DatasetDict({\n", + " train: Dataset({\n", + " features: ['input_features', 'input_length', 'attention_mask', 'labels'],\n", + " num_rows: 44927\n", + " })\n", + " test: Dataset({\n", + " features: ['input_features', 'input_length', 'attention_mask', 'labels'],\n", + " num_rows: 11245\n", + " })\n", + "})\n" + ] + } + ], + "source": [ + " # Load Dataset\n", + "from datasets import load_dataset, DatasetDict, load_from_disk\n", + "processed_dataset = DatasetDict()\n", + "processed_dataset = load_from_disk(\"./vin_clean\")\n", + "\n", + "\n", + "print(processed_dataset)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/tesla/miniconda3/envs/DUY/lib/python3.9/site-packages/transformers/models/auto/configuration_auto.py:992: FutureWarning: The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers.\n", + " warnings.warn(\n", + "/home/tesla/miniconda3/envs/DUY/lib/python3.9/site-packages/transformers/models/auto/feature_extraction_auto.py:322: FutureWarning: The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers.\n", + " warnings.warn(\n", + "/home/tesla/miniconda3/envs/DUY/lib/python3.9/site-packages/transformers/models/auto/tokenization_auto.py:628: FutureWarning: The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers.\n", + " warnings.warn(\n" + ] + } + ], + "source": [ + "\n", + "\n", + "import datasets\n", + "from datasets import DatasetDict, load_dataset, concatenate_datasets\n", + "from tqdm import tqdm\n", + "from transformers import (\n", + " AutoConfig,\n", + " AutoFeatureExtractor,\n", + " AutoModelForSpeechSeq2Seq,\n", + " AutoTokenizer,\n", + " set_seed,\n", + ")\n", + "from tqdm import tqdm\n", + "\n", + "\n", + "\n", + "from datasets import Dataset, DatasetDict\n", + "import torchaudio\n", + "from torchaudio import transforms as at\n", + "import pandas as pd\n", + "import torch\n", + "from pathlib import Path\n", + "\n", + "set_seed(42)\n", + "\n", + "\n", + "config = AutoConfig.from_pretrained(\n", + " \"openai/whisper-medium\", revision=\"main\", use_auth_token=True\n", + " )\n", + "\n", + "config.update({\"forced_decoder_ids\": None, \"suppress_tokens\": None})\n", + "\n", + "\n", + "\n", + "feature_extractor = AutoFeatureExtractor.from_pretrained(\n", + " \"openai/whisper-medium\",\n", + " revision=\"main\",\n", + " use_auth_token=True,\n", + " )\n", + "tokenizer = 
AutoTokenizer.from_pretrained(\n", + " \"openai/whisper-medium\",\n", + " use_fast=True,\n", + " revision=\"main\",\n", + " use_auth_token=True,\n", + " )\n", + "\n", + "tokenizer.set_prefix_tokens(language=\"vi\", task=\"transcribe\")\n", + "\n", + "audio_column_name = \"audio\"\n", + "text_column_name = \"transcription\"\n", + "model_input_name = feature_extractor.model_input_names[0]\n", + "\n", + "\n", + "forward_attention_mask = True\n", + "\n", + "\n", + "\n", + "def prepare_dataset(batch):\n", + " # load and resample audio data from 48 to 16kHz\n", + " audio = batch[\"audio\"]\n", + "\n", + " # compute log-Mel input features from input audio array \n", + " batch[\"input_features\"] = feature_extractor(audio[\"array\"], sampling_rate=audio[\"sampling_rate\"]).input_features[0]\n", + "\n", + " # encode target text to label ids \n", + " batch[\"labels\"] = tokenizer(batch[text_column_name]).input_ids\n", + " return batch\n", + "\n", + "\n", + "clean_data = processed_dataset['test']\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "\n", + "from dataclasses import dataclass\n", + "from typing import Any, Dict, List, Union\n", + "\n", + "from transformers import WhisperProcessor\n", + "\n", + "processor = WhisperProcessor.from_pretrained(\"openai/whisper-medium\", language=\"vi\", task=\"transcribe\")\n", + "@dataclass\n", + "class DataCollatorSpeechSeq2SeqWithPadding:\n", + " processor: Any\n", + "\n", + " def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:\n", + " # split inputs and labels since they have to be of different lengths and need different padding methods\n", + " # first treat the audio inputs by simply returning torch tensors\n", + " input_features = [{\"input_features\": feature[\"input_features\"]} for feature in features]\n", + " batch = self.processor.feature_extractor.pad(input_features, return_tensors=\"pt\")\n", + "\n", + " # get the tokenized label sequences\n", + " label_features = [{\"input_ids\": feature[\"labels\"]} for feature in features]\n", + "\n", + " # pad the labels to max length\n", + " labels_batch = self.processor.tokenizer.pad(label_features, return_tensors=\"pt\")\n", + "\n", + " # replace padding with -100 to ignore loss correctly\n", + " labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100)\n", + "\n", + " # if bos token is appended in previous tokenization step,\n", + " # cut bos token here as it's append later anyways\n", + " if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item():\n", + " labels = labels[:, 1:]\n", + "\n", + " batch[\"labels\"] = labels\n", + "\n", + " return batch\n", + "data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor)" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Dataset({\n", + " features: ['input_features', 'input_length', 'attention_mask', 'labels'],\n", + " num_rows: 11245\n", + "})" + ] + }, + "execution_count": 39, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "clean_data" + ] + }, + { + "cell_type": "code", + "execution_count": 48, + "metadata": {}, + "outputs": [], + "source": [ + "torch.cuda.empty_cache()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "from transformers import 
WhisperForConditionalGeneration\n", + "\n", + "\n", + "model = WhisperForConditionalGeneration.from_pretrained(\n", + " 'openai/whisper-medium', device_map=\"auto\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 703/703 [1:42:04<00:00, 8.71s/it]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "wer=35.96574964967958 and normalized_wer=22.798881834138733\n", + "{'eval/wer': 35.96574964967958, 'eval/normalized_wer': 22.798881834138733}\n" + ] + } + ], + "source": [ + "import gc\n", + "import numpy as np\n", + "from tqdm import tqdm\n", + "from torch.utils.data import DataLoader\n", + "from transformers.models.whisper.english_normalizer import BasicTextNormalizer\n", + "\n", + "eval_dataloader = DataLoader(clean_data, batch_size=16, collate_fn=data_collator)\n", + "forced_decoder_ids = processor.get_decoder_prompt_ids(language=\"vi\", task='transcribe')\n", + "normalizer = BasicTextNormalizer()\n", + "\n", + "predictions = []\n", + "references = []\n", + "normalized_predictions = []\n", + "normalized_references = []\n", + "import evaluate\n", + "metric = evaluate.load(\"wer\")\n", + "model.eval()\n", + "for step, batch in enumerate(tqdm(eval_dataloader)):\n", + " with torch.cuda.amp.autocast():\n", + " with torch.no_grad():\n", + " generated_tokens = (\n", + " model.generate(\n", + " input_features=batch[\"input_features\"].to(\"cuda\"),\n", + " forced_decoder_ids=forced_decoder_ids,\n", + " max_new_tokens=255,\n", + " )\n", + " .cpu()\n", + " .numpy()\n", + " )\n", + " labels = batch[\"labels\"].cpu().numpy()\n", + " labels = np.where(labels != -100, labels, processor.tokenizer.pad_token_id)\n", + " decoded_preds = processor.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)\n", + " decoded_labels = processor.tokenizer.batch_decode(labels, skip_special_tokens=True)\n", + " predictions.extend(decoded_preds)\n", + " references.extend(decoded_labels)\n", + " normalized_predictions.extend([normalizer(pred).strip() for pred in decoded_preds])\n", + " normalized_references.extend([normalizer(label).strip() for label in decoded_labels])\n", + " del generated_tokens, labels, batch\n", + " gc.collect()\n", + "wer = 100 * metric.compute(predictions=predictions, references=references)\n", + "normalized_wer = 100 * metric.compute(predictions=normalized_predictions, references=normalized_references)\n", + "eval_metrics = {\"eval/wer\": wer, \"eval/normalized_wer\": normalized_wer}\n", + "\n", + "print(f\"{wer=} and {normalized_wer=}\")\n", + "print(eval_metrics)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Lora" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from transformers import (\n", + " AutomaticSpeechRecognitionPipeline,\n", + " WhisperForConditionalGeneration,\n", + " WhisperTokenizer,\n", + " WhisperProcessor,\n", + ")\n", + "from peft import PeftModel, PeftConfig\n", + "\n", + "\n", + "peft_model_id = \"DuyTa/vi-whisper-medium-Lora\"\n", + "language = \"Vietnamese\"\n", + "task = \"transcribe\"\n", + "\n", + "peft_config = PeftConfig.from_pretrained(peft_model_id)\n", + "model = WhisperForConditionalGeneration.from_pretrained(\n", + " peft_config.base_model_name_or_path\n", + ")\n", + "model = PeftModel.from_pretrained(model, peft_model_id)\n", + "model.to(\"cuda:0\").half()\n", + "\n", + 
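"# added note: .half() casts the PEFT-wrapped weights to fp16 to reduce GPU\n", + "# memory; the eval loop below additionally runs under torch.cuda.amp.autocast()\n", + 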
"processor = WhisperProcessor.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 703/703 [2:13:50<00:00, 11.42s/it] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "wer=35.96574964967958 and normalized_wer=22.798881834138733\n", + "{'eval/wer': 35.95314171676819, 'eval/normalized_wer': 22.793825528564515}\n" + ] + } + ], + "source": [ + "import gc\n", + "import numpy as np\n", + "from tqdm import tqdm\n", + "from torch.utils.data import DataLoader\n", + "from transformers.models.whisper.english_normalizer import BasicTextNormalizer\n", + "\n", + "eval_dataloader = DataLoader(clean_data, batch_size=16, collate_fn=data_collator)\n", + "forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task='transcribe')\n", + "normalizer = BasicTextNormalizer()\n", + "\n", + "predictions = []\n", + "references = []\n", + "normalized_predictions = []\n", + "normalized_references = []\n", + "import evaluate\n", + "metric = evaluate.load(\"wer\")\n", + "model.eval()\n", + "for step, batch in enumerate(tqdm(eval_dataloader)):\n", + " with torch.cuda.amp.autocast():\n", + " with torch.no_grad():\n", + " generated_tokens = (\n", + " model.generate(\n", + " input_features=batch[\"input_features\"].to(\"cuda\"),\n", + " forced_decoder_ids=forced_decoder_ids,\n", + " max_new_tokens=255,\n", + " )\n", + " .cpu()\n", + " .numpy()\n", + " )\n", + " labels = batch[\"labels\"].cpu().numpy()\n", + " labels = np.where(labels != -100, labels, processor.tokenizer.pad_token_id)\n", + " decoded_preds = processor.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)\n", + " decoded_labels = processor.tokenizer.batch_decode(labels, skip_special_tokens=True)\n", + " predictions.extend(decoded_preds)\n", + " references.extend(decoded_labels)\n", + " normalized_predictions.extend([normalizer(pred).strip() for pred in decoded_preds])\n", + " normalized_references.extend([normalizer(label).strip() for label in decoded_labels])\n", + " del generated_tokens, labels, batch\n", + " gc.collect()\n", + "lora_wer = 100 * metric.compute(predictions=predictions, references=references)\n", + "lora_normalized_wer = 100 * metric.compute(predictions=normalized_predictions, references=normalized_references)\n", + "eval_metrics = {\"eval/wer\": lora_wer, \"eval/normalized_wer\": lora_normalized_wer}\n", + "\n", + "print(f\"{wer=} and {normalized_wer=}\")\n", + "print(eval_metrics)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Quantized model" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "from whisper_quant import WhisperModel\n", + "\n", + "model_size = \"medium\"\n", + "\n", + "# Run on GPU with FP16\n", + "model = WhisperModel(model_size, device=\"cuda\", compute_type=\"float16\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### format dataset for quantized model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import logging\n", + "import numpy as np\n", + "import datasets\n", + "from datasets 
import DatasetDict, load_dataset, concatenate_datasets\n", + "from tqdm import tqdm\n", + "from transformers import (\n", + " AutoConfig,\n", + " AutoFeatureExtractor,\n", + " AutoModelForSpeechSeq2Seq,\n", + " AutoTokenizer,\n", + " set_seed,\n", + ")\n", + "from transformers.utils.versions import require_version\n", + "from transformers.utils import check_min_version\n", + "from tqdm import tqdm\n", + "\n", + "\n", + "\n", + "\n", + "logger = logging.getLogger(__name__)\n", + "from datasets import Dataset, DatasetDict\n", + "import torchaudio\n", + "from torchaudio import transforms as at\n", + "import pandas as pd\n", + "import torch\n", + "from pathlib import Path\n", + "\n", + "def main():\n", + " set_seed(42)\n", + "\n", + "\n", + " max_input_length = 30.0 * 16000\n", + " min_input_length = 0.0 * 16000\n", + " audio_column_name = \"audio\"\n", + " text_column_name = \"text\"\n", + "\n", + "\n", + "\n", + "\n", + "\n", + " def load_wave(wave_path, sample_rate:int=16000) -> np.ndarray:\n", + " waveform, sr = torchaudio.load(wave_path, normalize=True)\n", + " if sample_rate != sr:\n", + " waveform = at.Resample(sr, sample_rate)(waveform)\n", + " return np.asarray(waveform)\n", + " \n", + "\n", + " def get_list_files_vin100h(phase, sample_rate=16000, audio_max_sample_length=480000):\n", + " audio_list = []\n", + " text_list = []\n", + " path_list = []\n", + " if phase == 'train':\n", + " csv_file = 'vin_train.csv'\n", + " else:\n", + " csv_file = 'vin_test.csv'\n", + " df = pd.read_csv(csv_file)\n", + " for index, row in tqdm(df.iterrows()):\n", + " path = row['path']\n", + " new_path = Path(row['path'])\n", + " audio_id = index\n", + " text = row['sentence']\n", + " if new_path.exists():\n", + " audio = load_wave(new_path, sample_rate=sample_rate)[0]\n", + " if len(audio) > audio_max_sample_length or len(audio) < 0:\n", + " print('skip file:', new_path,'with len audio', len(audio))\n", + " del new_path\n", + " continue\n", + " text_list.append(text)\n", + " path_list.append(path) \n", + "\n", + " return path_list, text_list\n", + "\n", + "\n", + "\n", + "\n", + " # Get the testing dataset\n", + " test_audio, test_text = get_list_files_vin100h(phase='test')\n", + " #print(test_audio[0])\n", + " test_dataset = Dataset.from_dict({\"audio\": test_audio, \"text\": test_text})\n", + " vin_100h = DatasetDict({\"test\": test_dataset})\n", + " #print(clean_data)\n", + "\n", + "\n", + " return vin_100h, test_dataset\n", + "\n", + "\n", + "if __name__ == \"__main__\":\n", + " clean_data, test_dataset = main()" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "DatasetDict({\n", + " test: Dataset({\n", + " features: ['audio', 'text'],\n", + " num_rows: 11245\n", + " })\n", + "})" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "clean_data" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "segments, _ = model.transcribe(clean_data['test']['audio'][0], beam_size=1, language='vi', temperature= 0)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " Cây cam canh là loại cây ăn quả dễ chồng, dễ chăm sót và cho năng suốt cao nên hiện nay được chồng ở nhiều địa phương trong nước ta.\n", + " Cam canh có đặc điểm phỏ mỏng, ăn rất ngọt.\n" + ] + } + ], + "source": [ + "for segment in 
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "DatasetDict({\n",
+ "    test: Dataset({\n",
+ "        features: ['audio', 'text'],\n",
+ "        num_rows: 11245\n",
+ "    })\n",
+ "})"
+ ]
+ },
+ "execution_count": 20,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "clean_data"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "segments, _ = model.transcribe(clean_data['test']['audio'][0], beam_size=1, language='vi', temperature=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ " Cây cam canh là loại cây ăn quả dễ chồng, dễ chăm sót và cho năng suốt cao nên hiện nay được chồng ở nhiều địa phương trong nước ta.\n",
+ " Cam canh có đặc điểm phỏ mỏng, ăn rất ngọt.\n"
+ ]
+ }
+ ],
+ "source": [
+ "for segment in segments:\n",
+ "    print(segment.text)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import torch\n",
+ "from dataclasses import dataclass\n",
+ "\n",
+ "@dataclass\n",
+ "class DataCollatorSpeechSeq2SeqWithPadding:\n",
+ "    # Pass-through collator: the quantized model takes raw audio paths, so\n",
+ "    # batching only needs to group paths and their reference transcripts.\n",
+ "    def __call__(self, features):\n",
+ "        batch = {\n",
+ "            \"audio\": [feature[\"audio\"] for feature in features],\n",
+ "            \"transcription\": [feature[\"text\"] for feature in features]\n",
+ "        }\n",
+ "        return batch\n",
+ "\n",
+ "data_collator = DataCollatorSpeechSeq2SeqWithPadding()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|██████████| 703/703 [1:48:04<00:00, 9.22s/it]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "wer_quantz=21.69356959398854\n"
+ ]
+ }
+ ],
+ "source": [
+ "import gc\n",
+ "import re\n",
+ "\n",
+ "import evaluate\n",
+ "from torch.utils.data import DataLoader\n",
+ "from tqdm import tqdm\n",
+ "from transformers.models.whisper.english_normalizer import BasicTextNormalizer\n",
+ "\n",
+ "metric = evaluate.load(\"wer\")\n",
+ "normalizer = BasicTextNormalizer()\n",
+ "eval_dataloader = DataLoader(clean_data['test'], batch_size=16, collate_fn=data_collator)\n",
+ "for step, batch in enumerate(tqdm(eval_dataloader)):\n",
+ "    with torch.cuda.amp.autocast():\n",
+ "        with torch.no_grad():\n",
+ "            final = []\n",
+ "            labels = batch[\"transcription\"]\n",
+ "            for audio in batch[\"audio\"]:\n",
+ "                segments, _ = model.transcribe(audio, beam_size=1, language='vi', temperature=0)\n",
+ "                pred = ''.join(segment.text for segment in segments)\n",
+ "                # note: predictions are normalized here while references are left as-is\n",
+ "                final.append(normalizer(pred))\n",
+ "            # collapse repeated whitespace before scoring\n",
+ "            cleaned_text_list = [re.sub(r'\\s+', ' ', text.strip()) for text in final]\n",
+ "            metric.add_batch(\n",
+ "                predictions=cleaned_text_list,\n",
+ "                references=labels,\n",
+ "            )\n",
+ "    del labels, batch, final\n",
+ "    gc.collect()\n",
+ "wer_quantz = 100 * metric.compute()\n",
+ "print(f\"{wer_quantz=}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 35,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Normalized WER values used in the chart below: the whisper-medium baseline\n",
+ "# and the LoRA fine-tune's eval/normalized_wer from the evaluation above.\n",
+ "normalized_wer = 22.798881834138733\n",
+ "lora_wer = 22.793825528564515"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAjIAAAHHCAYAAACle7JuAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAABD80lEQVR4nO3dfVwU5f7/8fcKuoDcKMhtIahpSCnmzTHUBG9OeJuWadqNaKZ21MqbOud4Mm/ynEit1HMyq1NBedLKSi1Ly0xI8Sa17E6zNE2tRPMGEBUMrt8f/divKzeCgruTr+fjsQ+dmWtnP7M7O7x3Zq4ZmzHGCAAAwIJquLoAAACAC0WQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAS5Qenq6bDab0tPTXV0K/r833nhDgYGBOnHihKtLOa9nn31W9evXV35+fpXPe+XKlWrRooW8vLxks9l0/PjxKn+Ni2Wz2TR16tRKP2/v3r2y2WxKS0ur8ppgTQQZuLU33nhDNptNS5YsKTEtLi5ONptNa9asKTGtfv36ateu3aUo8bzS0tJks9nKfGzcuNHVJZZqyJAhTnXa7XY1adJEkydP1unTpy9ontu3b9fUqVO1d+/eqi1WUmFhoaZMmaL77rtPvr6+kqTY2FjFxcWVaLtkyRLZbDYlJCSUmPbSSy/JZrPpww8/lFS5z+/caf7+/kpISNB7771X4nWGDBmigoICPffcc1X1FkiSjhw5ogEDBsjb21vz5s3TggULVLt27VLbnr1s69atKzHdGKPIyEjZbDb16tWrSusEqoqnqwsAytOhQwdJ0rp163TzzTc7xufk5Ojrr7+Wp6enMjMz1alTJ8e0/fv3a//+/Ro4cOAlr7c8jz76qBo0aFBi/FVXXeWCairGbrfrhRdekCRlZ2dr2bJlmj59unbv3q1XX3210vPbvn27pk2bpsTEREVHR1dpre+++6527typESNGOMZ16NBBL774orKzsxUQEOAYn5mZKU9PT23evFlnzpxRzZo1naZ5eHgoPj7eaf4V/fz+/Oc/a/DgwTLG6Mcff9T8+fPVu3dvrVixQklJSY52Xl5eSk5O1lNPPaX77rtPNpvtot8DSdq8ebNyc3M1ffp0de3atULP8fLy0sKFCx3ft2IZGRk6cOCA7HZ7ldQGVAeCDNxaRESEGjRoUOLX4oYNG2SMUf/+/UtMKx4+d6NcWcYYnT59Wt7e3hc1n2Ldu3dX69atK/Wc3377TUVFRapVq1aJaXl5eWX+0q6Iiiyfp6en7rzzTsfwqFGj1K5dOy1atEhPPfWUQkNDL/j1q1pqaqrat2+vK664wjGuQ4cO+u9//6v169ere/fujvGZmZkaMGCAFi5cqK1bt+r66693TFu3bp2aN28uPz8/p/lX9PNr0qSJ03vWr18/xcbGau7cuU5BRpIGDBigmTNnas2aNercuXOll7k0hw4dkiTVqVOnws/p0aOHFi9erH//+9/y9Py/PwsLFy5Uq1at9Ouvv1ZJbUB14NAS3F6HDh30+eef69SpU45xmZmZuuaaa9S9e3dt3LhRRUVFTtNsNpvat28v6fcwMH36dDVq1Eh2u13R0dH6xz/+UeLchOjoaPXq1UsffPCBWrduLW9vb8du/wMHDqhv376qXbu2QkJCNG7cuCo/t6H42P8TTzyhOXPmOOotPhxjs9m0fft23X777apbt64jqFXF8lWUzWZThw4dZIzRDz/84Bj/448/atSoUbr66qvl7e2toKAg9e/f3+kQUlpamvr37y9J6tSpk+OQxtnnGK1YsUI33HCDateuLT8/P/Xs2VPffPPNees6ffq0Vq5cWWIPRPF7lJmZ6dT2s88+0y233KKGDRs6TTt8+LC+++67iw7BZ2vatKnq1aun3bt3l5jWqlUrBQYGatmyZRWa1+LFi9WqVSt5e3urXr16uvPOO/XTTz85picmJio5OVmS1KZNG9lsNg0ZMuS88x00aJCOHDmiVatWOcYVFBTozTff1O23317qc/Ly8jRhwgRFRkbKbrfr6quv1hNPPCFjjFO7/Px8jRs3TsHBwfLz89NNN92kAwcOlDrPn376SXfffbdCQ0Nlt9t1zTXX6KWXXjpv/QcPHtTQoUN15ZVXym63Kzw8XH369KmWQ5hwP+yRgdvr0KGDFixYoE2bNikxMVHS73+Y2rVrp3bt2ik7O1tff/21mjdv7pgWExOjoKAgSdI999yjl19+WbfeeqsmTJigTZs2KSUlRTt27Chx7s3OnTs1aNAgjRw5UsOHD9fVV1+tU6dOqUuXLtq3b5/uv/9+RUREaMGCBfr4448rtRzZ2dklftnabDZHncVSU1N1+vRpjRgxQna7XYGBgY5p/fv3V+PGjfXYY485/mBc7PJVVvEfh7p16zrGbd68WevXr9fAgQN15ZVXau/evZo/f74SExO1fft2+fj4qGPHjrr//vv173//W//4xz/UtGlTSXL8u2DBAiUnJyspKUkzZszQyZMnNX/+fEeQLe9Q1NatW1VQUKCWLVs6jW/YsKEiIiKc9tpt3rxZBQUFjvUnMzNTEyZMkCStX79eUul78yr6+ZX2vGPHjqlRo0alTm/ZsqVTmCpLWlqahg4dqjZt2iglJUVZWVmaO3euMjMz9fnnn6tOnTp6+OGHdfXVV+v55593HAor63XPFh0drfj4eC1atMix52rFihXKzs7WwIED9e9//9upvTFGN910k9asWaNhw4apRYsW+uCDD/TQQw/pp59+0uzZsx1t77nnHv3vf//T7bffrnbt2unjjz9Wz549S9SQlZWl66+/XjabTWPGjFFwcLBWrFihYcOGKScnR2PHji2z/n79+umbb77Rfffdp+joaB06dEirVq3Svn37qvwQJtyQAdzcN998YySZ6dOnG2OMOXPmjKldu7Z5+eWXjTHGhIaGmnnz5hljjMnJyTEeHh5m+PDhxhhjtm3bZiSZe+65x2meDz74oJFkPv74Y8e4qKgoI8msXLnSqe2cOXOMJPPGG284xuXl5ZmrrrrKSDJr1qwpt/7U1FQjqdSH3W53tNuzZ4+RZPz9/c2hQ4ec5jFlyhQjyQwaNMhpfFUsX1mSk5NN7dq1zeHDh83hw4fNrl27zBNPPGFsNpu59tprTVFRkaPtyZMnSzx/w4YNRpJ55ZVXHOMWL15c6nuWm5tr6tSp4/jcih08eNAEBASUGH+uF154wUgyX331VYlp/fv3N97e3qagoMAYY0xKSopp0KCBMcaYZ555xoSEhDjaFr9vP/30k2NcRT8/Y4yRZIYNG2YOHz5sDh06ZLZs2WK6detmJJlZs2aVWvuIESOMt7d3uctXUFBgQkJCzLXXXmtOnTrlGL98+XIjyUyePLlEvZs3by53nue2ffrpp42fn5/js+zfv7/p1KmTMeb3dadnz56O5y1dutRIMv/85z+d5nfrrbcam81mdu3aZYz5v/Vz1KhRTu1uv/12I8lMmTLFMW7YsGEmPDzc/Prrr05tBw4caAICAh
x1FX9PUlNTjTHGHDt2rNz3F398HFqC22vatKmCgoIcv6q/+OIL5eXlOXolFf+qln4/d6awsNDxi/r999+XJI0fP95pnsW/wM/tTdKgQYMS5zG8//77Cg8P16233uoY5+Pj43RSaUXMmzdPq1atcnqsWLGiRLt+/fopODi41Hnce++9JWqTLm75ypOXl6fg4GAFBwfrqquu0oMPPqj27dtr2bJlTiennn2ezZkzZ3TkyBFdddVVqlOnjj777LPzvs6qVat0/PhxDRo0SL/++qvj4eHhobZt25baM+1sR44ckeS8l6hYhw4ddOrUKW3dulXS/+3Nk6T27dvr0KFD+v777x3TGjRooIiIiBLzqejn9+KLLyo4OFghISFq3bq1Vq9erb/+9a8lPqNidevW1alTp3Ty5Mkyl2/Lli06dOiQRo0aJS8vL8f4nj17KiYmptReUZU1YMAAnTp1SsuXL1dubq6WL19e5mGl999/Xx4eHrr//vudxk+YMEHGGMf7Urx+ntvu3L0rxhi99dZb6t27t4wxTutAUlKSsrOzy1yPvL29VatWLaWnp+vYsWMXsuiwOA4twe3ZbDa1a9dOn3zyiYqKipSZmamQkBBHb5F27drp6aeflvR/50IUB5kff/xRNWrUKNGzJCwsTHXq1NGPP/7oNL60Xik//vijrrrqqhK9Sip7WOZPf/pThU4WLa2GsqZVxfKVx8vLS++++66k388Tmjlzpg4dOlTiBOFTp04pJSVFqamp+umnn5zOk8jOzj7v6xQHibJOePX3969Qveac8zMk5/Nk2rZtq/Xr1+uf//ynJOnaa6+Vv7+/MjMzFRkZqa1bt+q2224rdd4V/fz69OmjMWPGqKCgQJs3b9Zjjz2mkydPqkaN0n83FtdcXq+l4s+xtHUuJiam1K7TlRUcHKyuXbtq4cKFOnnypAoLC53C+7n1RERElDghuvgwYXG9xevnuYe3zl2Ow4cP6/jx43r++ef1/PPPl/qaxScxn8tut2vGjBmaMGGCQkNDdf3116tXr14aPHiwwsLCzr/gsDyCDCyhQ4cOevfdd/XVV185/aKWfg8yxcfm161bp4iICDVs2NDp+RXt2lpVPZQuRnk1lDWtupbPw8PD6QTapKQkxcTEaOTIkXrnnXcc4++77z6lpqZq7Nixio+PV0BAgGw2mwYOHOh0InZZitssWLCg1D8+Z/ekKU3xeSrHjh3TlVde6TQtLi5Ofn5+WrdunXr06KGjR4861p8aNWqobdu2WrdunRo1aqSCgoKLPtH3yiuvdLxnPXr0UL169TRmzBh16tRJt9xyS4n2x44dk4+Pj1use7fffruGDx+ugwcPqnv37pXq+XQxij//O++803Gy8rmKz4ErzdixY9W7d28tXbpUH3zwgR555BGlpKTo448/1nXXXVctNcN9cGgJlnD29WQyMzMdPZKk33t+2O12paena9OmTU7ToqKiVFRU5PjFXywrK0vHjx9XVFTUeV87KipKu3fvLvFrf+fOnRezSFWiKpavMsLDwzVu3Di9++67TheCe/PNN5WcnKwnn3xSt956q/785z+rQ4cOJa4oW1bgKv7FHhISoq5du5Z4FJ/kXZaYmBhJ0p49e0pM8/Dw0PXXX6/MzEytW7dO/v7+atasmWN68aHJc/fmVZWRI0eqUaNGmjRpUql7jPbs2ePYk1GW4s+xtHVu586dVfY533zzzapRo4Y2btxY5mGl4np+/vln5ebmOo3/9ttvneotXj/P7bF17nIU92gqLCws9fPv2rWrQkJCyq29UaNGmjBhgj788EN9/fXXKigo0JNPPlnhZYd1EWRgCa1bt5aXl5deffVV/fTTT057ZOx2u1q2bKl58+YpLy/P6Q9Rjx49JElz5sxxmt9TTz0lSaX2njhXjx499PPPP+vNN990jDt58mSZu8AvpapYvsq677775OPjo8cff9wxzsPDo8Qf6f/85z8qLCx0Gld83ZtzA05SUpL8/f312GOP6cyZMyVe8/Dhw+XW1KpVK9WqVUtbtmwpdXqHDh10+PBhpaamqm3btk6Hedq1a6edO3dq2bJlCgoKOm+oqCxPT09NmDBBO3bsKLWb9WeffXbeq1C3bt1aISEhevbZZ5261a9YsUI7duyoss/Z19dX8+fP19SpU9W7d+8y2/Xo0UOFhYWOQ7rFZs+eLZvN5uj5VPzvub2ezl1fPTw81K9fP7311lv6+uuvS7xeeZ//yZMnS1xpulGjRvLz86uW2z/A/XBoCZZQq1YttWnTRmvXrpXdblerVq2cprdr187x6+vsIBMXF6fk5GQ9//zzOn78uBISEvTpp5/q5ZdfVt++fZ2uCFyW4cOH6+mnn9bgwYO1detWhYeHa8GCBfLx8anUMqxYscLxi/Xc2s89FFZRVbF8lRUUFKShQ4fqmWee0Y4dO9S0aVP16tVLCxYsUEBAgGJjY7VhwwZ99NFHJbomt2jRQh4eHpoxY4ays7Nlt9vVuXNnhYSEaP78+brrrrvUsmVLDRw4UMHBwdq3b5/ee+89tW/fvsQfzbN5eXnpxhtv1EcffaRHH320xPTidWLDhg0l7u9T3OV348aN6t27d5l7jS7m8xsyZIgmT56sGTNmqG/fvo7xW7du1dGjR9WnT59yn1+zZk3NmDFDQ4cOVUJCggYNGuTofh0dHa1x48aV+/zKKOvQztl69+6tTp066eGHH9bevXsVFxenDz/8UMuWLdPYsWMde9hatGihQYMG6ZlnnlF2drbatWun1atXa9euXSXm+fjjj2vNmjVq27athg8frtjYWB09elSfffaZPvroIx09erTUWr777jt16dJFAwYMUGxsrDw9PbVkyRJlZWW53dW9UU1c1l8KqKSJEycaSaZdu3Ylpr399ttGkvHz8zO//fab07QzZ86YadOmmQYNGpiaNWuayMhIM3HiRHP69Gmndud2MT3bjz/+aG666Sbj4+Nj6tWrZx544AGzcuXKi+5+rbO6kRZ3Ky2tG2lx9+vDhw+XmFYVy1ea4u7Xpdm9e7fx8PAwycnJxpjfu8AOHTrU1KtXz/j6+pqkpCTz7bffmqioKEebYv/9739Nw4YNjYeHR4n3b82aNSYpKckEBAQYLy8v06hRIzNkyBCzZcuW89b79ttvG5vNZvbt21diWl5envH09DSSzIcfflhievPmzY0kM2PGjBLTKvr5GfN79+vRo0eXWt/UqVNLLO/f/vY3U79+faeu7OV5/fXXzXXXXWfsdrsJDAw0d9xxhzlw4ECp9Va2+3V5Slt3cnNzzbhx40xERISpWbOmady4sZk1a1aJZTl16pS5//77TVBQkKldu7bp3bu32b9/f4nu18YYk5WVZUaPHm0iIyNNzZo1TVhYmOnSpYt5/vnnHW3O7X7966+/mtGjR5uYmBhTu3ZtExAQYNq2bet0uQT8sdmMKeWgLQBYTGFhoWJjYzVgwABNnz7d1eWcV35+vqKjo/X3v/9dDzzwgKvLASyLc2QA/CF4eHjo0Ucf1bx583TixAlXl3NeqampqlmzZolrAwGoH
PbIAAAAy2KPDAAAsCyXBpmUlBS1adNGfn5+CgkJUd++fUtcXyAxMdFxl9ziB7tiAQCA5OIgk5GRodGjR2vjxo1atWqVzpw5oxtvvFF5eXlO7YYPH65ffvnF8Zg5c6aLKgYAAO7EpdeRWblypdNwWlqaQkJCtHXrVnXs2NEx3sfHh3tmAACAEtzqgnjFN5cLDAx0Gv/qq6/qf//7n8LCwtS7d2898sgjZV6MLD8/3+lqjkVFRTp69KiCgoIqfD8aAADgWsYY5ebmKiIiosybrkpu1GupqKhIN910k44fP+50J9fnn39eUVFRioiI0Jdffqm//e1v+tOf/qS333671PlMnTpV06ZNu1RlAwCAarR///4SN4M9m9sEmb/85S9asWKF1q1bV27BH3/8sbp06aJdu3aVuDW8VHKPTHZ2turXr6/9+/fL39+/WmoHAABVKycnR5GRkTp+/LgCAgLKbOcWh5bGjBmj5cuX65NPPik3xEhS27ZtJanMIGO322W320uM9/f3J8gAAGAx5zstxKVBxhij++67T0uWLFF6eroaNGhw3uds27ZNkhQeHl7N1QEAAHfn0iAzevRoLVy4UMuWLZOfn58OHjwoSQoICJC3t7d2796thQsXqkePHgoKCtKXX36pcePGqWPHjmrevLkrSwcAAG7ApefIlLW7KDU1VUOGDNH+/ft155136uuvv1ZeXp4iIyN18803a9KkSRU+TJSTk6OAgABlZ2dzaAkAAIuo6N9vlx9aKk9kZKQyMjIuUTUAAMBquNcSAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLE9XF2BlNpurK4CrGePa17dNYyW83JkpLl4JARdjjwwAALAsggwAALAsggwAALAsggwAALAsggwAALAsggwAALAsggwAALAsggwAALAsggwAALAsggwAALAsggwAALAsggwAALAsggwAALAsggwAALAsggwAALAsT1cXAACwOJvN1RXAlYxx6cuzRwYAAFgWQQYAAFgWQQYAAFgWQQYAAFgWQQYAAFgWQQYAAFgWQQYAAFgWQQYAAFgWQQYAAFgWQQYAAFgWQQYAAFgWQQYAAFgWQQYAAFgWQQYAAFgWQQYAAFgWQQYAAFgWQQYAAFgWQQYAAFgWQQYAAFgWQQYAAFiWS4NMSkqK2rRpIz8/P4WEhKhv377auXOnU5vTp09r9OjRCgoKkq+vr/r166esrCwXVQwAANyJS4NMRkaGRo8erY0bN2rVqlU6c+aMbrzxRuXl5TnajBs3Tu+++64WL16sjIwM/fzzz7rllltcWDUAAHAXNmOMcXURxQ4fPqyQkBBlZGSoY8eOys7OVnBwsBYuXKhbb71VkvTtt9+qadOm2rBhg66//vrzzjMnJ0cBAQHKzs6Wv79/ldZrs1Xp7GBBrv722KaxEl7uzBQ32ISzMby8VdOGsKJ/v93qHJns7GxJUmBgoCRp69atOnPmjLp27epoExMTo/r162vDhg2lziM/P185OTlODwAA8MfkNkGmqKhIY8eOVfv27XXttddKkg4ePKhatWqpTp06Tm1DQ0N18ODBUueTkpKigIAAxyMyMrK6SwcAAC7iNkFm9OjR+vrrr/Xaa69d1HwmTpyo7Oxsx2P//v1VVCEAAHA3nq4uQJLGjBmj5cuX65NPPtGVV17pGB8WFqaCggIdP37caa9MVlaWwsLCSp2X3W6X3W6v7pIBAIAbcOkeGWOMxowZoyVLlujjjz9WgwYNnKa3atVKNWvW1OrVqx3jdu7cqX379ik+Pv5SlwsAANyMS/fIjB49WgsXLtSyZcvk5+fnOO8lICBA3t7eCggI0LBhwzR+/HgFBgbK399f9913n+Lj4yvUYwkAAPyxuTTIzJ8/X5KUmJjoND41NVVDhgyRJM2ePVs1atRQv379lJ+fr6SkJD3zzDOXuFIAAOCO3Oo6MtWB68igOrn628N1ZMB1ZOByXEcGAADgwhBkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZRFkAACAZbk0yHzyySfq3bu3IiIiZLPZtHTpUqfpQ4YMkc1mc3p069bNNcUCAAC349Igk5eXp7i4OM2bN6/MNt26ddMvv/zieCxatOgSVggAANyZpytfvHv37urevXu5bex2u8LCwi5RRQAAwErc/hyZ9PR0hYSE6Oqrr9Zf/vIXHTlypNz2+fn5ysnJcXoAAIA/JrcOMt26ddMrr7yi1atXa8aMGcrIyFD37t1VWFhY5nNSUlIUEBDgeERGRl7CigEAwKVkM8YYVxchSTabTUuWLFHfvn3LbPPDDz+oUaNG+uijj9SlS5dS2+Tn5ys/P98xnJOTo8jISGVnZ8vf37+Ka67S2cGCXP3tsU1jJbzcmSlusAlnY3h5q6YNYU5OjgICAs7799ut98icq2HDhqpXr5527dpVZhu73S5/f3+nBwAA+GOyVJA5cOCAjhw5ovDwcFeXAgAA3IBLey2dOHHCae/Knj17tG3bNgUGBiowMFDTpk1Tv379FBYWpt27d+uvf/2rrrrqKiUlJbmwagAA4C5cGmS2bNmiTp06OYbHjx8vSUpOTtb8+fP15Zdf6uWXX9bx48cVERGhG2+8UdOnT5fdbndVyQAAwI24NMgkJiaqvHONP/jgg0tYDQAAsBpLnSMDAABwtgveI7Nv3z79+OOPOnnypIKDg3XNNddwyAcAAFxSlQoye/fu1fz58/Xaa6/pwIEDToeFatWqpRtuuEEjRoxQv379VKMGO3sAAED1qnDauP/++xUXF6c9e/bon//8p7Zv367s7GwVFBTo4MGDev/999WhQwdNnjxZzZs31+bNm6uzbgAAgIrvkaldu7Z++OEHBQUFlZgWEhKizp07q3PnzpoyZYpWrlyp/fv3q02bNlVaLAAAwNkqHGRSUlIqPNNu3bpdUDEA
AACVcdHdr3/99Vdt2rRJhYWFatOmDVfdBQAAl8xFBZm33npLw4YNU5MmTXTmzBnt3LlT8+bN09ChQ6uqPgAAgDJVqmvRiRMnnIanTZumTz/9VJ9++qk+//xzLV68WA8//HCVFggAAFCWSgWZVq1aadmyZY5hT09PHTp0yDGclZWlWrVqVV11AAAA5ajUoaUPPvhAo0ePVlpamubNm6e5c+fqtttuU2FhoX777TfVqFFDaWlp1VQqAACAs0oFmejoaL333ntatGiREhISdP/992vXrl3atWuXCgsLFRMTIy8vr+qqFQAAwMkFXX530KBB2rx5s7744gslJiaqqKhILVq0IMQAAIBLqtK9lt5//33t2LFDcXFxeuGFF5SRkaE77rhD3bt316OPPipvb+/qqBMAAKCESu2RmTBhgoYOHarNmzdr5MiRmj59uhISEvTZZ5/Jy8tL1113nVasWFFdtQIAADixmbPv/HgeQUFB+vDDD9WqVSsdPXpU119/vb777jvH9O3bt2vkyJFau3ZttRR7IXJychQQEKDs7Gz5+/tX6bxttiqdHSyo4t+e6mGbxkp4uTNTXLwSSmwML3fVtCGs6N/vSu2RqV27tvbs2SNJ2r9/f4lzYmJjY90qxAAAgD+2SgWZlJQUDR48WBEREUpISND06dOrqy4AAIDzqtTJvnfccYe6deumH374QY0bN1adOnWqqSwAAIDzq3SvpaCgIAUFBVVHLQAAAJVS4UNL9957rw4cOFChtq+//rpeffXVCy4KAACgIiq8RyY4OFjXXHON2rdvr969e6t169aKiIiQl5eXjh07pu3bt2vdunV67bXXFBERoeeff7466wYAAKhc9+usrCy98MILeu2117R9+3anaX5+furatavuuecedevWrcoLvVB0v0Z1ovs1XI3u13A5F3e/rlSQOduxY8e0b98+nTp1SvXq1VOjRo1kc8OVmSCD6kSQgasRZOByLg4ylT7Zt1jdunVVt27dC306AADARbugm0YCAAC4A4IMAACwLIIMAACwLIIMAACwrCoNMqdPn9YTTzxRlbMEAAAoU6WDzOHDh7V8+XJ9+OGHKiwslCSdOXNGc+fOVXR0tB5//PEqLxIAAKA0lep+vW7dOvXq1Us5OTmy2Wxq3bq1UlNT1bdvX3l6emrq1KlKTk6urloBAACcVGqPzKRJk9SjRw99+eWXGj9+vDZv3qybb75Zjz32mLZv3657771X3t7e1VUrAACAk0pd2TcoKEhr165VbGysTp06JV9fX7399tvq06dPddZ4UbiyL6oTV/aFq3FlX7ici6/sW6k9MseOHVO9evUkSd7e3vLx8dG11157cZUCAABcoErfomD79u06ePCgJMkYo507dyovL8+pTfPmzaumOgAAgHJUOsh06dJFZx+N6tWrlyTJZrPJGCObzebozQQAAFCdKhVk9uzZU111AAAAVFqlgkxUVFR11QEAAFBplTrZd+bMmTp16pRjODMzU/n5+Y7h3NxcjRo1quqqAwAAKEelgszEiROVm5vrGO7evbt++uknx/DJkyf13HPPVV11AAAA5ahUkDn3kjOVuAQNAABAlePu1wAAwLIIMgAAwLIqfR2ZF154Qb6+vpKk3377TWlpaY6r/Z59/gwAAEB1q9S9lqKjo2WrwD013Ol6M9xrCdXJ1aeJca8lcK8luJyL77VUqT0ye/fuvdi6AAAAqkylzpFxpz0tAAAAlQoyjRo1UoMGDXT33XdrwYIFOnDgQHXVBQAAcF6VOrT08ccfKz09Xenp6Vq0aJEKCgrUsGFDde7cWZ06dVKnTp0UGhpaXbUCAAA4qVSQSUxMVGJioiTp9OnTWr9+vSPYvPzyyzpz5oxiYmL0zTffVEetAAAATird/bqYl5eXOnfurA4dOqhTp05asWKFnnvuOX377bdVWR8AAECZKh1kCgoKtHHjRq1Zs0bp6enatGmTIiMj1bFjRz399NNKSEiojjoBAABKqFSQ6dy5szZt2qQGDRooISFBI0eO1MKFCxUeHl5d9QEAAJSpUkFm7dq1Cg8PV+fOnZWYmKiEhAQFBQVVV20AAADlqlT36+PHj+v555+Xj4+PZsyYoYiICDVr1kxjxozRm2++qcOHD1dXnQAAACVU6hYF58rNzdW6desc58t88cUXaty4sb7++uuqrPGicIsCVCduUQBX4xYFcDkX36Lgou5+Xbt2bQUGBiowMFB169aVp6enduzYcTGzBAAAqLBKnSNTVFSkLVu2KD09XWvWrFFmZqby8vJ0xRVXqFOnTpo3b546depUXbUCAAA4qVSQqVOnjvLy8hQWFqZOnTpp9uzZSkxMVKNGjaqrPgAAgDJVKsjMmjVLnTp1UpMmTaqrHgAAgAqrVJAZOXJkddUBAABQaRd1si8AAIAruTTIfPLJJ+rdu7ciIiJks9m0dOlSp+nGGE2ePFnh4eHy9vZW165d9f3337umWAAA4HZcGmTy8vIUFxenefPmlTp95syZ+ve//61nn31WmzZtUu3atZWUlKTTp09f4koBAIA7uuC7X1eF7t27q3v37qVOM8Zozpw5mjRpkvr06SNJeuWVVxQaGqqlS5dq4MCBl7JUAADghtz2HJk9e/bo4MGD6tq1q2NcQECA2rZtqw0bNpT5vPz8fOXk5Dg9AADAH5PbBpmDBw9KkkJDQ53Gh4aGOqaVJiUlRQEBAY5HZGRktdYJAABcx22DzIWaOHGisrOzHY/9+/e7uiQAAFBN3DbIhIWFSZKysrKcxmdlZTmmlcZut8vf39/pAQAA/pjcNsg0aNBAYWFhWr16tWNcTk6ONm3apPj4eBdWBgAA3IVLey2dOHFCu3btcgzv2bNH27ZtU2BgoOrXr6+xY8fqn//8pxo3bqwGDRrokUceUUREhPr27eu6ogEAgNtwaZDZsmWL092yx48fL0lKTk5WWlqa/vrXvyovL08jRozQ8ePH1aFDB61cuVJeXl6uKhkAALgRmzHGuLqI6pSTk6OAgABlZ2dX+fkyNluVzg4W5Opvj20aK+Hlzkxxg004G8PLWzVtCCv699ttz5EBAAA4H4IMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLIIMAACwLII
MAACwLIIMAACwLIIMAACwLLcOMlOnTpXNZnN6xMTEuLosAADgJjxdXcD5XHPNNfroo48cw56ebl8yAAC4RNw+FXh6eiosLMzVZQAAADfk1oeWJOn7779XRESEGjZsqDvuuEP79u0rt31+fr5ycnKcHgAA4I/JrYNM27ZtlZaWppUrV2r+/Pnas2ePbrjhBuXm5pb5nJSUFAUEBDgekZGRl7BiAABwKdmMMcbVRVTU8ePHFRUVpaeeekrDhg0rtU1+fr7y8/Mdwzk5OYqMjFR2drb8/f2rtB6brUpnBwty9bfHNo2V8HJnprjBJpyN4eWtmjaEOTk5CggIOO/fb7c/R+ZsderUUZMmTbRr164y29jtdtnt9ktYFQAAcBW3PrR0rhMnTmj37t0KDw93dSkAAMANuHWQefDBB5WRkaG9e/dq/fr1uvnmm+Xh4aFBgwa5ujQAAOAG3PrQ0oEDBzRo0CAdOXJEwcHB6tChgzZu3Kjg4GBXlwYAANyAWweZ1157zdUlAAAAN+bWh5YAAADKQ5ABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWRZABAACWZYkgM2/ePEVHR8vLy0tt27bVp59+6uqSAACAG3D7IPP6669r/PjxmjJlij777DPFxcUpKSlJhw4dcnVpAADAxdw+yDz11FMaPny4hg4dqtjYWD377LPy8fHRSy+95OrSAACAi7l1kCkoKNDWrVvVtWtXx7gaNWqoa9eu2rBhgwsrAwAA7sDT1QWU59dff1VhYaFCQ0OdxoeGhurbb78t9Tn5+fnKz893DGdnZ0uScnJyqq9QXLZcvlqddvHrw+XYtsHlqmkdLF63jTHltnPrIHMhUlJSNG3atBLjIyMjXVAN/ugCAlxdAS53AY+zEsLFqnlDmJubq4ByXsOtg0y9evXk4eGhrKwsp/FZWVkKCwsr9TkTJ07U+PHjHcNFRUU6evSogoKCZLPZqrXey01OTo4iIyO1f/9++fv7u7ocXIZYB+FqrIPVxxij3NxcRURElNvOrYNMrVq11KpVK61evVp9+/aV9HswWb16tcaMGVPqc+x2u+x2u9O4OnXqVHOllzd/f3++wHAp1kG4Gutg9ShvT0wxtw4ykjR+/HglJyerdevW+tOf/qQ5c+YoLy9PQ4cOdXVpAADAxdw+yNx22206fPiwJk+erIMHD6pFixZauXJliROAAQDA5cftg4wkjRkzpsxDSXAdu92uKVOmlDiUB1wqrINwNdZB17OZ8/VrAgAAcFNufUE8AACA8hBkAACAZRFkAACAZRFk3ERaWtp5r3czZMgQx/V0LldTp05VixYtHMO8J+7ByuvvuesUcD4VWZejo6M1Z86cS1JPZSQmJmrs2LGuLqNKWaLXEn43d+7c895z4nLDe2IdfFaoDomJiWrRooXbhYbNmzerdu3ari7jskCQsZCKXOGwOhljVFhYKE9P91ltXP2eoOJc/VlV5/p75swZ1axZs8rnC+sKDg526esXFBSoVq1aVT7fwsJC2Ww21ajhPgd03KeSP6Dly5erTp06KiwslCRt27ZNNptNf//73x1t7rnnHt15552O4Q8++EBNmzaVr6+vunXrpl9++cUx7dzdmW+++aaaNWsmb29vBQUFqWvXrsrLy3NqO23aNAUHB8vf31/33nuvCgoKHM8vKipSSkqKGjRoIG9vb8XFxenNN990TE9PT5fNZtOKFSvUqlUr2e12rVu3rsRyFu+af+mll1S/fn35+vpq1KhRKiws1MyZMxUWFqaQkBD961//cnre8ePHdc899zjq69y5s7744gunNo8//rhCQ0Pl5+enYcOG6fRp59s9n/uelLY7t0WLFpo6dapj2Gaz6bnnnlOvXr3k4+Ojpk2basOGDdq1a5cSExNVu3ZttWvXTrt37y6xrJeTy2X9PVdRUZEeffRRXXnllbLb7Y6LcBbbu3evbDabXn/9dSUkJMjLy0uvvvqqjhw5okGDBumKK66Qj4+PmjVrpkWLFlXiHbeWvLw8DR48WL6+vgoPD9eTTz7pdNjCZrNp6dKlTs+pU6eO0tLSHMN/+9vf1KRJE/n4+Khhw4Z65JFHdObMGcf04m3LggULFB0drYCAAA0cOFC5ubmSfl9PMjIyNHfuXNlsNtlsNu3du1dDhgxxDJ/9SE9PL3VZHnzwQfXq1csxPGfOHNlsNqfP/aqrrtILL7zg9LwnnnhC4eHhCgoK0ujRo51qP3tbZIzR1KlTVb9+fdntdkVEROj+++93ajt9+nQNGjRItWvX1hVXXKF58+Y5vdb5tpfF79ULL7ygBg0ayMvLq9RlPdexY8c0ePBg1a1bVz4+Purevbu+//57x/Tiw8bvvPOOYmNjZbfbtW/fPm3evFl//vOfVa9ePQUEBCghIUGfffZZhV6zqhFkqtENN9yg3Nxcff7555KkjIwM1atXz+nLlJGRocTEREnSyZMn9cQTT2jBggX65JNPtG/fPj344IOlzvuXX37RoEGDdPfdd2vHjh1KT0/XLbfc4rTrfvXq1Y5pixYt0ttvv+10Z/CUlBS98sorevbZZ/XNN99o3LhxuvPOO5WRkeH0Wn//+9/1+OOPa8eOHWrevHmp9ezevVsrVqzQypUrtWjRIr344ovq2bOnDhw4oIyMDM2YMUOTJk3Spk2bHM/p37+/Dh06pBUrVmjr1q1q2bKlunTpoqNHj0qS3njjDU2dOlWPPfaYtmzZovDwcD3zzDMV/wDKMX36dA0ePFjbtm1TTEyMbr/9do0cOVITJ07Uli1bZIy57C/CeDmtv2ebO3eunnzyST3xxBP68ssvlZSUpJtuuslp41483wceeEA7duxQUlKSTp8+rVatWum9997T119/rREjRuiuu+7Sp59+et7XtKKHHnpIGRkZWrZsmT788EOlp6dX+g+Zn5+f0tLStH37ds2dO1f//e9/NXv2bKc2u3fv1tKlS7V8+XItX75cGRkZevzxxyX9/lnFx8dr+PDh+uWXX/TLL78oMjJSc+fOdQz/8ssveuCBBxQSEqKYmJhS60hISNC6descof3cdf2nn37S7t27Heu6JK1Zs0a7d+/WmjVr9PLLLystLc0ppJ3trb
fe0uzZs/Xcc8/p+++/19KlS9WsWTOnNrNmzVJcXJw+//xzx7q1atUqx/TzbS8ladeuXXrrrbf09ttva9u2bRX5CDRkyBBt2bJF77zzjjZs2CBjjHr06OEUyk6ePKkZM2bohRde0DfffKOQkBDl5uYqOTlZ69at08aNG9W4cWP16NHDETIvKYNq1bJlSzNr1ixjjDF9+/Y1//rXv0ytWrVMbm6uOXDggJFkvvvuO5OammokmV27djmeO2/ePBMaGuoYTk5ONn369DHGGLN161Yjyezdu7fU101OTjaBgYEmLy/PMW7+/PnG19fXFBYWmtOnTxsfHx+zfv16p+cNGzbMDBo0yBhjzJo1a4wks3Tp0nKXccqUKcbHx8fk5OQ4xiUlJZno6GhTWFjoGHf11VeblJQUY4wxa9euNf7+/ub06dNO82rUqJF57rnnjDHGxMfHm1GjRjlNb9u2rYmLiyv1PTHGmKioKDN79myn58TFxZkpU6Y4hiWZSZMmOYY3bNhgJJkXX3zRMW7RokXGy8ur3OW+HFwu6+/Z61RERIT517/+5dSmTZs2jnVxz549RpKZM2dOufM1xpiePXuaCRMmnLed1eTm5ppatWqZN954wzHuyJEjxtvb2zzwwAPGmN+/Z0uWLHF6XkBAgElNTS1zvrNmzTKtWrVyDJe2bXnooYdM27ZtHcMJCQmO1yzNW2+9Zby8vMy6devKbHPs2DFTo0YNs3nzZlNUVGQCAwNNSkqK43X+97//mSuuuMLRPjk52URFRZnffvvNMa5///7mtttucwyfvS168sknTZMmTUxBQUGprx8VFWW6devmNO62224z3bt3N8ZUbHs5ZcoUU7NmTXPo0KEyl9MY5/fru+++M5JMZmamY/qvv/5qvL29HZ9t8Xd727Zt5c63sLDQ+Pn5mXfffbfcdtWBPTLVLCEhQenp6TLGaO3atbrlllvUtGlTrVu3ThkZGYqIiFDjxo0lST4+PmrUqJHjueHh4Tp06FCp842Li1OXLl3UrFkz9e/fX//973917NixEm18fHwcw/Hx8Tpx4oT279+vXbt26eTJk/rzn/8sX19fx+OVV14pcUildevWjv+f3fbee+91jI+Ojpafn59jODQ0VLGxsU7HUUNDQx3L88UXX+jEiRMKCgpymueePXscr79jxw61bdvWqZb4+Phy3u2KO/uXefF9u87+hRQaGqrTp08rJyenSl7Pqi6X9bdYTk6Ofv75Z7Vv395pfPv27bVjx44y5yv9fu7A9OnT1axZMwUGBsrX11cffPCB9u3bV+p7YGW7d+9WQUGB0/czMDBQV199daXm8/rrr6t9+/YKCwuTr6+vJk2aVOL9OnfbUt56da7PP/9cd911l55++mnHZ/rYY485rQf79u1TnTp1FBcXp/T0dH311VeqVauWRowYoc8//1wnTpxQRkaGEhISnOZ9zTXXyMPDo0J19e/fX6dOnVLDhg01fPhwLVmyRL/99ptTm3O3bfHx8Y51riLbS0mKiopynJuzdu1ap7avvvpqibp27NghT09Pp88xKChIV199tdP6XqtWrRJ7M7OysjR8+HA1btxYAQEB8vf314kTJ1yyvrvPWZt/UImJiXrppZf0xRdfqGbNmoqJiVFiYqLS09N17Ngxpy/HuScL2my2Mnt5eHh4aNWqVVq/fr0+/PBD/ec//9HDDz+sTZs2qUGDBuet68SJE5Kk9957T1dccYXTtHPvGXL2mfdn7648+5b1pdVe2riioiLH64eHh5d6zPp83XjLU6NGjRLv2dm7SEur12azlTmuuN7L1eWy/l6Ic3ukzJo1S3PnztWcOXPUrFkz1a5dW2PHjnU6r+dyUtrnf/Z3ccOGDbrjjjs0bdo0JSUlKSAgQK+99pqefPJJp+eUtx0pz8GDB3XTTTfpnnvu0bBhwxzj7733Xg0YMMAxHBERIUmO9dputyshIUGBgYFOoX3ChAkXXFdkZKR27typjz76SKtWrdKoUaM0a9YsZWRkVOgk8YpuL89eJ1u3bu20vl/MjZa9vb0d28RiycnJOnLkiObOnauoqCjZ7XbFx8e7ZH0nyFSz4vMMZs+e7djoJyYm6vHHH9exY8dKfDkqw2azqX379mrfvr0mT56sqKgoLVmyROPHj5f0e4o/deqUvL29JUkbN26Ur6+vIiMjFRgY6Dhp69xfGuW56qqrLrjes7Vs2VIHDx6Up6enoqOjS23TtGlTbdq0SYMHD3aM27hxY7nzDQ4OdjrBNCcnR3v27KmSmi9Hl9v66+/vr4iICGVmZjrNNzMzU3/605/KfW5mZqb69OnjOPm5qKhI3333nWJjYytcn1U0atRINWvW1KZNm1S/fn1Jv580+t133znet3O/i99//71OnjzpGF6/fr2ioqL08MMPO8b9+OOPla6lVq1ajnNbip0+fVp9+vRRTEyMnnrqKadpgYGBCgwMLDGfhIQEvfTSS/L09FS3bt0k/b6uL1q0SN99953T+TEXwtvbW71791bv3r01evRoxcTE6KuvvlLLli0lldy2bdy4UU2bNpVUse1laa93vvW9adOm+u2337Rp0ya1a9dOknTkyBHt3LnzvOttZmamnnnmGfXo0UOStH//fv36668Vqq2qEWSqWd26ddW8eXO9+uqrevrppyVJHTt21IABA3TmzJlKbYTPtmnTJq1evVo33nijQkJCtGnTJh0+fNix4ku/d78bNmyYJk2apL1792rKlCkaM2aMatSoIT8/Pz344IMaN26cioqK1KFDB2VnZyszM1P+/v5KTk6ukuUvS9euXRUfH6++fftq5syZatKkiX7++We99957uvnmm9W6dWs98MADGjJkiFq3bq327dvr1Vdf1TfffKOGDRuWOd/OnTsrLS1NvXv3Vp06dTR58mSn3b+onMtx/X3ooYc0ZcoUNWrUSC1atFBqaqq2bdtW6q75szVu3Fhvvvmm1q9fr7p16+qpp55SVlbWHzLI+Pr6atiwYXrooYcUFBSkkJAQPfzww06Hkjt37qynn35a8fHxKiws1N/+9jenvQ+NGzfWvn379Nprr6lNmzZ67733tGTJkkrXEh0drU2bNmnv3r3y9fVVYGCgRo4cqf3792v16tU6fPiwo21gYGCZXZI7duyo3NxcLV++3HEycWJiom699VaFh4erSZMmla6tWFpamgoLC9W2bVv5+Pjof//7n7y9vRUVFeVok5mZqZkzZ6pv375atWqVFi9erPfee09SxbaXF6Jx48bq06ePhg8frueee05+fn76+9//riuuuEJ9+vQ573MXLFig1q1bKycnRw899JDjR8elRpC5BBISErRt2zZHog8MDFRsbKyysrIqfUy5mL+/vz755BPNmTNHOTk5ioqK0pNPPqnu3bs72nTp0kWNGzdWx44dlZ+fr0GDBjl1Q54+fbqCg4OVkpKiH374QXXq1FHLli31j3/842IWt0JsNpvef/99Pfzwwxo6dKgOHz6ssLAwdezY0bEL9LbbbtPu3bv117/+VadPn1a/fv30l7/8RR988EGZ8504c
aL27NmjXr16KSAgQNOnT2ePzEW63Nbf+++/X9nZ2ZowYYIOHTqk2NhYvfPOO45zgcoyadIk/fDDD0pKSpKPj49GjBihvn37Kjs7+6LqcVezZs3SiRMn1Lt3b/n5+WnChAlOy/rkk09q6NChuuGGGxQREaG5c+dq69atjuk33XSTxo0bpzFjxig/P189e/bUI4884vQZV8SDDz6o5ORkxcbG6tSpU9qzZ48yMjL0yy+/lAiRa9asKXPPSt26ddWsWTNlZWU5ejd17NhRRUVFFxzYi9WpU0ePP/64xo8fr8LCQjVr1kzvvvuugoKCHG0mTJigLVu2aNq0afL399dTTz2lpKQkSRXbXl6o1NRUPfDAA+rVq5cKCgrUsWNHvf/+++c95PXiiy9qxIgRatmypSIjI/XYY4+V2UuxutlMWQexYWlDhgzR8ePHS1zHAbAC1l9rcter7Lq76OhojR079g9364BLhV5LAADAsggyAADAsji0BAAALIs9MgAAwLIIMgAAwLIIMgAAwLIIMgAAwLIIMgAuualTp6pFixauLqNU7lwbgJIIMgCqVO/evR33qjnX2rVrZbPZdMstt2j16tWVmu/bb7+tG2+8UUFBQbLZbE43xCt2+vRpjR492nGX4H79+ikrK8upzb59+9SzZ0/5+PgoJCREDz30UIk7EQOwDoIMgCo1bNgwrVq1SgcOHCgxLTU1Va1bt1bz5s2dLs9eEXl5eerQoYNmzJhRZptx48bp3Xff1eLFi5WRkaGff/5Zt9xyi2N6YWGhevbsqYKCAq1fv14vv/yy0tLSNHny5ErVAsCNGACoQmfOnDGhoaFm+vTpTuNzc3ONr6+vmT9/vpkyZYqJi4tzTEtOTjZ9+vQxs2bNMmFhYSYwMNCMGjXKFBQUlJj/nj17jCTz+eefO40/fvy4qVmzplm8eLFj3I4dO4wks2HDBmOMMe+//76pUaOGOXjwoKPN/Pnzjb+/v8nPzzfGGEdtr7zyiomKijL+/v7mtttuMzk5ORf71gCoBuyRAVClPD09NXjwYKWlpcmcdb3NxYsXq7CwUIMGDSr1eWvWrNHu3bu1Zs0ax56StLS0Cr/u1q1bdebMGXXt2tUxLiYmRvXr19eGDRskSRs2bFCzZs2cbrSXlJSknJwcffPNN45xu3fv1tKlS7V8+XItX75cGRkZjjsiA3AvBBkAVe7uu+/W7t27lZGR4RiXmpqqfv36KSAgoNTn1K1bV08//bRiYmLUq1cv9ezZs1Ln0Rw8eFC1atVSnTp1nMaHhobq4MGDjjbn3i24eLi4jSQVFRUpLS1N1157rW644QbdddddlT6nB8ClQZABUOViYmLUrl07vfTSS5KkXbt2ae3atRo2bFiZz7nmmmvk4eHhGA4PD9ehQ4eqvdbSREdHy8/Pzy1qAVA+ggyAajFs2DC99dZbys3NVWpqqho1aqSEhIQy29esWdNp2GazqaioqMKvFxYWpoKCAh0/ftxpfFZWlsLCwhxtzu3FVDxc3KYqagFw6RBkAFSLAQMGqEaNGlq4cKFeeeUV3X333bLZbNX2eq1atVLNmjWdDgHt3LlT+/btU3x8vCQpPj5eX331ldPelVWrVsnf31+xsbHVVhuA6uPp6gIA/DH5+vrqtttu08SJE5WTk6MhQ4Zc1PyOHj2qffv26eeff5b0e0iRft+TEhYWpoCAAA0bNkzjx49XYGCg/P39dd999yk+Pl7XX3+9JOnGG29UbGys7rrrLs2cOVMHDx7UpEmTNHr0aNnt9ouqD4BrsEcGQLUZNmyYjh07pqSkJEVERFzUvN555x1dd9116tmzpyRp4MCBuu666/Tss8862syePVu9evVSv3791LFjR4WFhentt992TPfw8NDy5cvl4eGh+Ph43XnnnRo8eLAeffTRi6oNgOvYzNn9IwEAACyEPTIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCy/h+KIXuW8TPLtQAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "\n", + "\n", + "model_names = [\"whisper-medium\", \"whisper-lora\",\"quantz-whisper-lora\"]\n", + "wer_values = [normalized_wer,lora_wer,wer_quantz]\n", + "\n", + "\n", + "bar_colors = ['blue', 'green',\"red\"]\n", + "# Create a bar chart\n", + "plt.bar(model_names, wer_values,color=bar_colors)\n", + "plt.xlabel(\"Vin100h\")\n", + "plt.ylabel(\"WER (%)\")\n", + "plt.title(\"Word Error Rate (WER) of Models\")\n", + "plt.ylim(0, 25) # Set y-axis range to 0-100\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "The model 'PeftModel' is not supported for . Supported models are ['SpeechEncoderDecoderModel', 'Speech2TextForConditionalGeneration', 'SpeechT5ForSpeechToText', 'WhisperForConditionalGeneration', 'Data2VecAudioForCTC', 'HubertForCTC', 'MCTCTForCTC', 'SEWForCTC', 'SEWDForCTC', 'UniSpeechForCTC', 'UniSpeechSatForCTC', 'Wav2Vec2ForCTC', 'Wav2Vec2ConformerForCTC', 'WavLMForCTC'].\n" + ] + } + ], + "source": [ + "import torch\n", + "from transformers import (\n", + " AutomaticSpeechRecognitionPipeline,\n", + " WhisperForConditionalGeneration,\n", + " WhisperTokenizer,\n", + " WhisperProcessor,\n", + ")\n", + "from peft import PeftModel, PeftConfig\n", + "\n", + "\n", + "peft_model_id = \"DuyTa/vi-whisper-medium-Lora\"\n", + "language = \"Vietnamese\"\n", + "task = \"transcribe\"\n", + "\n", + "peft_config = PeftConfig.from_pretrained(peft_model_id)\n", + "model = WhisperForConditionalGeneration.from_pretrained(\n", + " peft_config.base_model_name_or_path\n", + ")\n", + "peft_model = PeftModel.from_pretrained(model, peft_model_id)\n", + "peft_model.to(\"cuda:0\").half()\n", + "\n", + "processor = WhisperProcessor.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task)\n", + "\n", + "pipe = AutomaticSpeechRecognitionPipeline(model=peft_model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, batch_size=8, torch_dtype=torch.float16, device=\"cuda:0\")\n", + "\n", + "def transcribe(audio, return_timestamps=False):\n", + " text = pipe(audio, chunk_length_s=30, return_timestamps=return_timestamps, generate_kwargs={\"language\": language, \"task\": task},)[\"text\"]\n", + " return text" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from transformers import pipeline\n", + "from datasets import load_dataset\n", + "\n", + "device = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n", + "\n", + "pipe = pipeline(\n", + " \"automatic-speech-recognition\",\n", + " model=\"openai/whisper-medium\",\n", + " chunk_length_s=30,\n", + " device=device,\n", + ")\n", + "\n", + "def transcribe_hf(audio, return_timestamps=False):\n", + " text = pipe(audio, chunk_length_s=30, return_timestamps=return_timestamps, generate_kwargs={\"language\": language, \"task\": task},max_new_tokens =448)[\"text\"]\n", + " return text\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "def transcribe(audio, return_timestamps=False):\n", + " text = pipe(audio, chunk_length_s=30, return_timestamps=return_timestamps, generate_kwargs={\"language\": language, \"task\": task}, max_new_tokens =448)[\"text\"]\n", + " return text" + ] + }, + { + "cell_type": "code", + "execution_count": 
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "faster-whisper: 0.643276 seconds\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/home/tesla/miniconda3/envs/DUY/lib/python3.9/site-packages/transformers/pipelines/base.py:1090: UserWarning: You seem to be using the pipelines sequentially on GPU. In order to maximize efficiency please use a dataset\n",
+ "  warnings.warn(\n",
+ "ffmpeg: /home/tesla/miniconda3/lib/libncursesw.so.6: no version information available (required by /lib/x86_64-linux-gnu/libcaca.so.0)\n",
+ "[repeated pipeline/ffmpeg warnings trimmed]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "lora-whisper: 2.030503 seconds\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/home/tesla/miniconda3/envs/DUY/lib/python3.9/site-packages/transformers/generation/utils.py:1396: UserWarning: Using the model-agnostic default `max_length` (=448) to control the generation length. recommend setting `max_new_tokens` to control the maximum length of the generation.\n",
+ "  warnings.warn(\n",
+ "[repeated pipeline/ffmpeg warnings trimmed]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "hf-whisper: 1.959464 seconds\n"
+ ]
+ }
+ ],
+ "source": [
+ "import time\n",
+ "\n",
+ "# import whisper  # needed only for the commented-out openai-whisper baseline\n",
+ "from whisper_quant import WhisperModel\n",
+ "\n",
+ "# openai_whisper_model = whisper.load_model(\"medium\", device=\"cuda\")\n",
+ "faster_whisper_model = WhisperModel(\"medium\", device=\"cuda\", compute_type=\"float16\")\n",
+ "\n",
+ "input_file = \"audio.wav\"\n",
+ "samples = 3\n",
+ "\n",
+ "\n",
+ "# def run_openai_whisper():\n",
+ "#     result = openai_whisper_model.transcribe(input_file, beam_size=1)\n",
+ "\n",
+ "\n",
+ "def run_lora_whisper():\n",
+ "    text = transcribe(input_file)\n",
+ "\n",
+ "\n",
+ "def run_hf_whisper():\n",
+ "    text = transcribe_hf(input_file)\n",
+ "\n",
+ "\n",
+ "def run_faster_whisper():\n",
+ "    segments, _ = faster_whisper_model.transcribe(input_file, beam_size=1, best_of=1)\n",
+ "    segments = list(segments)\n",
+ "\n",
+ "\n",
+ "def measure_execution_time(name, func, samples=3):\n",
+ "    func()  # warm-up run, excluded from the timing\n",
+ "\n",
+ "    start = time.time()\n",
+ "    for _ in range(samples):\n",
+ "        func()\n",
+ "    end = time.time()\n",
+ "\n",
+ "    print(\"%s: %f seconds\" % (name, (end - start) / samples))\n",
+ "\n",
+ "\n",
+ "# measure_execution_time(\"openai-whisper\", run_openai_whisper)\n",
+ "measure_execution_time(\"faster-whisper\", run_faster_whisper)\n",
+ "measure_execution_time(\"lora-whisper\", run_lora_whisper)\n",
+ "measure_execution_time(\"hf-whisper\", run_hf_whisper)"
+ ]
+ },
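+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Added note: on this single sample file (3 timed runs after one warm-up each), the quantized faster-whisper model (~0.64 s) is roughly 3x faster than both the LoRA pipeline (~2.03 s) and the stock HF whisper-medium pipeline (~1.96 s)."
+ ]
+ },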
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# MITI quantized"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "420it [00:02, 153.21it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "from pathlib import Path\n",
+ "\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import torch\n",
+ "import torchaudio\n",
+ "from datasets import Dataset, DatasetDict\n",
+ "from torchaudio import transforms as at\n",
+ "from tqdm import tqdm\n",
+ "from transformers import set_seed\n",
+ "\n",
+ "\n",
+ "def main():\n",
+ "    set_seed(42)\n",
+ "\n",
+ "    def load_wave(wave_path, sample_rate: int = 16000) -> np.ndarray:\n",
+ "        # Load a waveform and resample to the target rate if needed.\n",
+ "        waveform, sr = torchaudio.load(wave_path, normalize=True)\n",
+ "        if sample_rate != sr:\n",
+ "            waveform = at.Resample(sr, sample_rate)(waveform)\n",
+ "        return np.asarray(waveform)\n",
+ "\n",
+ "    def get_list_files_MITI(phase, sample_rate=16000, audio_max_sample_length=480000):\n",
+ "        # Same manifest loader as the Vin100h section, pointed at the MITI CSVs.\n",
+ "        text_list = []\n",
+ "        path_list = []\n",
+ "        csv_file = 'MITI_train.csv' if phase == 'train' else 'MITI_test.csv'\n",
+ "        df = pd.read_csv(csv_file)\n",
+ "        for index, row in tqdm(df.iterrows()):\n",
+ "            path = row['path']\n",
+ "            new_path = Path(row['path'])\n",
+ "            text = row['sentence']\n",
+ "            if new_path.exists():\n",
+ "                audio = load_wave(new_path, sample_rate=sample_rate)[0]\n",
+ "                if len(audio) > audio_max_sample_length or len(audio) <= 0:\n",
+ "                    print('skip file:', new_path, 'with len audio', len(audio))\n",
+ "                    continue\n",
+ "                text_list.append(text)\n",
+ "                path_list.append(path)\n",
+ "\n",
+ "        return path_list, text_list\n",
+ "\n",
+ "    # Get the testing dataset\n",
+ "    test_audio, test_text = get_list_files_MITI(phase='test')\n",
+ "    test_dataset = Dataset.from_dict({\"audio\": test_audio, \"text\": test_text})\n",
+ "    MITI = DatasetDict({\"test\": test_dataset})\n",
+ "\n",
+ "    return MITI, test_dataset\n",
+ "\n",
+ "\n",
+ "if __name__ == \"__main__\":\n",
+ "    clean_data, test_dataset = main()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import torch\n",
+ "from dataclasses import dataclass\n",
+ "\n",
+ "@dataclass\n",
+ "class DataCollatorSpeechSeq2SeqWithPadding:\n",
+ "    # Pass-through collator, as in the Vin100h section.\n",
+ "    def __call__(self, features):\n",
+ "        batch = {\n",
+ "            \"audio\": [feature[\"audio\"] for feature in features],\n",
+ "            \"transcription\": [feature[\"text\"] for feature in features]\n",
+ "        }\n",
+ "        return batch\n",
+ "\n",
+ "data_collator = DataCollatorSpeechSeq2SeqWithPadding()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "DatasetDict({\n",
+ "    test: Dataset({\n",
+ "        features: ['audio', 'text'],\n",
+ "        num_rows: 420\n",
+ "    })\n",
+ "})"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "clean_data"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from whisper_quant import WhisperModel\n",
+ "\n",
+ "model_size = \"medium\"\n",
+ "\n",
+ "# Run on GPU with FP16\n",
+ "model = WhisperModel(model_size, device=\"cuda\", compute_type=\"float16\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " 0%| | 0/27 [00:00