Khalid751 committed on
Commit
dea079c
1 Parent(s): 26ea4b2

Upload Banglish_to_Bengali_Transliteration.ipynb

Banglish_to_Bengali_Transliteration.ipynb ADDED
@@ -0,0 +1,159 @@
+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "code",
+ "source": [
+ "# Install necessary libraries\n",
+ "!pip install datasets\n",
+ "\n",
+ "# Import the libraries needed for the dataset, model, tokenizer, and training\n",
+ "from datasets import load_dataset\n",
+ "from transformers import MT5Tokenizer, MT5ForConditionalGeneration, Trainer, TrainingArguments\n",
+ "import torch"
+ ],
+ "metadata": {
+ "id": "HrXBpUDmLbKH"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
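+ {
+ "cell_type": "code",
+ "source": [
+ "# Environment check (added for convenience, not part of the original pipeline):\n",
+ "# fine-tuning mT5 on CPU is very slow, so confirm a GPU is available.\n",
+ "# Trainer selects the device automatically.\n",
+ "print(\"CUDA available:\", torch.cuda.is_available())"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },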
+ {
+ "cell_type": "code",
+ "source": [
+ "# 1. Load Dataset\n",
+ "# Load the Bengali transliteration dataset\n",
+ "raw_dataset = load_dataset(\"SKNahin/bengali-transliteration-data\")\n",
+ "\n",
+ "# Split the dataset once into training and validation sets (90% / 10%); the seed makes the split reproducible\n",
+ "split = raw_dataset['train'].train_test_split(test_size=0.1, seed=42)\n",
+ "train_data = split['train']\n",
+ "val_data = split['test']\n",
+ "\n",
+ "# 2. Preprocessing\n",
+ "# Define the model name and load the matching tokenizer\n",
+ "model_name = \"google/mt5-small\"\n",
+ "tokenizer = MT5Tokenizer.from_pretrained(model_name)\n",
+ "\n",
+ "# Tokenize function for preprocessing the data\n",
+ "def preprocess_data(examples):\n",
+ "    # For Banglish-to-Bengali transliteration, the romanized text ('rm') is the input\n",
+ "    # and the Bengali script ('bn') is the target\n",
+ "    inputs = tokenizer(examples['rm'], padding=\"max_length\", truncation=True, max_length=128)\n",
+ "    targets = tokenizer(examples['bn'], padding=\"max_length\", truncation=True, max_length=128)\n",
+ "\n",
+ "    # Use the tokenized targets as labels, replacing pad tokens with -100 so the loss ignores them\n",
+ "    inputs['labels'] = [\n",
+ "        [(token if token != tokenizer.pad_token_id else -100) for token in seq]\n",
+ "        for seq in targets['input_ids']\n",
+ "    ]\n",
+ "    return inputs\n",
+ "\n",
+ "# Apply the preprocessing function to the training and validation datasets\n",
+ "train_dataset = train_data.map(preprocess_data, batched=True)\n",
+ "val_dataset = val_data.map(preprocess_data, batched=True)\n",
+ "\n",
+ "# 3. Model Selection\n",
+ "# Load the mT5 model for conditional generation\n",
+ "model = MT5ForConditionalGeneration.from_pretrained(model_name)\n",
+ "\n",
+ "# 4. Training\n",
+ "# Define training arguments such as learning rate, batch size, and number of epochs\n",
+ "training_args = TrainingArguments(\n",
+ "    output_dir=\"./results\",              # Directory to store the results\n",
+ "    evaluation_strategy=\"epoch\",         # Evaluate the model after each epoch\n",
+ "    learning_rate=5e-5,                  # Learning rate\n",
+ "    per_device_train_batch_size=16,      # Batch size for training\n",
+ "    per_device_eval_batch_size=16,       # Batch size for evaluation\n",
+ "    num_train_epochs=5,                  # Number of training epochs\n",
+ "    weight_decay=0.01,                   # Weight decay for regularization\n",
+ "    save_steps=1000,                     # Save a checkpoint every 1000 steps\n",
+ "    save_total_limit=2,                  # Keep only the 2 most recent checkpoints\n",
+ "    logging_dir=\"./logs\",                # Directory to store logs\n",
+ "    logging_steps=500,                   # Log every 500 steps\n",
+ ")\n"
+ ],
+ "metadata": {
+ "id": "kxHZMLRKPr6L"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
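+ {
+ "cell_type": "code",
+ "source": [
+ "# Sanity check (added, not part of the original pipeline): look at one raw pair and\n",
+ "# confirm the tokenized input decodes back to the Banglish text.\n",
+ "# This assumes the 'rm'/'bn' column names used above.\n",
+ "sample = train_data[0]\n",
+ "print(\"Banglish input:\", sample['rm'])\n",
+ "print(\"Bengali target:\", sample['bn'])\n",
+ "print(\"Decoded input :\", tokenizer.decode(train_dataset[0]['input_ids'], skip_special_tokens=True))"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },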
+ {
+ "cell_type": "code",
+ "source": [
+ "# Initialize the Trainer with the model, arguments, and datasets\n",
+ "trainer = Trainer(\n",
+ "    model=model,\n",
+ "    args=training_args,\n",
+ "    train_dataset=train_dataset,\n",
+ "    eval_dataset=val_dataset\n",
+ ")\n",
+ "\n",
+ "# Start the training process\n",
+ "trainer.train()"
+ ],
+ "metadata": {
+ "id": "9_9U0GCCoTqZ"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
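+ {
+ "cell_type": "code",
+ "source": [
+ "# Inference sketch (added): save the fine-tuned model and transliterate one example.\n",
+ "# The output path and the sample sentence are illustrative placeholders.\n",
+ "trainer.save_model(\"./banglish2bengali\")\n",
+ "\n",
+ "sample_text = \"ami tomake bhalobashi\"\n",
+ "model_inputs = tokenizer(sample_text, return_tensors=\"pt\").to(model.device)\n",
+ "output_ids = model.generate(**model_inputs, max_length=128)\n",
+ "print(tokenizer.decode(output_ids[0], skip_special_tokens=True))"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ }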
+ ]
+ }