Granther committed on
Commit fa8b708
1 Parent(s): 8204c34

Upload 5 files

Files changed (5)
  1. ft.py +184 -0
  2. ft_hus.py +192 -0
  3. ft_instruct.py +208 -0
  4. ft_news.py +213 -0
  5. ft_skib.py +223 -0
ft.py ADDED
@@ -0,0 +1,184 @@
+ from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForLanguageModeling, get_scheduler
+ from huggingface_hub import HfApi, notebook_login
+ from datasets import load_dataset
+ from peft import LoraConfig, LoraModel, get_peft_model
+ from timm.scheduler import CosineLRScheduler
+ import wandb
+ import os
+ from accelerate import Accelerator
+ import numpy as np
+ import torch
+ import tqdm
+ import torch.nn as nn
+ import torch.optim as optim
+
+ acc = Accelerator()
+
+ lora_conf = LoraConfig(
+     r=8,
+     lora_alpha=64,
+     lora_dropout=0.1,
+     bias="none",
+     task_type="CAUSAL_LM",
+     target_modules="all-linear",
+     modules_to_save=None,
+ )
+
+ model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+ dataset_id = "microsoft/orca-math-word-problems-200k"
+
+ model_kwargs = dict(
+     use_cache=False,
+     attn_implementation="flash_attention_2",
+     torch_dtype=torch.bfloat16,
+     device_map="sequential",
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs)
+ model = get_peft_model(model, lora_conf)
+
+ def count_trainable_parameters(model):
+     model_parameters = filter(lambda p: p.requires_grad, model.parameters())
+     params = sum([np.prod(p.size()) for p in model_parameters])
+     return params
+
+ trainable_params = format(count_trainable_parameters(model), ",")
+
+ epochs = 1
+ per_dev_batch_size = 2
+ gradient_accumulation_steps = 4
+ dtype = torch.bfloat16
+ learning_rate = 1e-5
+
+ raw_dataset = load_dataset(dataset_id, split="train")
+
+ def apply_chat_template(example, tokenizer):
+     chat = [
+         {"role": "user", "content": example["question"]},
+         {"role": "assistant", "content": example["answer"]},
+     ]
+     example['text'] = tokenizer.apply_chat_template(chat, add_generation_prompt=False, tokenize=True)
+     return example
+
+ train_dataset = raw_dataset.select(range(150000))
+ test_dataset = raw_dataset.select(range(300))  # note: these rows also appear in the training selection
+ column_names = list(train_dataset.features)
+
+ processed_train_dataset = train_dataset.map(
+     apply_chat_template,
+     # batched=True,
+     # batch_size=20,
+     fn_kwargs={"tokenizer": tokenizer},
+     num_proc=10,
+     remove_columns=column_names,
+     desc="Applying chat template to train_sft",
+ )
+
+ processed_test_dataset = test_dataset.map(
+     apply_chat_template,
+     # batched=True,
+     # batch_size=20,
+     fn_kwargs={"tokenizer": tokenizer},
+     num_proc=10,
+     remove_columns=column_names,
+     desc="Applying chat template to test_sft",
+ )
+
+ data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
+
+ train_dataloader = torch.utils.data.DataLoader(
+     processed_train_dataset['text'],
+     batch_size=per_dev_batch_size,
+     shuffle=True,
+     collate_fn=data_collator
+ )
+
+ test_dataloader = torch.utils.data.DataLoader(
+     processed_test_dataset['text'],
+     batch_size=per_dev_batch_size,
+     shuffle=True,
+     collate_fn=data_collator
+ )
+
+ global_step = 0
+ num_training_steps = epochs * len(train_dataloader)
+ #num_training_steps = 20000
+ warmup_ratio = 0.1
+ warmup_steps = int(warmup_ratio * num_training_steps)
+
+ optimizer = optim.AdamW(model.parameters(), lr=learning_rate)
+ cross_entropy = nn.CrossEntropyLoss()
+
+ scheduler = get_scheduler(
+     name="cosine",
+     optimizer=optimizer,
+     num_warmup_steps=warmup_steps,
+     num_training_steps=num_training_steps
+ )
+
+ wandb.init(
+     project="math-tiny-llama",
+     config={
+         "learning_rate": learning_rate,
+         "dataset": dataset_id,
+         "batch_size": per_dev_batch_size,
+         "lora_r": lora_conf.r,
+         "lora_alpha": lora_conf.lora_alpha,
+         "lora_dropout": lora_conf.lora_dropout,
+         "gradient_accumulation_steps": gradient_accumulation_steps,
+         "warmup_ratio": warmup_ratio,
+         "trainable_params": trainable_params,
+         "num_training_steps": num_training_steps,
+         "model_name": "TinyLlama"
+     }
+ )
+
+ optimizer, scheduler, train_dataloader, tokenizer, model = acc.prepare(optimizer, scheduler, train_dataloader, tokenizer, model)
+
+ def calc_metrics():
+     model.eval()
+     for batch in test_dataloader:
+         pred = model(**batch)
+         loss = pred.loss
+
+         wandb.log({"eval_loss": loss.item()})
+
+     model.train()
+
+ model.train()
+ for epoch in range(epochs):
+     for step, batch in enumerate(train_dataloader):
+
+         outputs = model(**batch)
+         loss = outputs.loss
+
+         loss.backward()
+
+         wandb.log({"loss": loss.item(), "learning_rate": optimizer.param_groups[0]['lr']})
+
+         if (step + 1) % gradient_accumulation_steps == 0:
+             optimizer.step()
+             scheduler.step()
+             optimizer.zero_grad()
+             global_step += 1
+
+         if (step + 1) % 100 == 0:
+             print(f"Loss: {loss.item()}")
+
+         if (step + 1) % 400 == 0:
+             calc_metrics()
+
+         if global_step > num_training_steps:
+             break
+
+     if global_step > num_training_steps:
+         break
+
+ wandb.finish()
+
+ save_path = os.path.join("checkpoint_2_", f"step_{global_step}")
+ model.module.save_pretrained(save_path)
+
+ print("Saved model")
ft_hus.py ADDED
@@ -0,0 +1,192 @@
+ from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForLanguageModeling, get_scheduler
+ from huggingface_hub import HfApi, notebook_login
+ from datasets import load_dataset
+ from peft import LoraConfig, LoraModel, get_peft_model
+ from timm.scheduler import CosineLRScheduler
+ import wandb
+ import os
+ from accelerate import Accelerator
+ import numpy as np
+ import torch
+ import tqdm
+ import torch.nn as nn
+ import torch.optim as optim
+
+ lora_conf = LoraConfig(
+     r=8,
+     lora_alpha=32,
+     lora_dropout=0.05,
+     bias="none",
+     task_type="CAUSAL_LM",
+     target_modules="all-linear",
+     modules_to_save=None,
+ )
+
+ model_id = "Qwen/Qwen2-1.5B-Instruct"
+
+ model_kwargs = dict(
+     use_cache=False,
+     #attn_implementation="flash_attention_2",
+     torch_dtype="auto",
+     device_map="sequential",
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ tokenizer.model_max_length = 4096
+ model = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs)
+ model = get_peft_model(model, lora_conf)
+
+ def count_trainable_parameters(model):
+     model_parameters = filter(lambda p: p.requires_grad, model.parameters())
+     params = sum([np.prod(p.size()) for p in model_parameters])
+     return params
+
+ trainable_params = format(count_trainable_parameters(model), ",")
+
+ epochs = 2
+ per_dev_batch_size = 2
+ gradient_accumulation_steps = 20
+ dtype = torch.bfloat16
+ learning_rate = 1e-4
+
+ def apply_chat_template(example, tokenizer):
+     convo = example['conversations']
+     for dic in convo:
+         dic['role'] = dic.pop('from')
+         dic['content'] = dic.pop('value')
+         if dic['role'] == 'gpt':
+             dic['role'] = 'assistant'
+         elif dic['role'] == 'human':
+             dic['role'] = 'user'
+
+     example['text'] = tokenizer.apply_chat_template(convo, tokenize=True, add_generation_prompt=False, truncation=True)
+     return example
+
+ # NOTE: `dataset` is never loaded in this file; a load_dataset(...) call for the
+ # RP/Instruct mix is assumed to run before this point.
+ train_dataset = dataset.select(range(98000))
+ test_dataset = dataset.select(range(3000))  # note: these rows also appear in the training selection
+ column_names = list(train_dataset.features)
+
+ processed_train_dataset = train_dataset.map(
+     apply_chat_template,
+     fn_kwargs={"tokenizer": tokenizer},
+     num_proc=10,
+     remove_columns=column_names,
+ )
+
+ processed_test_dataset = test_dataset.map(
+     apply_chat_template,
+     fn_kwargs={"tokenizer": tokenizer},
+     num_proc=10,
+     remove_columns=column_names,
+ )
+
+ data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
+
+ train_dataloader = torch.utils.data.DataLoader(
+     processed_train_dataset['text'],
+     batch_size=per_dev_batch_size,
+     shuffle=True,
+     collate_fn=data_collator
+ )
+
+ test_dataloader = torch.utils.data.DataLoader(
+     processed_test_dataset['text'],
+     batch_size=per_dev_batch_size,
+     shuffle=True,
+     collate_fn=data_collator
+ )
+
+ global_step = 0
+ num_training_steps = epochs * len(train_dataloader)
+ warmup_ratio = 0.1
+ warmup_steps = 800
+ #warmup_steps = int(warmup_ratio * num_training_steps)
+
+ optimizer = optim.AdamW(model.parameters(), lr=learning_rate)
+ cross_entropy = nn.CrossEntropyLoss()
+
+ scheduler = get_scheduler(
+     name="cosine",
+     optimizer=optimizer,
+     num_warmup_steps=warmup_steps,
+     num_training_steps=num_training_steps
+ )
+
+ acc = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps)
+
+ if acc.is_main_process:
+     wandb.init(
+         project="qwen-hus-inst",
+         config={
+             "learning_rate": learning_rate,
+             "dataset": "Mix of RP and Instruct",
+             "batch_size": per_dev_batch_size,
+             "lora_r": lora_conf.r,
+             "lora_alpha": lora_conf.lora_alpha,
+             "lora_dropout": lora_conf.lora_dropout,
+             "gradient_accumulation_steps": gradient_accumulation_steps,
+             "warmup_ratio": warmup_ratio,
+             "trainable_params": trainable_params,
+             "num_training_steps": num_training_steps,
+             "model_name": model_id
+         }
+     )
+
+ optimizer, scheduler, train_dataloader, tokenizer, model = acc.prepare(optimizer, scheduler, train_dataloader, tokenizer, model)
+
+ def save_checkpoint():
+     if acc.is_main_process:
+         save_path = os.path.join("checkpoint_hus", f"step_{global_step}")
+         model.module.save_pretrained(save_path)
+
+         print(f"Saved model at step {global_step}")
+
+ def calc_metrics():
+     model.eval()
+     for batch in test_dataloader:
+         pred = model(**batch)
+         loss = pred.loss
+
+         if acc.is_main_process:
+             perplexity = torch.exp(loss)
+             wandb.log({"eval_loss": loss.item(), "eval_perplexity": perplexity})
+
+     model.train()
+
+ model.train()
+ for epoch in range(epochs):
+     for step, batch in enumerate(train_dataloader):
+         with acc.accumulate(model):
+             outputs = model(**batch)
+             loss = outputs.loss
+             acc.backward(loss)
+             optimizer.step()
+             scheduler.step()
+             optimizer.zero_grad()
+
+         if acc.is_main_process:
+             perplexity = torch.exp(loss)
+             wandb.log({"loss": loss.item(), "learning_rate": optimizer.param_groups[0]['lr'], "perplexity": perplexity})
+
+         global_step += 1
+
+         if (step + 1) % 1000 == 0:
+             save_checkpoint()
+
+         if (step + 1) % 100 == 0 and acc.is_main_process:
+             print(f"Loss: {loss.item()}")
+
+         if (step + 1) % 2000 == 0:
+             calc_metrics()
+
+         if global_step > num_training_steps:
+             break
+
+     if global_step > num_training_steps:
+         break
+
+ if acc.is_main_process:
+     wandb.finish()
+ save_checkpoint()
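
Note: a common follow-up once this run finishes is folding the LoRA weights into the base model for deployment. This is only a sketch, assuming a `checkpoint_hus/step_<n>` adapter directory was written by `save_checkpoint()` above (the step directory and output path are placeholders):

from peft import AutoPeftModelForCausalLM

adapter_dir = "checkpoint_hus/step_0"  # hypothetical; use the step directory produced by the run
model = AutoPeftModelForCausalLM.from_pretrained(adapter_dir, torch_dtype="auto")
merged = model.merge_and_unload()  # bake the LoRA deltas into the base weights
merged.save_pretrained("qwen2-hus-merged")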
ft_instruct.py ADDED
@@ -0,0 +1,208 @@
+ from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForLanguageModeling, get_scheduler
+ from huggingface_hub import HfApi, notebook_login
+ from datasets import load_dataset
+ from peft import LoraConfig, LoraModel, get_peft_model
+ from timm.scheduler import CosineLRScheduler
+ import wandb
+ import os
+ from accelerate import Accelerator
+ import numpy as np
+ import torch
+ import tqdm
+ import torch.nn as nn
+ import torch.optim as optim
+
+ lora_conf = LoraConfig(
+     r=8,
+     lora_alpha=64,
+     lora_dropout=0.05,
+     bias="none",
+     task_type="CAUSAL_LM",
+     target_modules="all-linear",
+     modules_to_save=None,
+ )
+
+ model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+ dataset_id = "BAAI/Infinity-Instruct"
+
+ model_kwargs = dict(
+     use_cache=False,
+     attn_implementation="flash_attention_2",
+     torch_dtype=torch.bfloat16,
+     device_map="sequential",
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ tokenizer.model_max_length = 2048
+ model = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs)
+ model = get_peft_model(model, lora_conf)
+
+ def count_trainable_parameters(model):
+     model_parameters = filter(lambda p: p.requires_grad, model.parameters())
+     params = sum([np.prod(p.size()) for p in model_parameters])
+     return params
+
+ trainable_params = format(count_trainable_parameters(model), ",")
+
+ epochs = 1
+ per_dev_batch_size = 1
+ gradient_accumulation_steps = 30
+ dtype = torch.bfloat16
+ learning_rate = 1e-5
+
+ raw_dataset = load_dataset(dataset_id, "0625", split="train")
+
+ def apply_chat_template(example, tokenizer):
+     convo = example['conversations']
+     for dic in convo:
+         dic['role'] = dic.pop('from')
+         dic['content'] = dic.pop('value')
+         if dic['role'] == 'gpt':
+             dic['role'] = 'assistant'
+         elif dic['role'] == 'human':
+             dic['role'] = 'user'
+
+     example['text'] = tokenizer.apply_chat_template(convo, tokenize=True, add_generation_prompt=False, truncation=True)
+     return example
+
+ train_dataset = raw_dataset.select(range(100000))
+ test_dataset = raw_dataset.select(range(300))  # note: these rows also appear in the training selection
+ column_names = list(train_dataset.features)
+
+ processed_train_dataset = train_dataset.map(
+     apply_chat_template,
+     # batched=True,
+     # batch_size=20,
+     fn_kwargs={"tokenizer": tokenizer},
+     num_proc=10,
+     remove_columns=column_names,
+ )
+
+ processed_test_dataset = test_dataset.map(
+     apply_chat_template,
+     # batched=True,
+     # batch_size=20,
+     fn_kwargs={"tokenizer": tokenizer},
+     num_proc=10,
+     remove_columns=column_names,
+ )
+
+ data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
+
+ train_dataloader = torch.utils.data.DataLoader(
+     processed_train_dataset['text'],
+     batch_size=per_dev_batch_size,
+     shuffle=True,
+     collate_fn=data_collator
+ )
+
+ test_dataloader = torch.utils.data.DataLoader(
+     processed_test_dataset['text'],
+     batch_size=per_dev_batch_size,
+     shuffle=True,
+     collate_fn=data_collator
+ )
+
+ global_step = 0
+ num_training_steps = epochs * len(train_dataloader)
+ warmup_ratio = 0.1
+ warmup_steps = 1000
+ #warmup_steps = int(warmup_ratio * num_training_steps)
+
+ optimizer = optim.AdamW(model.parameters(), lr=learning_rate)
+ cross_entropy = nn.CrossEntropyLoss()
+
+ scheduler = get_scheduler(
+     name="cosine",
+     optimizer=optimizer,
+     num_warmup_steps=warmup_steps,
+     num_training_steps=num_training_steps
+ )
+
+ acc = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps)
+
+ if acc.is_main_process:
+     wandb.init(
+         project="tiny-llama-instruct",
+         config={
+             "learning_rate": learning_rate,
+             "dataset": dataset_id,
+             "batch_size": per_dev_batch_size,
+             "lora_r": lora_conf.r,
+             "lora_alpha": lora_conf.lora_alpha,
+             "lora_dropout": lora_conf.lora_dropout,
+             "gradient_accumulation_steps": gradient_accumulation_steps,
+             "warmup_ratio": warmup_ratio,
+             "trainable_params": trainable_params,
+             "num_training_steps": num_training_steps,
+             "model_name": "TinyLlama"
+         }
+     )
+
+ optimizer, scheduler, train_dataloader, tokenizer, model = acc.prepare(optimizer, scheduler, train_dataloader, tokenizer, model)
+
+ def calc_metrics():
+     model.eval()
+     for batch in test_dataloader:
+         pred = model(**batch)
+         loss = pred.loss
+
+         if acc.is_main_process:
+             perplexity = torch.exp(loss)
+             wandb.log({"eval_loss": loss.item(), "eval_perplexity": perplexity})
+
+     model.train()
+
+ device = acc.device
+
+ model.train()
+ for epoch in range(epochs):
+     for step, batch in enumerate(train_dataloader):
+
+         # outputs = model(**batch)
+         # loss = outputs.loss
+
+         # acc.backward(loss)
+         # wandb.log({"loss": loss.item(), "learning_rate": optimizer.param_groups[0]['lr'], "perplexity": perplexity})
+
+         with acc.accumulate(model):
+             batch = {k: v.to(device) for k, v in batch.items()}
+             outputs = model(**batch)
+             loss = outputs.loss
+             acc.backward(loss)
+             optimizer.step()
+             scheduler.step()
+             optimizer.zero_grad()
+
+         if acc.is_main_process:
+             perplexity = torch.exp(loss)
+             wandb.log({"loss": loss.item(), "learning_rate": optimizer.param_groups[0]['lr'], "perplexity": perplexity})
+
+         global_step += 1
+
+         # if (step + 1) % gradient_accumulation_steps == 0:
+         #     optimizer.step()
+         #     scheduler.step()
+         #     optimizer.zero_grad()
+         #     global_step += 1
+
+         if (step + 1) % 100 == 0 and acc.is_main_process:
+             print(f"Loss: {loss.item()}")
+
+         if (step + 1) % 400 == 0:
+             calc_metrics()
+
+         if global_step > num_training_steps:
+             break
+
+     if global_step > num_training_steps:
+         break
+
+ if acc.is_main_process:
+     wandb.finish()
+
+ save_path = os.path.join("checkpoint_instruct_2", f"step_{global_step}")
+ model.module.save_pretrained(save_path)
+
+ print("Saved model")
ft_news.py ADDED
@@ -0,0 +1,213 @@
+ from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForLanguageModeling, get_scheduler
+ from huggingface_hub import HfApi, notebook_login
+ from datasets import load_dataset
+ from peft import LoraConfig, LoraModel, get_peft_model
+ from timm.scheduler import CosineLRScheduler
+ import wandb
+ import os
+ from accelerate import Accelerator
+ import numpy as np
+ import torch
+ import tqdm
+ import torch.nn as nn
+ import torch.optim as optim
+
+ lora_conf = LoraConfig(
+     r=8,
+     lora_alpha=32,
+     lora_dropout=0.05,
+     bias="none",
+     task_type="CAUSAL_LM",
+     target_modules="all-linear",
+     modules_to_save=None,
+ )
+
+ model_id = "Qwen/Qwen2-1.5B-Instruct"
+ dataset_id = "GonzaloA/fake_news"
+
+ model_kwargs = dict(
+     use_cache=False,
+     #attn_implementation="flash_attention_2",
+     torch_dtype="auto",
+     device_map="sequential",
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ tokenizer.model_max_length = 2048
+ model = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs)
+ model = get_peft_model(model, lora_conf)
+
+ def count_trainable_parameters(model):
+     model_parameters = filter(lambda p: p.requires_grad, model.parameters())
+     params = sum([np.prod(p.size()) for p in model_parameters])
+     return params
+
+ trainable_params = format(count_trainable_parameters(model), ",")
+
+ epochs = 1
+ per_dev_batch_size = 1
+ gradient_accumulation_steps = 20
+ dtype = torch.bfloat16
+ learning_rate = 1e-4
+
+ train_dataset = load_dataset(dataset_id, split="train")
+ test_dataset = load_dataset(dataset_id, split="test").select(range(100))
+
+ def apply_chat_template(example, tokenizer):
+     story = example['text']
+     chat = [
+         {"role": "system", "content": "Given a title, please generate a news story"},
+         {"role": "user", "content": example['title']},
+         {"role": "assistant", "content": story}
+     ]
+
+     example['text'] = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=False, truncation=True)
+     #example['text'] = tokenizer([text], return_tensors="pt")
+     return example
+
+
+ processed_train_dataset = train_dataset.map(
+     apply_chat_template,
+     # batched=True,
+     # batch_size=20,
+     fn_kwargs={"tokenizer": tokenizer},
+     num_proc=10,
+     #remove_columns=column_names,
+ )
+
+ processed_test_dataset = test_dataset.map(
+     apply_chat_template,
+     # batched=True,
+     # batch_size=20,
+     fn_kwargs={"tokenizer": tokenizer},
+     num_proc=10,
+     #remove_columns=column_names,
+ )
+
+ data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
+
+ train_dataloader = torch.utils.data.DataLoader(
+     processed_train_dataset['text'],
+     batch_size=per_dev_batch_size,
+     shuffle=False,
+     collate_fn=data_collator
+ )
+
+ test_dataloader = torch.utils.data.DataLoader(
+     processed_test_dataset['text'],
+     batch_size=per_dev_batch_size,
+     shuffle=False,
+     collate_fn=data_collator
+ )
+
+ global_step = 0
+ num_training_steps = epochs * len(train_dataloader)
+ warmup_ratio = 0.1
+ warmup_steps = 500
+ #warmup_steps = int(warmup_ratio * num_training_steps)
+
+ optimizer = optim.AdamW(model.parameters(), lr=learning_rate)
+ cross_entropy = nn.CrossEntropyLoss()
+
+ scheduler = get_scheduler(
+     name="cosine",
+     optimizer=optimizer,
+     num_warmup_steps=warmup_steps,
+     num_training_steps=num_training_steps
+ )
+
+ acc = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps)
+
+ if acc.is_main_process:
+     wandb.init(
+         project="qwen-fake-news",
+         config={
+             "learning_rate": learning_rate,
+             "dataset": dataset_id,
+             "batch_size": per_dev_batch_size,
+             "lora_r": lora_conf.r,
+             "lora_alpha": lora_conf.lora_alpha,
+             "lora_dropout": lora_conf.lora_dropout,
+             "gradient_accumulation_steps": gradient_accumulation_steps,
+             "warmup_ratio": warmup_ratio,
+             "trainable_params": trainable_params,
+             "num_training_steps": num_training_steps,
+             "model_name": model_id
+         }
+     )
+
+ optimizer, scheduler, train_dataloader, tokenizer, model = acc.prepare(optimizer, scheduler, train_dataloader, tokenizer, model)
+
+ def save_checkpoint():
+     if acc.is_main_process:
+         save_path = os.path.join("checkpoint_news", f"step_{global_step}")
+         model.module.save_pretrained(save_path)
+
+         print(f"Saved model at step {global_step}")
+
+ def calc_metrics():
+     model.eval()
+     for batch in test_dataloader:
+         pred = model(**batch)
+         loss = pred.loss
+
+         if acc.is_main_process:
+             perplexity = torch.exp(loss)
+             wandb.log({"eval_loss": loss.item(), "eval_perplexity": perplexity})
+
+     model.train()
+
+ device = acc.device
+
+ model.train()
+ for epoch in range(epochs):
+     for step, batch in enumerate(train_dataloader):
+         #print(tokenizer.decode(batch['input_ids'][0]))
+
+         # outputs = model(**batch)
+         # loss = outputs.loss
+
+         # acc.backward(loss)
+         # wandb.log({"loss": loss.item(), "learning_rate": optimizer.param_groups[0]['lr'], "perplexity": perplexity})
+
+         with acc.accumulate(model):
+             #batch = {k: v.to(device) for k, v in batch.items()}
+             outputs = model(**batch)
+             loss = outputs.loss
+             acc.backward(loss)
+             optimizer.step()
+             scheduler.step()
+             optimizer.zero_grad()
+
+         if acc.is_main_process:
+             perplexity = torch.exp(loss)
+             wandb.log({"loss": loss.item(), "learning_rate": optimizer.param_groups[0]['lr'], "perplexity": perplexity})
+
+         global_step += 1
+
+         if (step + 1) % 1000 == 0:
+             save_checkpoint()
+
+         # if (step + 1) % gradient_accumulation_steps == 0:
+         #     optimizer.step()
+         #     scheduler.step()
+         #     optimizer.zero_grad()
+         #     global_step += 1
+
+         if (step + 1) % 100 == 0 and acc.is_main_process:
+             print(f"Loss: {loss.item()}")
+
+         if (step + 1) % 400 == 0:
+             calc_metrics()
+
+         if global_step > num_training_steps:
+             break
+
+     if global_step > num_training_steps:
+         break
+
+ if acc.is_main_process:
+     wandb.finish()
+ save_checkpoint()
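
Note: a minimal inference sketch for the title-to-story task trained above, assuming an adapter directory written by `save_checkpoint()` (the step directory and headline below are placeholders, not values from the run):

from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

model_id = "Qwen/Qwen2-1.5B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
base = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto", device_map="auto")
model = PeftModel.from_pretrained(base, "checkpoint_news/step_0")  # hypothetical adapter path

chat = [
    {"role": "system", "content": "Given a title, please generate a news story"},
    {"role": "user", "content": "Example headline"},
]
inputs = tokenizer.apply_chat_template(chat, add_generation_prompt=True, return_tensors="pt").to(base.device)
out = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))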
ft_skib.py ADDED
@@ -0,0 +1,223 @@
+ from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForLanguageModeling, get_scheduler
+ from huggingface_hub import HfApi, notebook_login
+ from datasets import load_dataset
+ from peft import LoraConfig, LoraModel, get_peft_model
+ from timm.scheduler import CosineLRScheduler
+ import wandb
+ import os
+ from accelerate import Accelerator
+ import numpy as np
+ import torch
+ import tqdm
+ import torch.nn as nn
+ import torch.optim as optim
+
+ lora_conf = LoraConfig(
+     r=8,
+     lora_alpha=32,
+     lora_dropout=0.05,
+     bias="none",
+     task_type="CAUSAL_LM",
+     target_modules="all-linear",
+     modules_to_save=None,
+ )
+
+ model_id = "Qwen/Qwen2-1.5B-Instruct"
+ dataset_id = "HuggingFaceH4/orca-math-word-problems-200k"
+
+ model_kwargs = dict(
+     use_cache=False,
+     #attn_implementation="flash_attention_2",
+     torch_dtype="auto",
+     device_map="sequential",
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ tokenizer.model_max_length = 2048
+ model = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs)
+ model = get_peft_model(model, lora_conf)
+
+ def count_trainable_parameters(model):
+     model_parameters = filter(lambda p: p.requires_grad, model.parameters())
+     params = sum([np.prod(p.size()) for p in model_parameters])
+     return params
+
+ trainable_params = format(count_trainable_parameters(model), ",")
+
+ epochs = 1
+ per_dev_batch_size = 1
+ gradient_accumulation_steps = 20
+ dtype = torch.bfloat16
+ learning_rate = 1e-4
+
+ train_dataset = load_dataset(dataset_id, split="train_sft").select(range(150000))
+ test_dataset = load_dataset(dataset_id, split="test_sft").select(range(100))
+
+ # def apply_chat_template(example, tokenizer):
+ #     chat = []
+ #     convo = example['conversations']
+ #     for dic in convo:
+ #         if dic['from'] == 'human':
+ #             chat = [
+ #                 {"role": "user", "content": dic['value']},
+ #                 {"role": "assistant", "content": "skibbidy"}
+ #             ]
+ #
+ #     example['text'] = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=False, truncation=True)
+ #     return example
+
+ # train_dataset = raw_dataset.select(range(10000))
+ # test_dataset = raw_dataset.select(range(300))
+ # column_names = list(train_dataset.features)
+
+ def apply_chat_template(example, tokenizer):
+     example['text'] = tokenizer.apply_chat_template(example['messages'], tokenize=True, add_generation_prompt=False, truncation=True)
+     return example
+
+ column_names = list(train_dataset.features)
+
+ processed_train_dataset = train_dataset.map(
+     apply_chat_template,
+     # batched=True,
+     # batch_size=20,
+     fn_kwargs={"tokenizer": tokenizer},
+     num_proc=10,
+     remove_columns=column_names,
+ )
+
+ processed_test_dataset = test_dataset.map(
+     apply_chat_template,
+     # batched=True,
+     # batch_size=20,
+     fn_kwargs={"tokenizer": tokenizer},
+     num_proc=10,
+     remove_columns=column_names,
+ )
+
+ data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
+
+ train_dataloader = torch.utils.data.DataLoader(
+     processed_train_dataset['text'],
+     batch_size=per_dev_batch_size,
+     shuffle=False,
+     collate_fn=data_collator
+ )
+
+ test_dataloader = torch.utils.data.DataLoader(
+     processed_test_dataset['text'],
+     batch_size=per_dev_batch_size,
+     shuffle=False,
+     collate_fn=data_collator
+ )
+
+ global_step = 0
+ num_training_steps = epochs * len(train_dataloader)
+ warmup_ratio = 0.1
+ warmup_steps = 500
+ #warmup_steps = int(warmup_ratio * num_training_steps)
+
+ optimizer = optim.AdamW(model.parameters(), lr=learning_rate)
+ cross_entropy = nn.CrossEntropyLoss()
+
+ scheduler = get_scheduler(
+     name="cosine",
+     optimizer=optimizer,
+     num_warmup_steps=warmup_steps,
+     num_training_steps=num_training_steps
+ )
+
+ acc = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps)
+
+ if acc.is_main_process:
+     wandb.init(
+         project="qwen-math",
+         config={
+             "learning_rate": learning_rate,
+             "dataset": dataset_id,
+             "batch_size": per_dev_batch_size,
+             "lora_r": lora_conf.r,
+             "lora_alpha": lora_conf.lora_alpha,
+             "lora_dropout": lora_conf.lora_dropout,
+             "gradient_accumulation_steps": gradient_accumulation_steps,
+             "warmup_ratio": warmup_ratio,
+             "trainable_params": trainable_params,
+             "num_training_steps": num_training_steps,
+             "model_name": model_id
+         }
+     )
+
+ optimizer, scheduler, train_dataloader, tokenizer, model = acc.prepare(optimizer, scheduler, train_dataloader, tokenizer, model)
+
+ def save_checkpoint():
+     if acc.is_main_process:
+         save_path = os.path.join("checkpoint_math", f"step_{global_step}")
+         model.module.save_pretrained(save_path)
+
+         print(f"Saved model at step {global_step}")
+
+ def calc_metrics():
+     model.eval()
+     for batch in test_dataloader:
+         pred = model(**batch)
+         loss = pred.loss
+
+         if acc.is_main_process:
+             perplexity = torch.exp(loss)
+             wandb.log({"eval_loss": loss.item(), "eval_perplexity": perplexity})
+
+     model.train()
+
+ device = acc.device
+
+ model.train()
+ for epoch in range(epochs):
+     for step, batch in enumerate(train_dataloader):
+         #print(tokenizer.decode(batch['input_ids'][0]))
+
+         # outputs = model(**batch)
+         # loss = outputs.loss
+
+         # acc.backward(loss)
+         # wandb.log({"loss": loss.item(), "learning_rate": optimizer.param_groups[0]['lr'], "perplexity": perplexity})
+
+         with acc.accumulate(model):
+             #batch = {k: v.to(device) for k, v in batch.items()}
+             outputs = model(**batch)
+             loss = outputs.loss
+             acc.backward(loss)
+             optimizer.step()
+             scheduler.step()
+             optimizer.zero_grad()
+
+         if acc.is_main_process:
+             perplexity = torch.exp(loss)
+             wandb.log({"loss": loss.item(), "learning_rate": optimizer.param_groups[0]['lr'], "perplexity": perplexity})
+
+         global_step += 1
+
+         if (step + 1) % 1000 == 0:
+             save_checkpoint()
+
+         # if (step + 1) % gradient_accumulation_steps == 0:
+         #     optimizer.step()
+         #     scheduler.step()
+         #     optimizer.zero_grad()
+         #     global_step += 1
+
+         if (step + 1) % 100 == 0 and acc.is_main_process:
+             print(f"Loss: {loss.item()}")
+
+         if (step + 1) % 400 == 0:
+             calc_metrics()
+
+         if global_step > num_training_steps:
+             break
+
+     if global_step > num_training_steps:
+         break
+
+ if acc.is_main_process:
+     wandb.finish()
+ save_checkpoint()
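
Note: `calc_metrics()` in these scripts runs the eval forward passes with gradients enabled and logs one loss per test batch. A tightened variant, shown only as a sketch under the same model/dataloader assumptions, would disable gradients and return a single mean loss:

import torch

@torch.inference_mode()
def evaluate(model, dataloader):
    # run the eval pass without building a graph and average the loss over the whole set
    model.eval()
    losses = []
    for batch in dataloader:
        losses.append(model(**batch).loss.item())
    model.train()
    return sum(losses) / max(len(losses), 1)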