Upload folder using huggingface_hub
- args.json +314 -0
- config.json +3 -3
- generation_config.json +1 -1
- model-00001-of-00004.safetensors +1 -1
- model-00002-of-00004.safetensors +1 -1
- model-00003-of-00004.safetensors +1 -1
- model-00004-of-00004.safetensors +1 -1
- tokenizer_config.json +1 -0
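The commit message indicates the files were pushed with huggingface_hub's upload_folder. A minimal sketch of such a push is shown below; folder_path matches the model_dir recorded in args.json, while the repo_id is a placeholder, not taken from this commit.

# Sketch only: pushing a local model folder to the Hub with huggingface_hub.
# Assumes a token is already configured (e.g. via `huggingface-cli login` or HF_TOKEN).
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="/home/dead/merged/pab-fa-233k",   # model_dir from args.json
    repo_id="<namespace>/pab-fa-233k",             # placeholder repo id, not from this commit
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)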
args.json
ADDED
@@ -0,0 +1,314 @@
{
  "model": "/home/dead/merged/pab-fa-233k/",
  "model_type": "llama3_1",
  "model_revision": null,
  "task_type": "causal_lm",
  "torch_dtype": "bfloat16",
  "attn_impl": null,
  "num_labels": null,
  "rope_scaling": null,
  "device_map": null,
  "local_repo_path": null,
  "template": "llama3_2",
  "system": null,
  "max_length": 4096,
  "truncation_strategy": "delete",
  "max_pixels": null,
  "tools_prompt": "react_en",
  "padding_side": "right",
  "loss_scale": "default",
  "sequence_parallel_size": 1,
  "use_chat_template": false,
  "template_backend": "swift",
  "dataset": [
    "/home/dead/m/md60/dataset/quran/translate/tanzil/processed/all-1.jsonl",
    "/home/dead/m/md60/dataset/risale/risaleglobal.com/sqlite/sqlite-tr-en.jsonl",
    "/home/dead/m/md60/dataset/youtube/kubi/kubi-yt-1.jsonl",
    "/home/dead/m/md60/dataset/iarabi/processed/proc/fusus-yek.jsonl",
    "/home/dead/m/md60/dataset/iarabi/processed/proc/fusus-terzibaba.jsonl",
    "/home/dead/m/md60/dataset/iarabi/processed/proc/tefsiri-kebir.jsonl",
    "/home/dead/m/md60/dataset/rabbani/proc/rabbani-mektubat.jsonl",
    "/home/dead/m/md60/dataset/mevlana/proc/yek-mesnevi.jsonl",
    "/home/dead/m/md60/dataset/gazali/proc/gazali-ilahi-ahlak.jsonl",
    "/home/dead/m/md60/dataset/iarabi/futuhati-mekkiyye/processed/proc/ia-fm-1.jsonl",
    "/home/dead/m/md60/dataset/iarabi/futuhati-mekkiyye/processed/proc/ia-fm-2.jsonl",
    "/home/dead/m/md60/dataset/iarabi/futuhati-mekkiyye/processed/proc/ia-fm-3.jsonl",
    "/home/dead/m/md60/dataset/iarabi/futuhati-mekkiyye/processed/proc/ia-fm-4.jsonl"
  ],
  "val_dataset": [],
  "split_dataset_ratio": 0.01,
  "data_seed": 42,
  "dataset_num_proc": 1,
  "streaming": false,
  "enable_cache": false,
  "download_mode": "reuse_dataset_if_exists",
  "strict": false,
  "model_name": [
    "pab-fa"
  ],
  "model_author": [
    "dead"
  ],
  "custom_dataset_info": [],
  "quant_method": null,
  "quant_bits": null,
  "hqq_axis": null,
  "bnb_4bit_compute_dtype": "bfloat16",
  "bnb_4bit_quant_type": "nf4",
  "bnb_4bit_use_double_quant": true,
  "bnb_4bit_quant_storage": null,
  "max_new_tokens": 64,
  "temperature": 0.0,
  "top_k": null,
  "top_p": null,
  "repetition_penalty": null,
  "num_beams": 1,
  "stream": false,
  "stop_words": [],
  "logprobs": false,
  "ckpt_dir": "/home/dead/merged/pab-fa-233k/",
  "load_dataset_config": null,
  "lora_modules": [],
  "tuner_backend": "peft",
  "train_type": "lora",
  "adapters": [],
  "seed": 42,
  "model_kwargs": {},
  "load_args": true,
  "load_data_args": false,
  "use_hf": false,
  "hub_token": null,
  "custom_register_path": [],
  "ignore_args_error": false,
  "use_swift_lora": false,
  "output_dir": "/home/dead/tr-check/pab-fa/v13-20250116-080312",
  "overwrite_output_dir": false,
  "do_train": false,
  "do_eval": false,
  "do_predict": false,
  "eval_strategy": "steps",
  "prediction_loss_only": false,
  "per_device_train_batch_size": 1,
  "per_device_eval_batch_size": 1,
  "per_gpu_train_batch_size": null,
  "per_gpu_eval_batch_size": null,
  "gradient_accumulation_steps": 16,
  "eval_accumulation_steps": null,
  "eval_delay": 0,
  "torch_empty_cache_steps": null,
  "learning_rate": 3e-06,
  "weight_decay": 0.1,
  "adam_beta1": 0.9,
  "adam_beta2": 0.999,
  "adam_epsilon": 1e-08,
  "max_grad_norm": 1.0,
  "num_train_epochs": 2.0,
  "max_steps": -1,
  "lr_scheduler_type": "cosine",
  "lr_scheduler_kwargs": null,
  "warmup_ratio": 0.05,
  "warmup_steps": 0,
  "log_level": "passive",
  "log_level_replica": "warning",
  "log_on_each_node": true,
  "logging_dir": "/home/dead/tr-check/pab-fa/v13-20250116-080312/runs",
  "logging_strategy": "steps",
  "logging_first_step": true,
  "logging_steps": 5,
  "logging_nan_inf_filter": true,
  "save_strategy": "steps",
  "save_steps": 100.0,
  "save_total_limit": null,
  "save_safetensors": true,
  "save_on_each_node": false,
  "save_only_model": false,
  "restore_callback_states_from_checkpoint": false,
  "no_cuda": false,
  "use_cpu": false,
  "use_mps_device": false,
  "jit_mode_eval": false,
  "use_ipex": false,
  "bf16": true,
  "fp16": false,
  "fp16_opt_level": "O1",
  "half_precision_backend": "auto",
  "bf16_full_eval": false,
  "fp16_full_eval": false,
  "tf32": null,
  "local_rank": -1,
  "ddp_backend": null,
  "tpu_num_cores": null,
  "tpu_metrics_debug": false,
  "debug": null,
  "dataloader_drop_last": false,
  "eval_steps": 100.0,
  "dataloader_num_workers": 4,
  "dataloader_prefetch_factor": null,
  "past_index": -1,
  "run_name": null,
  "disable_tqdm": null,
  "remove_unused_columns": false,
  "label_names": null,
  "load_best_model_at_end": false,
  "metric_for_best_model": "loss",
  "greater_is_better": false,
  "ignore_data_skip": false,
  "fsdp": "",
  "fsdp_min_num_params": 0,
  "fsdp_config": null,
  "fsdp_transformer_layer_cls_to_wrap": null,
  "accelerator_config": {
    "dispatch_batches": false
  },
  "deepspeed": null,
  "label_smoothing_factor": 0.0,
  "optim": "adamw_torch",
  "optim_args": null,
  "adafactor": false,
  "group_by_length": false,
  "length_column_name": "length",
  "report_to": [
    "tensorboard"
  ],
  "ddp_find_unused_parameters": null,
  "ddp_bucket_cap_mb": null,
  "ddp_broadcast_buffers": null,
  "dataloader_pin_memory": true,
  "dataloader_persistent_workers": false,
  "skip_memory_metrics": true,
  "use_legacy_prediction_loop": false,
  "push_to_hub": false,
  "resume_from_checkpoint": null,
  "hub_model_id": null,
  "hub_strategy": "every_save",
  "hub_private_repo": null,
  "hub_always_push": false,
  "gradient_checkpointing": true,
  "gradient_checkpointing_kwargs": null,
  "include_inputs_for_metrics": false,
  "include_for_metrics": [],
  "eval_do_concat_batches": true,
  "fp16_backend": "auto",
  "evaluation_strategy": "steps",
  "push_to_hub_model_id": null,
  "push_to_hub_organization": null,
  "push_to_hub_token": null,
  "mp_parameters": "",
  "auto_find_batch_size": false,
  "full_determinism": false,
  "torchdynamo": null,
  "ray_scope": "last",
  "ddp_timeout": 1800,
  "torch_compile": false,
  "torch_compile_backend": null,
  "torch_compile_mode": null,
  "dispatch_batches": null,
  "split_batches": null,
  "include_tokens_per_second": false,
  "include_num_input_tokens_seen": false,
  "neftune_noise_alpha": null,
  "optim_target_modules": null,
  "batch_eval_metrics": false,
  "eval_on_start": false,
  "use_liger_kernel": false,
  "eval_use_gather_object": false,
  "average_tokens_across_devices": false,
  "sortish_sampler": false,
  "predict_with_generate": false,
  "generation_max_length": null,
  "generation_num_beams": null,
  "generation_config": null,
  "freeze_parameters": [],
  "freeze_parameters_ratio": 0.0,
  "trainable_parameters": [],
  "freeze_llm": false,
  "freeze_vit": true,
  "freeze_aligner": true,
  "target_modules": [
    "all-linear"
  ],
  "target_regex": null,
  "modules_to_save": [],
  "lora_rank": 32,
  "lora_alpha": 64,
  "lora_dropout": 0.05,
  "lora_bias": "none",
  "lora_dtype": null,
  "lorap_lr_ratio": null,
  "use_rslora": false,
  "use_dora": false,
  "lora_ga_batch_size": 2,
  "lora_ga_iters": 2,
  "lora_ga_max_length": 1024,
  "lora_ga_direction": "ArB2r",
  "lora_ga_scale": "stable",
  "lora_ga_stable_gamma": 16,
  "init_weights": true,
  "fourier_n_frequency": 2000,
  "fourier_scaling": 300.0,
  "boft_block_size": 4,
  "boft_block_num": 0,
  "boft_n_butterfly_factor": 1,
  "boft_dropout": 0.0,
  "vera_rank": 256,
  "vera_projection_prng_key": 0,
  "vera_dropout": 0.0,
  "vera_d_initial": 0.1,
  "adapter_act": "gelu",
  "adapter_length": 128,
  "use_galore": false,
  "galore_target_modules": null,
  "galore_rank": 128,
  "galore_update_proj_gap": 50,
  "galore_scale": 1.0,
  "galore_proj_type": "std",
  "galore_optim_per_parameter": false,
  "galore_with_embedding": false,
  "galore_quantization": false,
  "galore_proj_quant": false,
  "galore_proj_bits": 4,
  "galore_proj_group_size": 256,
  "galore_cos_threshold": 0.4,
  "galore_gamma_proj": 2,
  "galore_queue_size": 5,
  "adalora_target_r": 8,
  "adalora_init_r": 12,
  "adalora_tinit": 0,
  "adalora_tfinal": 0,
  "adalora_deltaT": 1,
  "adalora_beta1": 0.85,
  "adalora_beta2": 0.85,
  "adalora_orth_reg_weight": 0.5,
  "llamapro_num_new_blocks": 4,
  "llamapro_num_groups": null,
  "lisa_activated_layers": 0,
  "lisa_step_interval": 20,
  "reft_layer_key": null,
  "reft_layers": null,
  "reft_rank": 4,
  "reft_intervention_type": "LoreftIntervention",
  "reft_args": null,
  "use_liger": false,
  "model_layer_cls_name": null,
  "metric_warmup_step": 0,
  "fsdp_num": 1,
  "acc_steps": 1,
  "add_version": true,
  "resume_only_model": false,
  "check_model": true,
  "packing": false,
  "lazy_tokenize": false,
  "loss_type": null,
  "optimizer": null,
  "metric": null,
  "acc_strategy": "token",
  "rank": -1,
  "global_world_size": 1,
  "local_world_size": 1,
  "model_suffix": "pab-fa-233k",
"model_info": "ModelInfo(model_type='llama3_1', model_dir='/home/dead/merged/pab-fa-233k', torch_dtype=torch.bfloat16, max_model_len=131072, quant_method=None, quant_bits=None, config={'factor': 8.0, 'high_freq_factor': 4.0, 'low_freq_factor': 1.0, 'original_max_position_embeddings': 8192, 'rope_type': 'llama3'}, task_type='causal_lm', num_labels=None)",
"model_meta": "ModelMeta(model_type='llama3_1', model_groups=[ModelGroup(models=[Model(ms_model_id='LLM-Research/Meta-Llama-3.1-8B-Instruct', hf_model_id='meta-llama/Meta-Llama-3.1-8B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B-Instruct', hf_model_id='meta-llama/Meta-Llama-3.1-70B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B-Instruct', hf_model_id='meta-llama/Meta-Llama-3.1-405B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-8B', hf_model_id='meta-llama/Meta-Llama-3.1-8B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B', hf_model_id='meta-llama/Meta-Llama-3.1-70B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B', hf_model_id='meta-llama/Meta-Llama-3.1-405B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B-Instruct-FP8', hf_model_id='meta-llama/Meta-Llama-3.1-70B-Instruct-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B-Instruct-FP8', hf_model_id='meta-llama/Meta-Llama-3.1-405B-Instruct-FP8', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='LLM-Research/Meta-Llama-3.1-8B-Instruct-BNB-NF4', hf_model_id='hugging-quants/Meta-Llama-3.1-8B-Instruct-BNB-NF4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B-Instruct-bnb-4bit', hf_model_id='unsloth/Meta-Llama-3.1-70B-Instruct-bnb-4bit', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B-Instruct-BNB-NF4', hf_model_id='hugging-quants/Meta-Llama-3.1-405B-Instruct-BNB-NF4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-8B-Instruct-GPTQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-8B-Instruct-GPTQ-INT4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B-Instruct-GPTQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-70B-Instruct-GPTQ-INT4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B-Instruct-GPTQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-405B-Instruct-GPTQ-INT4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-8B-Instruct-AWQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B-Instruct-AWQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-70B-Instruct-AWQ-INT4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B-Instruct-AWQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-405B-Instruct-AWQ-INT4', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='AI-ModelScope/Llama-3.1-Nemotron-70B-Instruct-HF', hf_model_id='nvidia/Llama-3.1-Nemotron-70B-Instruct-HF', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='llama3_2', get_function=<function 
get_model_tokenizer_with_flash_attn at 0x79b658147ce0>, model_arch='llama', architectures=['LlamaForCausalLM'], additional_saved_files=[], torch_dtype=None, is_multimodal=False, is_reward=False, ignore_patterns=[], requires=['transformers>=4.43'], tags=[])",
"model_dir": "/home/dead/merged/pab-fa-233k",
"hub": "<class 'swift.hub.hub.MSHub'>",
"training_args": "Seq2SeqTrainingArguments(output_dir='/home/dead/tr-check/pab-fa/v13-20250116-080312', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.STEPS: 'steps'>, prediction_loss_only=False, per_device_train_batch_size=1, per_device_eval_batch_size=1, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=16, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=3e-06, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=2.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.05, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/home/dead/tr-check/pab-fa/v13-20250116-080312/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.STEPS: 'steps'>, save_steps=100, save_total_limit=None, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=100, dataloader_num_workers=4, dataloader_prefetch_factor=None, past_index=-1, run_name='/home/dead/tr-check/pab-fa/v13-20250116-080312', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed=None, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['tensorboard'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', evaluation_strategy='steps', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=1800, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, dispatch_batches=None, split_batches=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, 
eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, acc_strategy='token', sequence_parallel_size=1, check_model=True, train_sampler_random=True, is_encoder_decoder=False, metric_warmup_step=0, train_dataset_sample=-1, fsdp_num=1, acc_steps=1, train_type='lora', optimizer=None, galore_config=None)"
}
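args.json above is the argument dump written by the training framework (ms-swift, judging by the "swift" template backend and the MSHub reference); it records a LoRA run with tuner_backend "peft". As a rough illustration, the recorded LoRA hyperparameters map onto a peft LoraConfig like the sketch below; this is not the exact object the framework builds internally.

# Sketch: the LoRA settings recorded in args.json expressed as a peft LoraConfig.
# Passing the string "all-linear" for target_modules requires a recent peft release.
from peft import LoraConfig

lora_config = LoraConfig(
    task_type="CAUSAL_LM",        # "task_type": "causal_lm"
    r=32,                         # "lora_rank": 32
    lora_alpha=64,                # "lora_alpha": 64
    lora_dropout=0.05,            # "lora_dropout": 0.05
    bias="none",                  # "lora_bias": "none"
    target_modules="all-linear",  # "target_modules": ["all-linear"]
)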
config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/home/dead/merged/pab-fa-
+  "_name_or_path": "/home/dead/merged/pab-fa-233k",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -31,8 +31,8 @@
   "rope_theta": 500000.0,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.
-  "unsloth_version": "
+  "transformers_version": "4.48.0",
+  "unsloth_version": "2025.1.5",
   "use_cache": true,
   "vocab_size": 128256
 }
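config.json pins the architecture to LlamaForCausalLM with torch_dtype bfloat16, saved under transformers 4.48.0 (model_meta lists transformers>=4.43 as the requirement). A minimal loading sketch using the local path recorded in this commit:

# Sketch: loading the merged checkpoint at the dtype recorded in config.json.
# Swap model_dir for the Hub repo id once the upload is available there.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_dir = "/home/dead/merged/pab-fa-233k"
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForCausalLM.from_pretrained(
    model_dir,
    torch_dtype=torch.bfloat16,  # "torch_dtype": "bfloat16"
    device_map="auto",           # assumes accelerate is installed
)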
generation_config.json
CHANGED
@@ -7,5 +7,5 @@
   "pad_token_id": 128004,
   "temperature": 0.6,
   "top_p": 0.9,
-  "transformers_version": "4.
+  "transformers_version": "4.48.0"
 }
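generation_config.json records temperature 0.6, top_p 0.9, and pad_token_id 128004. A short generation sketch with those values, reusing the model and tokenizer loaded above; the prompt is a placeholder, and max_new_tokens follows the value recorded in args.json:

# Sketch: sampling with the values stored in generation_config.json.
prompt = "Merhaba"  # placeholder prompt
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(
    **inputs,
    do_sample=True,
    temperature=0.6,      # generation_config.json
    top_p=0.9,            # generation_config.json
    pad_token_id=128004,  # generation_config.json
    max_new_tokens=64,    # "max_new_tokens" from args.json
)
print(tokenizer.decode(output[0], skip_special_tokens=True))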
model-00001-of-00004.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:3c515bbdb8604e71a7cba850e490f9f3f48c0bf0bb655cea1a080b21667c8db2
 size 4976698672
model-00002-of-00004.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d5ec7db694a911d786637efb941f948aab8db41e4447d410f07dd62f65e409f0
 size 4999802720
model-00003-of-00004.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:416007463bba5bc7a903ef64a1196b5b64c57ea67aed71285912e63dffd8726b
 size 4915916176
model-00004-of-00004.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:1b3e489cecc5f19cbbf7fbc4a91ea888c184f4cbfad5dcc21380a1f6b5d27974
 size 1168138808
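The four safetensors entries above are Git LFS pointer files: each records the sha256 oid and byte size of a weight shard rather than the weights themselves. If desired, a downloaded shard can be checked against its pointer, for example:

# Sketch: verifying a downloaded shard against the sha256 oid in its LFS pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "3c515bbdb8604e71a7cba850e490f9f3f48c0bf0bb655cea1a080b21667c8db2"  # oid of model-00001 above
assert sha256_of("model-00001-of-00004.safetensors") == expected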
tokenizer_config.json
CHANGED
@@ -2053,6 +2053,7 @@
"chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if builtin_tools is defined or tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\\n\\n\"}}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n {%- for arg_name, arg_val in tool_call.arguments | items %}\n {{- arg_name + '=\"' + arg_val + '\"' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- else %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {%- endif %}\n {%- if builtin_tools is defined %}\n {#- This means we're in ipython mode #}\n {{- \"<|eom_id|>\" }}\n {%- else %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n",
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|eot_id|>",
+  "extra_special_tokens": {},
   "model_input_names": [
     "input_ids",
     "attention_mask"
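tokenizer_config.json carries the Llama 3.1-style chat template (the long "chat_template" entry above); this commit only adds the empty "extra_special_tokens" map. A short sketch of rendering a conversation with that template, assuming the tokenizer loaded earlier; the messages are placeholders:

# Sketch: applying the stored chat template before generation.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Merhaba, nasılsın?"},
]
prompt_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,  # appends the assistant header, per the template
    return_tensors="pt",
)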