Training in progress, step 5
- adapter_config.json +5 -5
- adapter_model.safetensors +1 -1
- args.json +5 -5
- logging.jsonl +6 -30
- training_args.bin +1 -1
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "gate_proj",
     "q_proj",
-    "k_proj",
-    "down_proj",
-    "v_proj",
     "up_proj",
-    "
+    "v_proj",
+    "gate_proj",
+    "o_proj",
+    "k_proj",
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
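
Note: this hunk reorders and completes the LoRA target_modules list so the adapter now covers all seven Llama projection layers (q/k/v/o_proj and gate/up/down_proj). A minimal sketch of how an equivalent adapter config could be built directly with peft; the rank and alpha values below are assumptions, not values taken from this commit:

# Minimal sketch (assumed hyperparameters): an equivalent peft LoraConfig.
# r and lora_alpha are NOT in this diff; only target_modules, task_type and
# use_dora are visible in the hunk above.
from peft import LoraConfig

lora_config = LoraConfig(
    r=16,            # assumed rank
    lora_alpha=32,   # assumed scaling
    target_modules=[
        "q_proj", "up_proj", "v_proj", "gate_proj",
        "o_proj", "k_proj", "down_proj",
    ],
    task_type="CAUSAL_LM",
    use_dora=False,
)
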
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:58bbf1f598f54c33aeae5460a72ec5b60ed494959fc210d9b2409c26b035e59a
 size 42002136
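
Note: the repo tracks only the Git LFS pointer (spec version, sha256 oid, byte size); the ~42 MB adapter weights live in LFS storage. A minimal sketch for checking a downloaded adapter_model.safetensors against the pointer's oid, assuming the file has already been fetched:

# Minimal sketch: verify a downloaded file against its Git LFS pointer oid.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "58bbf1f598f54c33aeae5460a72ec5b60ed494959fc210d9b2409c26b035e59a"
assert sha256_of("adapter_model.safetensors") == expected
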
args.json CHANGED
@@ -11,7 +11,7 @@
   "local_repo_path": null,
   "template": "llama3_2",
   "system": null,
-  "max_length":
+  "max_length": 2048,
   "truncation_strategy": "left",
   "max_pixels": null,
   "tools_prompt": "react_en",
@@ -71,7 +71,7 @@
   "custom_register_path": [],
   "ignore_args_error": false,
   "use_swift_lora": false,
-  "output_dir": "/workspace/output/
+  "output_dir": "/workspace/output/v5-20250108-223508",
   "overwrite_output_dir": false,
   "do_train": false,
   "do_eval": false,
@@ -101,7 +101,7 @@
   "log_level": "passive",
   "log_level_replica": "warning",
   "log_on_each_node": true,
-  "logging_dir": "/workspace/output/
+  "logging_dir": "/workspace/output/v5-20250108-223508/runs",
   "logging_strategy": "steps",
   "logging_first_step": true,
   "logging_steps": 1,
@@ -299,8 +299,8 @@
   "local_world_size": 1,
   "model_suffix": "Llama-3.1-8B-Instruct",
   "model_info": "ModelInfo(model_type='llama3_1', model_dir='/root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B-Instruct/snapshots/0e9e39f249a16976918f6564b8830bc894c89659', torch_dtype=torch.bfloat16, max_model_len=131072, quant_method='bnb', quant_bits=4, config={'factor': 8.0, 'low_freq_factor': 1.0, 'high_freq_factor': 4.0, 'original_max_position_embeddings': 8192, 'rope_type': 'llama3'}, task_type='causal_lm', num_labels=None)",
-  "model_meta": "ModelMeta(model_type='llama3_1', model_groups=[ModelGroup(models=[Model(ms_model_id='LLM-Research/Meta-Llama-3.1-8B-Instruct', hf_model_id='meta-llama/Meta-Llama-3.1-8B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B-Instruct', hf_model_id='meta-llama/Meta-Llama-3.1-70B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B-Instruct', hf_model_id='meta-llama/Meta-Llama-3.1-405B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-8B', hf_model_id='meta-llama/Meta-Llama-3.1-8B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B', hf_model_id='meta-llama/Meta-Llama-3.1-70B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B', hf_model_id='meta-llama/Meta-Llama-3.1-405B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B-Instruct-FP8', hf_model_id='meta-llama/Meta-Llama-3.1-70B-Instruct-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B-Instruct-FP8', hf_model_id='meta-llama/Meta-Llama-3.1-405B-Instruct-FP8', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='LLM-Research/Meta-Llama-3.1-8B-Instruct-BNB-NF4', hf_model_id='hugging-quants/Meta-Llama-3.1-8B-Instruct-BNB-NF4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B-Instruct-bnb-4bit', hf_model_id='unsloth/Meta-Llama-3.1-70B-Instruct-bnb-4bit', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B-Instruct-BNB-NF4', hf_model_id='hugging-quants/Meta-Llama-3.1-405B-Instruct-BNB-NF4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-8B-Instruct-GPTQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-8B-Instruct-GPTQ-INT4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B-Instruct-GPTQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-70B-Instruct-GPTQ-INT4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B-Instruct-GPTQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-405B-Instruct-GPTQ-INT4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-8B-Instruct-AWQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B-Instruct-AWQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-70B-Instruct-AWQ-INT4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B-Instruct-AWQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-405B-Instruct-AWQ-INT4', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='AI-ModelScope/Llama-3.1-Nemotron-70B-Instruct-HF', hf_model_id='nvidia/Llama-3.1-Nemotron-70B-Instruct-HF', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='llama3_2', get_function=<function get_model_tokenizer_with_flash_attn at
+  "model_meta": "ModelMeta(model_type='llama3_1', model_groups=[ModelGroup(models=[Model(ms_model_id='LLM-Research/Meta-Llama-3.1-8B-Instruct', hf_model_id='meta-llama/Meta-Llama-3.1-8B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B-Instruct', hf_model_id='meta-llama/Meta-Llama-3.1-70B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B-Instruct', hf_model_id='meta-llama/Meta-Llama-3.1-405B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-8B', hf_model_id='meta-llama/Meta-Llama-3.1-8B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B', hf_model_id='meta-llama/Meta-Llama-3.1-70B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B', hf_model_id='meta-llama/Meta-Llama-3.1-405B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B-Instruct-FP8', hf_model_id='meta-llama/Meta-Llama-3.1-70B-Instruct-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B-Instruct-FP8', hf_model_id='meta-llama/Meta-Llama-3.1-405B-Instruct-FP8', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='LLM-Research/Meta-Llama-3.1-8B-Instruct-BNB-NF4', hf_model_id='hugging-quants/Meta-Llama-3.1-8B-Instruct-BNB-NF4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B-Instruct-bnb-4bit', hf_model_id='unsloth/Meta-Llama-3.1-70B-Instruct-bnb-4bit', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B-Instruct-BNB-NF4', hf_model_id='hugging-quants/Meta-Llama-3.1-405B-Instruct-BNB-NF4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-8B-Instruct-GPTQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-8B-Instruct-GPTQ-INT4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B-Instruct-GPTQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-70B-Instruct-GPTQ-INT4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B-Instruct-GPTQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-405B-Instruct-GPTQ-INT4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-8B-Instruct-AWQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-70B-Instruct-AWQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-70B-Instruct-AWQ-INT4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='LLM-Research/Meta-Llama-3.1-405B-Instruct-AWQ-INT4', hf_model_id='hugging-quants/Meta-Llama-3.1-405B-Instruct-AWQ-INT4', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='AI-ModelScope/Llama-3.1-Nemotron-70B-Instruct-HF', hf_model_id='nvidia/Llama-3.1-Nemotron-70B-Instruct-HF', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='llama3_2', get_function=<function get_model_tokenizer_with_flash_attn at 0x75e08c533ac0>, model_arch='llama', architectures=['LlamaForCausalLM'], additional_saved_files=[], torch_dtype=None, is_multimodal=False, is_reward=False, ignore_patterns=[], requires=['transformers>=4.43'], tags=[])",
   "model_dir": "/root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B-Instruct/snapshots/0e9e39f249a16976918f6564b8830bc894c89659",
   "hub": "<class 'swift.hub.hub.HFHub'>",
-  "training_args": "Seq2SeqTrainingArguments(output_dir='/workspace/output/
+  "training_args": "Seq2SeqTrainingArguments(output_dir='/workspace/output/v5-20250108-223508', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.STEPS: 'steps'>, prediction_loss_only=False, per_device_train_batch_size=1, per_device_eval_batch_size=1, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=1, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=0.0003, weight_decay=0.01, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=3.0, max_steps=-1, lr_scheduler_type=<SchedulerType.LINEAR: 'linear'>, lr_scheduler_kwargs=None, warmup_ratio=0.1, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/workspace/output/v5-20250108-223508/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=1, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.STEPS: 'steps'>, save_steps=5, save_total_limit=2, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend='nccl', tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=100, dataloader_num_workers=0, dataloader_prefetch_factor=None, past_index=-1, run_name='/workspace/output/v5-20250108-223508', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed=None, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['comet_ml'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=True, resume_from_checkpoint=None, hub_model_id='jacpetro/llama-agentic-sft', hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs={'use_reentrant': False}, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', evaluation_strategy='steps', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=1800, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, dispatch_batches=None, split_batches=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, acc_strategy='token', sequence_parallel_size=1, check_model=True, train_sampler_random=True, is_encoder_decoder=False, metric_warmup_step=0, train_dataset_sample=-1, fsdp_num=1, acc_steps=1, train_type='lora', optimizer=None, galore_config=None)"
 }
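
Note: args.json records the resolved ms-swift run configuration; this commit fills in the values fixed at launch time (max_length, the timestamped output_dir and logging_dir, and the serialized training arguments). A minimal sketch for reading back the keys touched in this hunk, assuming the file sits in the working directory:

# Minimal sketch: inspect the run settings changed in this commit.
import json

with open("args.json") as f:
    args = json.load(f)

for key in ("max_length", "output_dir", "logging_dir", "model_suffix"):
    print(f"{key}: {args[key]}")
# Expected from the diff above: max_length=2048 and the
# /workspace/output/v5-20250108-223508 output/logging directories.
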
logging.jsonl CHANGED
@@ -1,30 +1,6 @@
-{"loss":
-{"loss":
-{"loss":
-{"loss": 1.
-{"loss": 1.
-{"loss":
-{"loss": 1.80353546, "token_acc": 0.65581395, "grad_norm": 0.67217851, "learning_rate": 5.676e-05, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.544043, "epoch": 0.05691057, "global_step/max_steps": "7/369", "percentage": "1.90%", "elapsed_time": "7s", "remaining_time": "6m 14s"}
-{"loss": 0.89555246, "token_acc": 0.78473581, "grad_norm": 0.40255135, "learning_rate": 6.486e-05, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.578777, "epoch": 0.06504065, "global_step/max_steps": "8/369", "percentage": "2.17%", "elapsed_time": "8s", "remaining_time": "6m 10s"}
-{"loss": 2.26151538, "token_acc": 0.50835322, "grad_norm": 0.85837853, "learning_rate": 7.297e-05, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.608553, "epoch": 0.07317073, "global_step/max_steps": "9/369", "percentage": "2.44%", "elapsed_time": "9s", "remaining_time": "6m 6s"}
-{"loss": 2.15133929, "token_acc": 0.55084746, "grad_norm": 1.18577528, "learning_rate": 8.108e-05, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.634457, "epoch": 0.08130081, "global_step/max_steps": "10/369", "percentage": "2.71%", "elapsed_time": "10s", "remaining_time": "6m 4s"}
-{"loss": 1.63149965, "token_acc": 0.62237762, "grad_norm": 0.70675153, "learning_rate": 8.919e-05, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.650612, "epoch": 0.08943089, "global_step/max_steps": "11/369", "percentage": "2.98%", "elapsed_time": "11s", "remaining_time": "6m 7s"}
-{"loss": 1.6930747, "token_acc": 0.64858491, "grad_norm": 0.75992346, "learning_rate": 9.73e-05, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.671389, "epoch": 0.09756098, "global_step/max_steps": "12/369", "percentage": "3.25%", "elapsed_time": "12s", "remaining_time": "6m 4s"}
-{"loss": 2.18108296, "token_acc": 0.58454106, "grad_norm": 1.50389647, "learning_rate": 0.00010541, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.689945, "epoch": 0.10569106, "global_step/max_steps": "13/369", "percentage": "3.52%", "elapsed_time": "13s", "remaining_time": "6m 2s"}
-{"loss": 1.73638082, "token_acc": 0.6183953, "grad_norm": 0.59281182, "learning_rate": 0.00011351, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.707752, "epoch": 0.11382114, "global_step/max_steps": "14/369", "percentage": "3.79%", "elapsed_time": "14s", "remaining_time": "5m 59s"}
-{"loss": 1.89742863, "token_acc": 0.53033268, "grad_norm": 0.60121316, "learning_rate": 0.00012162, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.724128, "epoch": 0.12195122, "global_step/max_steps": "15/369", "percentage": "4.07%", "elapsed_time": "15s", "remaining_time": "5m 56s"}
-{"loss": 1.58984172, "token_acc": 0.58458244, "grad_norm": 1.01862383, "learning_rate": 0.00012973, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.722935, "epoch": 0.1300813, "global_step/max_steps": "16/369", "percentage": "4.34%", "elapsed_time": "16s", "remaining_time": "6m 4s"}
-{"loss": 1.72712588, "token_acc": 0.57186544, "grad_norm": 1.11258352, "learning_rate": 0.00013784, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.737209, "epoch": 0.13821138, "global_step/max_steps": "17/369", "percentage": "4.61%", "elapsed_time": "17s", "remaining_time": "6m 1s"}
-{"loss": 1.48443031, "token_acc": 0.67521368, "grad_norm": 0.99743199, "learning_rate": 0.00014595, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.750267, "epoch": 0.14634146, "global_step/max_steps": "18/369", "percentage": "4.88%", "elapsed_time": "18s", "remaining_time": "5m 58s"}
-{"loss": 1.01479948, "token_acc": 0.77894737, "grad_norm": 0.84560066, "learning_rate": 0.00015405, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.76235, "epoch": 0.15447154, "global_step/max_steps": "19/369", "percentage": "5.15%", "elapsed_time": "19s", "remaining_time": "5m 55s"}
-{"loss": 0.96872497, "token_acc": 0.81604697, "grad_norm": 0.63404536, "learning_rate": 0.00016216, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.773341, "epoch": 0.16260163, "global_step/max_steps": "20/369", "percentage": "5.42%", "elapsed_time": "20s", "remaining_time": "5m 53s"}
-{"loss": 1.54066253, "token_acc": 0.64589235, "grad_norm": 1.19960952, "learning_rate": 0.00017027, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.778297, "epoch": 0.17073171, "global_step/max_steps": "21/369", "percentage": "5.69%", "elapsed_time": "21s", "remaining_time": "5m 53s"}
-{"loss": 1.09321463, "token_acc": 0.75870647, "grad_norm": 0.98395354, "learning_rate": 0.00017838, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.788066, "epoch": 0.17886179, "global_step/max_steps": "22/369", "percentage": "5.96%", "elapsed_time": "22s", "remaining_time": "5m 51s"}
-{"loss": 1.52431905, "token_acc": 0.62971698, "grad_norm": 1.01811945, "learning_rate": 0.00018649, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.797395, "epoch": 0.18699187, "global_step/max_steps": "23/369", "percentage": "6.23%", "elapsed_time": "23s", "remaining_time": "5m 49s"}
-{"loss": 1.36404324, "token_acc": 0.68711656, "grad_norm": 1.43212712, "learning_rate": 0.00019459, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.806093, "epoch": 0.19512195, "global_step/max_steps": "24/369", "percentage": "6.50%", "elapsed_time": "24s", "remaining_time": "5m 47s"}
-{"loss": 1.73998666, "token_acc": 0.58227848, "grad_norm": 1.83553851, "learning_rate": 0.0002027, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.813782, "epoch": 0.20325203, "global_step/max_steps": "25/369", "percentage": "6.78%", "elapsed_time": "25s", "remaining_time": "5m 45s"}
-{"loss": 1.37766147, "token_acc": 0.64367816, "grad_norm": 1.23328626, "learning_rate": 0.00021081, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.813136, "epoch": 0.21138211, "global_step/max_steps": "26/369", "percentage": "7.05%", "elapsed_time": "26s", "remaining_time": "5m 47s"}
-{"loss": 1.86809516, "token_acc": 0.5654321, "grad_norm": 1.28665292, "learning_rate": 0.00021892, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.820506, "epoch": 0.2195122, "global_step/max_steps": "27/369", "percentage": "7.32%", "elapsed_time": "27s", "remaining_time": "5m 45s"}
-{"loss": 1.36566472, "token_acc": 0.69902913, "grad_norm": 3.60822392, "learning_rate": 0.00022703, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.827584, "epoch": 0.22764228, "global_step/max_steps": "28/369", "percentage": "7.59%", "elapsed_time": "28s", "remaining_time": "5m 43s"}
-{"loss": 1.76258373, "token_acc": 0.62043796, "grad_norm": 1.86583698, "learning_rate": 0.00023514, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.834279, "epoch": 0.23577236, "global_step/max_steps": "29/369", "percentage": "7.86%", "elapsed_time": "29s", "remaining_time": "5m 41s"}
-{"loss": 1.77485406, "token_acc": 0.5902439, "grad_norm": 1.40830421, "learning_rate": 0.00024324, "memory(GiB)": 9.6, "train_speed(iter/s)": 0.84061, "epoch": 0.24390244, "global_step/max_steps": "30/369", "percentage": "8.13%", "elapsed_time": "30s", "remaining_time": "5m 39s"}
+{"loss": 1.51931369, "token_acc": 0.65133395, "grad_norm": 0.5639717, "learning_rate": 8.11e-06, "memory(GiB)": 11.56, "train_speed(iter/s)": 0.103188, "epoch": 0.00813008, "global_step/max_steps": "1/369", "percentage": "0.27%", "elapsed_time": "3s", "remaining_time": "24m 14s"}
+{"loss": 1.25695169, "token_acc": 0.70556827, "grad_norm": 0.51285839, "learning_rate": 1.622e-05, "memory(GiB)": 11.56, "train_speed(iter/s)": 0.150757, "epoch": 0.01626016, "global_step/max_steps": "2/369", "percentage": "0.54%", "elapsed_time": "7s", "remaining_time": "23m 1s"}
+{"loss": 1.51376784, "token_acc": 0.65096953, "grad_norm": 0.50419378, "learning_rate": 2.432e-05, "memory(GiB)": 11.56, "train_speed(iter/s)": 0.178341, "epoch": 0.02439024, "global_step/max_steps": "3/369", "percentage": "0.81%", "elapsed_time": "11s", "remaining_time": "22m 31s"}
+{"loss": 1.44488692, "token_acc": 0.65977444, "grad_norm": 0.32598454, "learning_rate": 3.243e-05, "memory(GiB)": 11.56, "train_speed(iter/s)": 0.196312, "epoch": 0.03252033, "global_step/max_steps": "4/369", "percentage": "1.08%", "elapsed_time": "14s", "remaining_time": "22m 15s"}
+{"loss": 1.1349889, "token_acc": 0.72309028, "grad_norm": 0.54342312, "learning_rate": 4.054e-05, "memory(GiB)": 11.56, "train_speed(iter/s)": 0.208955, "epoch": 0.04065041, "global_step/max_steps": "5/369", "percentage": "1.36%", "elapsed_time": "18s", "remaining_time": "22m 4s"}
+{"loss": 1.64801562, "token_acc": 0.6032849, "grad_norm": 0.45636648, "learning_rate": 4.865e-05, "memory(GiB)": 11.56, "train_speed(iter/s)": 0.216078, "epoch": 0.04878049, "global_step/max_steps": "6/369", "percentage": "1.63%", "elapsed_time": "22s", "remaining_time": "22m 12s"}
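
Note: this hunk replaces the previous run's 30 log records with the first 6 steps of the current run; each line of logging.jsonl is one JSON object per optimizer step (loss, token_acc, grad_norm, learning_rate, memory, throughput). A minimal sketch for parsing it into per-step metrics:

# Minimal sketch: parse logging.jsonl into per-step training metrics.
import json

steps = []
with open("logging.jsonl") as f:
    for line in f:
        line = line.strip()
        if line:
            steps.append(json.loads(line))

for rec in steps:
    print(rec["global_step/max_steps"], rec["loss"], rec["learning_rate"])
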
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:29d7a1e2470267b2cbfc2e018516909dbdd972bbf3631610d543ea0848243e2f
 size 5880
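
Note: training_args.bin is the pickled Seq2SeqTrainingArguments object the Trainer saves alongside the adapter (the same values are mirrored as a string in args.json above). A minimal sketch for loading it, assuming a transformers install compatible with the one that wrote the file; recent torch releases default to weights_only=True, which has to be disabled for pickled argument objects:

# Minimal sketch: load the pickled training arguments saved with this checkpoint.
import torch

training_args = torch.load("training_args.bin", weights_only=False)
print(training_args.learning_rate)  # 0.0003 per the serialized args above
print(training_args.save_steps)     # 5, matching "Training in progress, step 5"
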