shibing624 committed
Commit b00f6e2
1 Parent(s): 2526cc8

Update README.md

Files changed (1):
  1. README.md +56 -1
README.md CHANGED
@@ -32,7 +32,62 @@ The overall performance of model on QA **test**:
 
  training args:
  ```json
- {"per_device_train_batch_size": 8, "per_device_eval_batch_size": 8, "per_gpu_train_batch_size": null, "per_gpu_eval_batch_size": null, "gradient_accumulation_steps": 1, "eval_accumulation_steps": null, "eval_delay": 0, "learning_rate": 2e-05, "weight_decay": 0.0, "adam_beta1": 0.9, "adam_beta2": 0.999, "adam_epsilon": 1e-08, "max_grad_norm": 1.0, "num_train_epochs": 10.0, "max_steps": -1, "lr_scheduler_type": "linear", "warmup_ratio": 0.0, "warmup_steps": 50, "log_level": "passive", "log_level_replica": "warning", "log_on_each_node": true, "logging_dir": "outputs-ziya-llama-13b-sft-med-v2/logs", "logging_strategy": "steps", "logging_first_step": false, "logging_steps": 50, "logging_nan_inf_filter": true, "save_strategy": "steps", "save_steps": 50, "save_total_limit": 3, "save_safetensors": false, "save_on_each_node": false, "no_cuda": false, "use_mps_device": false, "seed": 42, "data_seed": null, "jit_mode_eval": false, "use_ipex": false, "bf16": false, "fp16": true, "fp16_opt_level": "O1", "half_precision_backend": "cuda_amp", "bf16_full_eval": false, "fp16_full_eval": false, "tf32": null, "local_rank": 0, "xpu_backend": null, "tpu_num_cores": null, "tpu_metrics_debug": false, "debug": [], "dataloader_drop_last": false, "eval_steps": 50, "dataloader_num_workers": 0, "past_index": -1, "run_name": "outputs-ziya-llama-13b-sft-med-v2", "disable_tqdm": false, "remove_unused_columns": false, "label_names": null, "load_best_model_at_end": true, "metric_for_best_model": "loss", "greater_is_better": false, "ignore_data_skip": false, "sharded_ddp": [], "fsdp": [], "fsdp_min_num_params": 0, "fsdp_config": { "fsdp_min_num_params": 0, "xla": false, "xla_fsdp_grad_ckpt": false }, "fsdp_transformer_layer_cls_to_wrap": null, "deepspeed": null, "label_smoothing_factor": 0.0, "optim": "adamw_torch", "optim_args": null, "adafactor": false, "group_by_length": false, "length_column_name": "length", "report_to": [ "tensorboard" ], "ddp_find_unused_parameters": false, "ddp_bucket_cap_mb": null, "dataloader_pin_memory": true, "skip_memory_metrics": true, "use_legacy_prediction_loop": false, "push_to_hub": false, "resume_from_checkpoint": null, "hub_model_id": null, "hub_strategy": "every_save", "hub_token": "<hub_token>", "hub_private_repo": false, "gradient_checkpointing": false, "include_inputs_for_metrics": false, "fp16_backend": "auto", "push_to_hub_model_id": null, "push_to_hub_organization": null, "push_to_hub_token": "<push_to_hub_token>", "mp_parameters": "", "auto_find_batch_size": false, "full_determinism": false, "torchdynamo": null, "ray_scope": "last", "ddp_timeout": 1800, "torch_compile": false, "torch_compile_backend": null, "torch_compile_mode": null }
+ {"per_device_train_batch_size": 8,
+ "per_device_eval_batch_size": 8,
+ "gradient_accumulation_steps": 1,
+ "eval_accumulation_steps": null,
+ "eval_delay": 0,
+ "learning_rate": 2e-05,
+ "weight_decay": 0.0,
+ "num_train_epochs": 3.0,
+ "max_steps": -1,
+ "lr_scheduler_type": "linear",
+ "warmup_ratio": 0.0,
+ "warmup_steps": 50,
+ "logging_dir": "outputs-ziya-llama-13b-sft-med-v2/logs",
+ "logging_strategy": "steps",
+ "logging_steps": 50,
+ "save_strategy": "steps",
+ "save_steps": 50,
+ "save_total_limit": 30,
+ "save_safetensors": false,
+ "save_on_each_node": false,
+ "no_cuda": false,
+ "use_mps_device": false,
+ "seed": 42,
+ "data_seed": null,
+ "jit_mode_eval": false,
+ "use_ipex": false,
+ "bf16": false,
+ "fp16": true,
+ "fp16_opt_level": "O1",
+ "half_precision_backend": "cuda_amp",
+ "bf16_full_eval": false,
+ "fp16_full_eval": false,
+ "tf32": null,
+ "local_rank": 0,
+ "dataloader_drop_last": false,
+ "eval_steps": 50,
+ "dataloader_num_workers": 0,
+ "run_name": "outputs-ziya-llama-13b-sft-med-v2",
+ "remove_unused_columns": false,
+ "label_names": null,
+ "load_best_model_at_end": true,
+ "metric_for_best_model": "loss",
+ "greater_is_better": false,
+ "deepspeed": null,
+ "label_smoothing_factor": 0.0,
+ "optim": "adamw_torch",
+ "optim_args": null,
+ "adafactor": false,
+ "group_by_length": false,
+ "length_column_name": "length",
+ "report_to": [ "tensorboard" ],
+ "ddp_find_unused_parameters": false,
+ "gradient_checkpointing": true,
+ "fp16_backend": "auto",
+ "ddp_timeout": 1800,
+ "torch_compile": false }
  ```
 
  train loss:
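
For reference, the sketch below is not part of this commit: it shows how the updated args above could be passed to Hugging Face `transformers.TrainingArguments`. The `output_dir` and `evaluation_strategy` values are assumptions (inferred from `run_name`/`logging_dir` and `eval_steps`), and a CUDA device is assumed because `fp16` is enabled; argument names reflect a transformers release contemporary with this commit.

```python
# Sketch only: reconstructing the updated training args from this diff as
# TrainingArguments. output_dir and evaluation_strategy are assumptions,
# not values taken from the commit.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="outputs-ziya-llama-13b-sft-med-v2",  # assumed from run_name
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=1,
    learning_rate=2e-5,
    weight_decay=0.0,
    num_train_epochs=3.0,
    lr_scheduler_type="linear",
    warmup_steps=50,
    logging_dir="outputs-ziya-llama-13b-sft-med-v2/logs",
    logging_steps=50,
    evaluation_strategy="steps",  # assumed (newer versions: eval_strategy); needed for load_best_model_at_end
    eval_steps=50,
    save_strategy="steps",
    save_steps=50,
    save_total_limit=30,
    fp16=True,  # as in the commit; requires a CUDA device
    gradient_checkpointing=True,
    load_best_model_at_end=True,
    metric_for_best_model="loss",
    greater_is_better=False,
    optim="adamw_torch",
    report_to=["tensorboard"],
    seed=42,
    ddp_timeout=1800,
)
```

These arguments would then be handed to a `Trainer` together with the model, tokenizer, and the medical SFT dataset to reproduce the schedule described above.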