nike00811 committed
Commit aca0f2c
1 Parent(s): 50b8204

Training in progress, step 27
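The step count follows from the run's batch arithmetic: the DPOConfig dump in trainin.log below shows per_device_train_batch_size=8 and gradient_accumulation_steps=2, run_dpo_2.sh launches 4 processes, and the training split has 1728 rows, so one epoch is exactly 27 optimizer steps. A minimal sanity-check sketch in Python, using only values taken from the files below:

per_device_train_batch_size = 8   # DPOConfig, trainin.log
gradient_accumulation_steps = 2   # DPOConfig, trainin.log
num_processes = 4                 # --num_processes 4, run_dpo_2.sh
global_batch = per_device_train_batch_size * gradient_accumulation_steps * num_processes
print(global_batch)          # 64, the train_batch_size DeepSpeed reports below
print(1728 // global_batch)  # 27 optimizer steps for one epoch over 1728 rows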

config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "_name_or_path": "/share/home/models/CHT-Mistral-7B/CHT-6/",
+   "architectures": [
+     "MistralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 32768,
+   "model_type": "mistral",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 10000.0,
+   "sliding_window": 4096,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.42.4",
+   "use_cache": false,
+   "vocab_size": 32000
+ }
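This is a stock Mistral-7B layout (32 layers, grouped-query attention with 32 query heads sharing 8 key/value heads); "use_cache" is false, likely because the checkpoint was saved mid-training with gradient checkpointing enabled. A minimal sketch of loading a checkpoint that carries this config with transformers, assuming the committed files sit in the current directory (the path is illustrative, not part of the commit):

import torch
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained(".")  # parses the config.json above
assert config.model_type == "mistral" and config.num_key_value_heads == 8
model = AutoModelForCausalLM.from_pretrained(
    ".",
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" above
)
model.config.use_cache = True  # re-enable the KV cache for inference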
run_dpo_2.sh ADDED
@@ -0,0 +1,34 @@
+ # export NCCL_P2P_DISABLE="1"
+ # export NCCL_IB_DISABLE="1"
+ export OMP_NUM_THREADS=1
+ export ACCELERATE_LOG_LEVEL=info
+
+
+ export WANDB_PROJECT='Mistral-DPO'
+ export WANDB_ENTITY="ckip"
+
+ output_dir="DPO/CHT-6_20240715-1605"
+ # Abort if the output directory already exists
+ if [ -d "${output_dir}" ]; then
+     echo "The directory already exists."
+     exit 1
+ fi
+ mkdir -p "${output_dir}"
+ current_script=$0
+ cp "${current_script}" "${output_dir}"
+
+
+ accelerate launch \
+     --config_file alignment-handbook/recipes/accelerate_configs/deepspeed_zero3.yaml \
+     --num_processes 4 \
+     test_dpo.py \
+     "dpo_config_full.yaml" \
+     --load_in_4bit=false \
+     --logging_steps=1 \
+     --model_name_or_path="/share/home/models/CHT-Mistral-7B/CHT-6/" \
+     --train_file="data/training_ptt_dpo-include_id-converted.jsonl" \
+     --validation_file="data/testing_ptt_dpo_include_id-converted.jsonl" \
+     --cache_dir="cache_dir" \
+     --output_dir="${output_dir}" \
+     | tee "${output_dir}/trainin.log"
+
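Neither test_dpo.py nor dpo_config_full.yaml is part of this commit, so the exact entry point is unknown. Below is a hedged sketch of a comparable minimal DPO run with trl's DPOTrainer, assembled only from values visible in the script above and in the DPOConfig dump inside trainin.log; treat it as a reconstruction under those assumptions, not the committed code:

from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

model_path = "/share/home/models/CHT-Mistral-7B/CHT-6/"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)

# JSONL files with 'prompt', 'chosen', 'rejected' columns, as in the log.
dataset = load_dataset(
    "json",
    data_files={
        "train": "data/training_ptt_dpo-include_id-converted.jsonl",
        "test": "data/testing_ptt_dpo_include_id-converted.jsonl",
    },
)

args = DPOConfig(
    output_dir="DPO/CHT-6_20240715-1605",
    beta=0.01,                      # from the DPOConfig dump in trainin.log
    loss_type="sigmoid",
    max_length=1024,
    max_prompt_length=512,
    per_device_train_batch_size=8,
    gradient_accumulation_steps=2,
    learning_rate=5e-7,
    num_train_epochs=1,
    bf16=True,
)
trainer = DPOTrainer(
    model,                          # with no ref_model given, trl copies the policy as reference
    args=args,
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
    tokenizer=tokenizer,
)
trainer.train()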
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
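tokenizer.model is stored through Git LFS, so the diff shows only the pointer: the spec version, the object's SHA-256, and its size in bytes. Once the LFS object has been pulled, a local copy can be checked against the pointer with a short Python sketch like this (the filename is assumed to be in the working directory):

import hashlib
import os

with open("tokenizer.model", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
print(digest == "dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055")
print(os.path.getsize("tokenizer.model") == 493443)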
tokenizer_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% set messages = messages[1:] %}{% else %}{% set system_message = 'SYSTEM:你是由Mistral AI所開發、經台灣詞庫小組(CKIP)後續優化的語言模型,優化目標為提升台灣正體中文任務的表現與在地化,主要的使用者來自於台灣。\n\n' %}{% endif %}{{system_message}}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate system/user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ 'Human:\n' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant:\n' + message['content'] + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 4096,
+   "pad_token": "</s>",
+   "padding_side": "left",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "truncation_side": "left",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
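The chat_template above injects a default Traditional-Chinese CKIP system prompt when the conversation does not begin with a system message, renders turns as "Human:\n..." and "Assistant:\n..." (closing assistant turns with </s>), and raises if user and assistant roles fail to alternate. A minimal rendering sketch, assuming the tokenizer files from this commit are loadable from the current directory:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")
messages = [{"role": "user", "content": "你好"}]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,  # appends the trailing "Assistant:\n"
)
print(text)  # "<s>SYSTEM:..." default system prompt, then "Human:\n你好\n\n", then "Assistant:\n"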
trainin.log ADDED
@@ -0,0 +1,840 @@
+ [2024-07-15 08:07:05,974] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+ [2024-07-15 08:08:29,643] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+ [2024-07-15 08:08:29,660] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+ [2024-07-15 08:08:29,667] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+ [2024-07-15 08:08:29,671] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+ [2024-07-15 08:08:55,588] [INFO] [comm.py:637:init_distributed] cdb=None
+ [2024-07-15 08:08:55,588] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+ [2024-07-15 08:08:55,591] [INFO] [comm.py:637:init_distributed] cdb=None
+ [2024-07-15 08:08:55,602] [INFO] [comm.py:637:init_distributed] cdb=None
+ 2024-07-15 08:08:55 - INFO - __main__ - Model parameters ModelArguments(base_model_revision=None, model_name_or_path='/share/home/models/CHT-Mistral-7B/CHT-6/', model_revision='main', model_code_revision=None, torch_dtype=None, tokenizer_name_or_path=None, trust_remote_code=False, use_flash_attention_2=False, use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False, bnb_4bit_quant_storage='uint8')
+ 2024-07-15 08:08:55 - INFO - __main__ - Data parameters DataArguments(chat_template=None, text_column='text', preprocessing_num_workers=12, truncation_side=None, auto_insert_empty_system_msg=True, train_file='data/training_ptt_dpo-include_id-converted.jsonl', validation_file='data/testing_ptt_dpo_include_id-converted.jsonl', cache_dir='cache_dir')
+ 2024-07-15 08:08:55 - INFO - __main__ - Training/evaluation parameters DPOConfig(
+ _n_gpu=1,
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ auto_find_batch_size=False,
+ batch_eval_metrics=False,
+ beta=0.01,
+ bf16=True,
+ bf16_full_eval=False,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_persistent_workers=False,
+ dataloader_pin_memory=True,
+ dataloader_prefetch_factor=None,
+ dataset_num_proc=None,
+ ddp_backend=None,
+ ddp_broadcast_buffers=None,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800,
+ debug=[],
+ deepspeed=None,
+ disable_dropout=True,
+ disable_tqdm=False,
+ dispatch_batches=None,
+ do_eval=True,
+ do_predict=False,
+ do_train=False,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_do_concat_batches=True,
+ eval_on_start=False,
+ eval_steps=100,
+ eval_strategy=steps,
+ evaluation_strategy=steps,
+ f_alpha_divergence_coef=1.0,
+ f_divergence_type=FDivergenceType.REVERSE_KL,
+ force_use_ref_model=False,
+ fp16=False,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ generate_during_eval=False,
+ gradient_accumulation_steps=2,
+ gradient_checkpointing=True,
+ gradient_checkpointing_kwargs={'use_reentrant': False},
+ greater_is_better=None,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_always_push=False,
+ hub_model_id=None,
+ hub_model_revision=main,
+ hub_private_repo=False,
+ hub_strategy=every_save,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_inputs_for_metrics=False,
+ include_num_input_tokens_seen=False,
+ include_tokens_per_second=False,
+ is_encoder_decoder=None,
+ jit_mode_eval=False,
+ label_names=None,
+ label_pad_token_id=-100,
+ label_smoothing=0,
+ label_smoothing_factor=0.0,
+ learning_rate=5e-07,
+ length_column_name=length,
+ load_best_model_at_end=False,
+ local_rank=0,
+ log_level=info,
+ log_level_replica=warning,
+ log_on_each_node=True,
+ logging_dir=results/DPO/output_dir/runs/Jul15_08-08-55_CKIP-LLM-VM1,
+ logging_first_step=True,
+ logging_nan_inf_filter=True,
+ logging_steps=1.0,
+ logging_strategy=steps,
+ loss_type=sigmoid,
+ lr_scheduler_kwargs={},
+ lr_scheduler_type=cosine,
+ max_grad_norm=1.0,
+ max_length=1024,
+ max_prompt_length=512,
+ max_steps=-1,
+ max_target_length=None,
+ metric_for_best_model=None,
+ model_adapter_name=None,
+ model_init_kwargs=None,
+ mp_parameters=,
+ neftune_noise_alpha=None,
+ no_cuda=False,
+ num_train_epochs=1,
+ optim=adamw_torch,
+ optim_args=None,
+ optim_target_modules=None,
+ output_dir=DPO/CHT-6_20240715-1605,
+ overwrite_output_dir=False,
+ padding_value=None,
+ past_index=-1,
+ per_device_eval_batch_size=8,
+ per_device_train_batch_size=8,
+ precompute_ref_log_probs=False,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ ref_adapter_name=None,
+ ref_model_init_kwargs=None,
+ ref_model_mixup_alpha=0.9,
+ ref_model_sync_steps=64,
+ reference_free=False,
+ remove_unused_columns=False,
+ report_to=['tensorboard', 'wandb'],
+ restore_callback_states_from_checkpoint=False,
+ resume_from_checkpoint=None,
+ rpo_alpha=None,
+ run_name=None,
+ save_on_each_node=False,
+ save_only_model=False,
+ save_safetensors=True,
+ save_steps=100,
+ save_strategy=steps,
+ save_total_limit=1,
+ seed=42,
+ skip_memory_metrics=True,
+ split_batches=None,
+ sync_ref_model=False,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ truncation_mode=keep_end,
+ use_cpu=False,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_mps_device=False,
+ warmup_ratio=0.1,
+ warmup_steps=0,
+ )
+ DatasetDict({
+     train: Dataset({
+         features: ['prompt', 'chosen', 'rejected', 'messages'],
+         num_rows: 1728
+     })
+     test: Dataset({
+         features: ['prompt', 'chosen', 'rejected', 'messages'],
+         num_rows: 91
+     })
+ })
+ DatasetDict({
+     train: Dataset({
+         features: ['prompt', 'chosen', 'rejected', 'messages'],
+         num_rows: 1728
+     })
+     test: Dataset({
+         features: ['prompt', 'chosen', 'rejected', 'messages'],
+         num_rows: 91
+     })
+ })
+ DatasetDict({
+     train: Dataset({
+         features: ['prompt', 'chosen', 'rejected', 'messages'],
+         num_rows: 1728
+     })
+     test: Dataset({
+         features: ['prompt', 'chosen', 'rejected', 'messages'],
+         num_rows: 91
+     })
+ })
+ 2024-07-15 08:08:57 - INFO - __main__ - Decontaminated 0 (0.00%) samples from the training set.
+ 2024-07-15 08:08:57 - INFO - __main__ - Prompt sample 1309 of the raw training set:
+
+ <s>Human:
+ 文章:輿情作者,日期,輿情內文
+ fspt01,6/20,1.近期晚上4G行動網路常常中斷,從110/05/28開始,特別開始記錄4G網路品質。 我也不是沒有先跟中華電信客服人員反應遇到的障礙,但畢竟是跟固定設備投資 有關,客服人員、機房人員只能敷衍了事。 2.一直以來,很少關心公民議題,這次被搞到一肚子火,認真思考有那些機構,可以 讓我們老百姓反應中華電信壟斷性議題。 網路上看到這則, http://www.ftc.gov.tw/uploadDecision/dd3d922b-bc37-4911-8141-b42779405ab4.pdf 我是看不太懂,大概了解「公平交易委員會」處分「中華電信股份有限公司」新台幣 500萬元罰鍰。深深感到,這或許是一條路。 3.因為我不懂很多東西,希望各個專業領域版友能提供一些資訊。
+ ebbf,06/20 23:53,有人壓著你只能選種花?
+ bcd21086999,06/21 00:22,噓
+ cv17,06/21 00:24,???
+ obuibo2000,06/21 00:26,任何室內網路不良的,我只推固網.
+ obuibo2000,06/21 00:26,要賭便宜有便宜的作法,地點好或是挑業者。
+ ufoubjlbotp,06/21 00:36,人家罰固網廣告不實,與壟斷何干
+ kbto4560,06/21 00:38,中華電信問題:使用者問題
+ kbto4560,06/21 00:38,XXX電信問題:誰叫你要用XXX
+ djuzqpsu,06/21 00:41,光世代的廣告跟4G訊號有啥關係???
+ gvlbp21,06/21 01:02,呵...為何不攜碼出去?
+ qbvm40807,06/21 01:10,真的爛要求無條件解約就好了 台灣有5間電信 比中國
+ qbvm40807,06/21 01:10,還多欸 更不用說背後也都是獨立的大財團 說壟斷? 你
+ qbvm40807,06/21 01:10,是認真的嗎???
+ lojhiu326,06/21 01:32,扯到壟斷就太過了XD 當其他電信塑膠?
+ lplpllp416,06/21 01:38,去申請別家試用看看,哪家情況好就跳哪家,你這種
+ lplpllp416,06/21 01:38,情況下為何執著於同一家…
+ sa759,06/21 01:50,壟...壟斷4G?
+ TvohIzvo,06/21 02:04,推!權益是爭取來的!
+ Bovunjbp,06/21 02:46,不是湊足關鍵字就能遮掩你是想公審中華電信的奧客
+ zffpvp,06/21 03:02,中華4G真的爛 標5G亂喊價 把4G用戶拖下水
+ bsjbeof,06/21 04:39,也許你家是其他業者黃金地段 快跳槽吧 這沒壟斷 XD
+ bsjbeof,06/21 04:43,種花也許在你家地段受到其他抗議而放棄但不構成壟斷
+ nbocpx77,06/21 06:26,你先翻開合約好好看一下 就不會想去浪費這個時間了
+ Epsffo1010,06/21 06:55,.........
+ ay27771531,06/21 07:20,所以你要解釋那裡壟斷了嗎
+ Gn4o,06/21 07:28,……
+ ex7931425,06/21 07:58,中華電信壟斷?
+ h6i630,06/21 09:26,你不是只有中華可選,何來壟斷=.=?
+ hbnfljoh,06/21 10:19,不懂的東西你可以查清楚了在上來發文 顆顆
+ tpwb0809,06/21 11:17,測智商的發文
+ SD8377,06/21 11:50,什麼啦
+ NfohYjbo,06/21 11:56,中華4g哪有壟斷,原po是不是搞錯什麼了
+ kvtujo332805,06/21 12:07,過了超過12小時沒回應
+ kvtujo332805,06/21 12:07,…感覺又是一篇射後不理文了
+ ol950357,06/21 12:09,???????????壟斷在那裡???
+ tpoza700,06/21 12:19,?
+ cfopbjdp,06/21 12:33,台灣鯛好吃
+ bqqmfxbsn,06/21 14:19,呃呃呃,該從哪裡開始吐槽
+ ibolthp2012,06/21 15:39,這叫公民議題?
+ nfqbmjoh,06/21 16:12,請先理解甚麼是行動網路跟固定網路好嗎 別的先不說
+ nfqbmjoh,06/21 16:12,拿光世代的資料比行動網路???????
+ tijofMpwf,06/21 18:41,這是公民議題?
+ dbnfmmbmb,06/22 03:05,…
+ djnpof : 認真回,06/22 10:02,能歸責於種花的大概只有網路太多人使用以致
+ djnpof,06/22 10:05,網路中斷 其他很難要歸責種花 (干擾或訊號不佳等)
+ djnpof,06/22 10:08,曾有聽過有人因鄰居晚間使用電器而造成網路中斷的
+ djnpof,06/22 10:09,像這種狀況的 是種花的問題嗎?
+ bmfwfmnbo,06/22 11:25,疫情開始後,網路確實慢很多,共體時艱吧
+ ===
+ 問題:請對以上討論進行所有意見的分類與摘要,類別有六種: 建議、質疑、疑問、抱怨、讚美、提供資訊。意見摘要用條列的方式呈現,每條意見之後用括號包含持此意見的發言者,原本文章作者的意見也必須一併分析,同一條意見可能包含不只一人。
+
+
+ 2024-07-15 08:08:57 - INFO - __main__ - Chosen sample 1309 of the raw training set:
+
+ Assistant:
+ 建議:
+ - 建議可以換其他電信業者,並試試其他業者的服務品質如何 (ebbf,obuibo2000,lplpllp416,bsjbeof)
+ - 建議對方先翻開並理解合約內容,便不會浪費時間 (nbocpx77)
+ - 固定網路比較不會出現問題,不應該使用行動網路來取代 (obuibo2000)
+ - 如果真的覺得爛到無法忍受,可以請求無條件解約 (qbvm40807)
+
+ 質疑:
+ - 中華電信的問題被視為用戶問題,其他電信的問題則是因為選擇該電信業者 (kbto4560)
+ - 中華電信4G和5G的問題和光世代的廣告無關 (djuzqpsu)
+ - 對於原po以壟斷來批評中華電信的用詞有疑問 (ufoubjlbotp, qbvm40807, lojhiu326, sa759, ex7931425,h6i630,NfohYjbo)
+ - 懷疑原po是個奧客,想要公審中華電信 (Bovunjbp)
+ - 懷疑原po沒有好好理解公平交易法和壟斷的意義 (hbnfljoh)
+ - 懷疑原po發完文後就沒有回應,可能是在故意搞事 (kvtujo332805)
+
+ 疑問:
+ - 原po所說的壟斷是指什麼?為何認為中華電信壟斷了行動網路市場?(ay27771531, lojhiu326,NfohYjbo,ol950357,h6i630)
+ - 不理解原po為什麼堅持使用同一家信號不好的電信業者而不換家試試 (lplpllp416,Gn4o)
+
+ 抱怨:
+ - 抱怨中華電信4G的不穩定,標5G價格卻又拖累4G用戶 (fspt01, zffpvp)
+
+ 讚美:
+ - 原po對公民議題的態度值得肯定。 (TvohIzvo)
+
+ 提供資訊:
+ - 台灣有五家電信業者,背後都有大財團支援,沒有壟斷的問題 (qbvm40807)
+ - 台灣的網路使用量在疫情期間增加,速度變慢是共體時艱 (bmfwfmnbo)
+ - 網路使用時段、設備種類、位置等因素都可能影響網路品質,並非完全由電信業者控制 (djnpof)</s>
+ 2024-07-15 08:08:57 - INFO - __main__ - Rejected sample 1309 of the raw training set:
+
+ Assistant:
+ 建議:
+ 1. 應該讓老百姓反應中華電信壟斷性議題 (fspt01)
+ 2. 應該攜碼出去 (gvlbp21)
+ 3. 應該去申請別家試用看看,哪家情況好就跳哪家 (lplpllp416)
+ 4. 應該先翻開合約好好看一下,就不會想去浪費這個時間了 (nbocpx77)
+
+ 質疑:
+ 1. 質疑中華電信是否真的壟斷4G (sa759, a7931425, h6i630, hbnfljoh, ol950357, tpoza700, cfopbjdp, bqqmfxbsn, ibolthp2012, nfqbmjoh, tijofMpwf, dbnfmmbmb)
+ 2. 質疑種花是否構成壟斷 (bsjbeof)
+
+ 疑問:
+ 1. 疑問公民議題是什麼 (fspt01, ibolthp2012, nfqbmjoh, tijofMpwf)
+ 2. 疑問壟斷在哪裡 (ay27771531, Gn4o, ex7931425, ol950357, tpoza700, cfopbjdp, bqqmfxbsn, ibolthp2012, nfqbmjoh, tijofMpwf, dbnfmmbmb)
+
+ 抱怨:
+ 1. 抱怨4G網路品質不好 (fspt01)
+ 2. 抱怨中華電信壟斷 (fspt01, kbto4560, zffpvp, bsjbeof)
+ 3. 抱怨網路中斷 (nfqbmjoh)
+
+ 讚美:
+ 1. 讚美固網 (obuibo2000)
+ 2. 讚美公平交易委員會處分中華電信的行為 (fspt01)
+
+ 提供資訊:
+ 1. 提供了公平交易委員會處分中華電信的資訊 (fspt01)
+ 2. 提供了中華電信被罰款的資訊 (ebbf, cv17, obuibo2000, ufoubjlbotp, kbto4560, djuzqpsu, gvlbp21, qbvm40807, lojhiu326, lplpllp416, sa759, TvohIzvo, Bovunjbp, zffpvp, bsjbeof, nbocpx77, Epsffo1010, ay27771531, Gn4o, ex7931425, h6i630, hbnfljoh, ol950357, tpoza700, cfopbjdp, bqqmfxbsn, ibolthp2012, nfqbmjoh, tijofMpwf, dbnfmmbmb)</s>
+ 2024-07-15 08:08:57 - INFO - __main__ - Prompt sample 228 of the raw training set:
+
+ <s>Human:
+ 文章:輿情作者,日期,輿情內文
+ bcdtbfhvtb11,6/25,照上面說的1199以下等於是5G的量到降速吃到飽,1399開始才是真的5G吃到飽 現在4G吃到飽我家靠窗速度還有100M上下,晚上有時速度會慢一點不到10M 今天看到5G方案讓我猶豫要不要升級5G了 有2個問題 1.50G的額度用USB分享給電腦也算嗎?還是開Wifi熱點才是? 2.如果用1399是熱點超過50G連本身吃到飽也降速到10M?還是只有被分享的裝置會降速到10M?
+ qpqpqbm,06/25 11:22,熱點分享量到是鎖卡 只剩手機能用吧 目前看起來是
+ qpqpqbm,06/25 11:22,這樣玩 不夠這也是中華董事長有說目前涵蓋不過才有
+ qpqpqbm,06/25 11:22,限制 未來就不知道了
+ zjfstbo,06/25 11:31,限制一堆價錢這樣 以為消費者會買單嗎 天真
+ qpqpqbm,06/25 11:32,市場跟客戶需求有關 客戶有需求就要滿足XD
+ qpqpqbm,06/25 11:33,https://reurl.cc/L3zKMe 高資費1399以上用的爽就好
+ qpqpqbm,06/25 11:34,不夠用早該辦固網滿足辦公家庭需求...
+ uudnm,06/25 12:07,我也好奇,熱點流量用完,手機的吃到飽應該不會限速
+ uudnm,06/25 12:07,吧
+ qpqpqbm,06/25 12:11,手機沒影響 就是要擋大戶 分享流量夠一般基本用途
+ olit9412235,06/25 14:51,為什麼拿台北市跟東京比,因為都是大都市,比較對
+ olit9412235,06/25 14:51,象是 docomo 跟中華電信,也是兩邊電信龍頭,這是
+ olit9412235,06/25 14:51,親身體驗。這還是手機而已,如果是租分享器速度會
+ olit9412235,06/25 14:51,更快。
+ lbnjs,06/25 15:18,額度是全部,包含使用SIM卡網路的所有裝置。
+ gmjfstlz,06/25 21:14,容量哪有在分你開分享與否?有網路都是用5g跑阿
+ ===
+ 問題:請對以上討論進行所有意見的分類與摘要,類別有六種: 建議、質疑、疑問、抱怨、讚美、提供資訊。意見摘要用條列的方式呈現,每條意見之後用括號包含持此意見的發言者,原本文章作者的意見也必須一併分析,同一條意見可能包含不只一人。
+
+
+ 2024-07-15 08:08:57 - INFO - __main__ - Chosen sample 228 of the raw training set:
+
+ Assistant:
+ 建議:
+ 1. 可以考慮升級5G(bcdtbfhvtb11)
+ 2. 需求不夠建議可以辦固網滿足辦公家庭需求(qpqpqbm)
+
+ 質疑:
+ 1. 對於新的5G方案有所疑慮,質疑是否值得升級(bcdtbfhvtb11)
+ 2. 質疑行動網路公司將價格定得過高,以及設置太多限制(zjfstbo)
+
+ 疑問:
+ 1. 50G的額度是否包含用USB分享給電腦的數據(bcdtbfhvtb11)
+ 2. 如果用1399是熱點超過50G連本身吃到飽也降速到10M(bcdtbfhvtb11)
+ 3. 對於熱點流量用完後,手機的吃到飽是否會限速(uudnm)
+
+ 抱怨:
+ 1. 對於5G資費計劃有許多限制,價格又偏高感到不滿(zjfstbo)
+
+ 讚美:
+ 1. 沒有讚美的意見
+
+ 提供資訊:
+ 1. 分享數據到其他裝置也會計算在內(lbnjs, gmjfstlz)
+ 2. 若熱點流量用完,手機所使用的數據還是5G數據並不受限制(uudnm, qpqpqbm)
+ 3. 對於市場需求關係的說明(qpqpqbm)
+ 4. 台北市與東京大都市的比較(olit9412235)
+ 5. 提供相關的網路連結(qpqpqbm)</s>
+ 2024-07-15 08:08:57 - INFO - __main__ - Rejected sample 228 of the raw training set:
+
+ Assistant:
+ 建議:
+ 1. 高資費1399以上用的爽就好 (qpqpqbm)
+ 2. 不夠用早該辦固網滿足辦公家庭需求 (qpqpqbm)
+
+ 質疑:
+ 1. 限制一堆價錢這樣 以為消費者會買單嗎 天真 (zjfstbo)
+
+ 疑問:
+ 1. 1.50G的額度用USB分享給電腦也算嗎?還是開Wifi熱點才是? (bcdtbfhvtb11)
+ 2. 如果用1399是熱點超過50G連本身吃到飽也降速到10M?還是只有被分享的裝置會降速到10M? (bcdtbfhvtb11)
+
+ 抱怨:
+ 1. 市場跟客戶需求有關 客戶有需求就要滿足XD (qpqpqbm)
+
+ 讚美:
+
+
+ 提供資訊:
+ 1. 熱點分享量到是鎖卡 只剩手機能用吧 目前看起來是這樣玩 不夠這也是中華董事長有說目前涵蓋不過才有限制 未來就不知道了 (qpqpqbm)
+ 2. 手機沒影響 就是要擋大戶 分享流量夠一般基本用途 (qpqpqbm)
+ 3. 額度是全部,包含使用SIM卡網路的所有裝置 (lbnjs)
+ 4. 容量哪有在分你開分享與否?有網路都是用5g跑阿 (gmjfstlz)
+ 5. 為什麼拿台北市跟東京比,因為都是大都市,比較對象是 docomo 跟中華電信,也是兩邊電信龍頭,這是親身體驗 (olit9412235)</s>
+ 2024-07-15 08:08:57 - INFO - __main__ - Prompt sample 51 of the raw training set:
+
+ <s>Human:
+ 文章:輿情作者,日期,輿情內文
+ epodmjboh,4/22,大家好!! 小弟這次剛好符合中華的公教雙飽資格, 想衝一波, 但是只想辦個299的就好, 因為平常都是用預付卡買1G, 不過有看到299裡面, 有個優惠是群內優惠 免費, 想問這個 是要需要多加50加電話號碼進群組嗎? 還是就是299就可以直接設定電話進群組 對這個有點不懂,想問有無人可以解答 謝謝大大們
+ kbnft732,04/22 18:29,你要不要考慮直接問客服?
+ qvtiqvmm,04/22 18:34,好像不用加50。 剛好今天辦299有問。
+ yv3,04/22 18:44,台哥的群內免費試 每個單位有一個編號 例如台北市政
+ yv3,04/22 18:45,府教育局 編號是1234 那教育局的員工辦公教專案
+ ===
+ 問題:請對以上討論進行所有意見的分類與摘要,類別有六種: 建議、質疑、疑問、抱怨、讚美、提供資訊。意見摘要用條列的方式呈現,每條意見之後用括號包含持此意見的發言者,原本文章作者的意見也必須一併分析,同一條意見可能包含不只一人。
+
+
+ 2024-07-15 08:08:57 - INFO - __main__ - Chosen sample 51 of the raw training set:
+
+ Assistant:
+ - 疑問:想要了解299元的方案是不是可以直接加入群組,還是要多加50元,並希望有人能提供資訊 (epodmjboh)
+ - 建議:直接詢問客服以獲得最確實資訊 (kbnft732)
+ - 提供資訊:今天辦理299元專案,表示不需要加50元 (qvtiqvmm)
+ - 提供資訊:解釋群內免費的方式,每個單位組會有一個編號 (yv3)</s>
+ 2024-07-15 08:08:57 - INFO - __main__ - Rejected sample 51 of the raw training set:
+
+ Assistant:
+ - 疑問:
+ - 想問這個 是要需要多加50加電話號碼進群組嗎? 還是就是299就可以直接設定電話進群組 (epodmjboh)
+ - 想問有無人可以解答 (epodmjboh)
+
+ - 建議:
+ - 你要不要考慮直接問客服? (kbnft732)
+
+ - 提供資訊:
+ - 好像不用加50。 剛好今天辦299有問。 (qvtiqvmm)
+ - 台哥的群內免費試 每個單位有一個編號 例如台北市政府教育局 編號是1234 那教育局的員工辦公教專案 (yv3)</s>
+ [INFO 20240712-1747] init DPOTrainer
+ [INFO 20240712-1747] init DPOTrainer
+ [INFO 20240712-1747] init DPOTrainer
+ [2024-07-15 08:08:57,938] [INFO] [comm.py:637:init_distributed] cdb=None
+ DatasetDict({
+     train: Dataset({
+         features: ['prompt', 'chosen', 'rejected', 'messages'],
+         num_rows: 1728
+     })
+     test: Dataset({
+         features: ['prompt', 'chosen', 'rejected', 'messages'],
+         num_rows: 91
+     })
+ })
+ [INFO 20240712-1747] init DPOTrainer
+ [2024-07-15 08:09:05,078] [INFO] [partition_parameters.py:347:__exit__] finished initializing model - num_params = 291, num_elems = 7.24B
+ [2024-07-15 08:12:06,088] [INFO] [partition_parameters.py:347:__exit__] finished initializing model - num_params = 582, num_elems = 14.48B
+ 2024-07-15 08:12:09 - WARNING - datasets.fingerprint - Parameter 'function'=<bound method DPOTrainer.tokenize_row of <trl.trainer.dpo_trainer.DPOTrainer object at 0x7f752f58d660>> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed.
+ 2024-07-15 08:12:19 - WARNING - datasets.fingerprint - Parameter 'function'=<bound method DPOTrainer.tokenize_row of <trl.trainer.dpo_trainer.DPOTrainer object at 0x7f05b3b81660>> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed.
+ 2024-07-15 08:12:19 - WARNING - datasets.fingerprint - Parameter 'function'=<bound method DPOTrainer.tokenize_row of <trl.trainer.dpo_trainer.DPOTrainer object at 0x7f4724d85660>> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed.
+ 2024-07-15 08:12:19 - WARNING - datasets.fingerprint - Parameter 'function'=<bound method DPOTrainer.tokenize_row of <trl.trainer.dpo_trainer.DPOTrainer object at 0x7fd53838d690>> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed.
+ [2024-07-15 08:12:22,020] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed info: version=0.12.2, git-hash=unknown, git-branch=unknown
+ [2024-07-15 08:12:22,033] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Flops Profiler Enabled: False
+ [2024-07-15 08:12:22,035] [INFO] [logging.py:96:log_dist] [Rank 0] Creating ZeRO Offload
+ [2024-07-15 08:12:22,133] [INFO] [utils.py:802:see_memory_usage] DeepSpeedZeRoOffload initialize [begin]
+ [2024-07-15 08:12:22,134] [INFO] [utils.py:803:see_memory_usage] MA 6.74 GB Max_MA 7.42 GB CA 7.89 GB Max_CA 8 GB
+ [2024-07-15 08:12:22,134] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 17.44 GB, percent = 2.0%
+ Parameter Offload: Total persistent parameters: 266240 in 65 params
+ [2024-07-15 08:12:22,238] [INFO] [utils.py:802:see_memory_usage] DeepSpeedZeRoOffload initialize [end]
+ [2024-07-15 08:12:22,238] [INFO] [utils.py:803:see_memory_usage] MA 6.74 GB Max_MA 6.74 GB CA 7.89 GB Max_CA 8 GB
+ [2024-07-15 08:12:22,238] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 17.47 GB, percent = 2.0%
+ [2024-07-15 08:12:22,239] [INFO] [config.py:972:print] DeepSpeedEngine configuration:
+ [2024-07-15 08:12:22,239] [INFO] [config.py:976:print] activation_checkpointing_config {
+   "partition_activations": false,
+   "contiguous_memory_optimization": false,
+   "cpu_checkpointing": false,
+   "number_checkpoints": null,
+   "synchronize_checkpoint_boundary": false,
+   "profile": false
+ }
+ [2024-07-15 08:12:22,239] [INFO] [config.py:976:print] aio_config ................... {'block_size': 1048576, 'queue_depth': 8, 'thread_count': 1, 'single_submit': False, 'overlap_events': True}
+ [2024-07-15 08:12:22,239] [INFO] [config.py:976:print] amp_enabled .................. False
+ [2024-07-15 08:12:22,239] [INFO] [config.py:976:print] amp_params ................... False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] autotuning_config ............ {
+   "enabled": false,
+   "start_step": null,
+   "end_step": null,
+   "metric_path": null,
+   "arg_mappings": null,
+   "metric": "throughput",
+   "model_info": null,
+   "results_dir": "autotuning_results",
+   "exps_dir": "autotuning_exps",
+   "overwrite": true,
+   "fast": true,
+   "start_profile_step": 3,
+   "end_profile_step": 5,
+   "tuner_type": "gridsearch",
+   "tuner_early_stopping": 5,
+   "tuner_num_trials": 50,
+   "model_info_path": null,
+   "mp_size": 1,
+   "max_train_batch_size": null,
+   "min_train_batch_size": 1,
+   "max_train_micro_batch_size_per_gpu": 1.024000e+03,
+   "min_train_micro_batch_size_per_gpu": 1,
+   "num_tuning_micro_batch_sizes": 3
+ }
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] bfloat16_enabled ............. True
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] checkpoint_parallel_write_pipeline False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] checkpoint_tag_validation_enabled True
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] checkpoint_tag_validation_fail False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] comms_config ................. <deepspeed.comm.config.DeepSpeedCommsConfig object at 0x7f74804d3a30>
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] communication_data_type ...... None
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] compression_config ........... {'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}}
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] curriculum_enabled_legacy .... False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] curriculum_params_legacy ..... False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] data_efficiency_config ....... {'enabled': False, 'seed': 1234, 'data_sampling': {'enabled': False, 'num_epochs': 1000, 'num_workers': 0, 'curriculum_learning': {'enabled': False}}, 'data_routing': {'enabled': False, 'random_ltd': {'enabled': False, 'layer_token_lr_schedule': {'enabled': False}}}}
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] data_efficiency_enabled ...... False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] dataloader_drop_last ......... False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] disable_allgather ............ False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] dump_state ................... False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] dynamic_loss_scale_args ...... None
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] eigenvalue_enabled ........... False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] eigenvalue_gas_boundary_resolution 1
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] eigenvalue_layer_name ........ bert.encoder.layer
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] eigenvalue_layer_num ......... 0
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] eigenvalue_max_iter .......... 100
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] eigenvalue_stability ......... 1e-06
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] eigenvalue_tol ............... 0.01
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] eigenvalue_verbose ........... False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] elasticity_enabled ........... False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] flops_profiler_config ........ {
+   "enabled": false,
+   "recompute_fwd_factor": 0.0,
+   "profile_step": 1,
+   "module_depth": -1,
+   "top_modules": 1,
+   "detailed": true,
+   "output_file": null
+ }
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] fp16_auto_cast ............... None
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] fp16_enabled ................. False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] fp16_master_weights_and_gradients False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] global_rank .................. 0
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] grad_accum_dtype ............. None
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] gradient_accumulation_steps .. 2
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] gradient_clipping ............ 1.0
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] gradient_predivide_factor .... 1.0
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] hybrid_engine ................ enabled=False max_out_tokens=512 inference_tp_size=1 release_inference_cache=False pin_parameters=True tp_gather_partition_size=8
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] initial_dynamic_scale ........ 1
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] load_universal_checkpoint .... False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] loss_scale ................... 1.0
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] memory_breakdown ............. False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] mics_hierarchial_params_gather False
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] mics_shard_size .............. -1
+ [2024-07-15 08:12:22,240] [INFO] [config.py:976:print] monitor_config ............... tensorboard=TensorBoardConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') wandb=WandbConfig(enabled=False, group=None, team=None, project='deepspeed') csv_monitor=CSVConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') enabled=False
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] nebula_config ................ {
+   "enabled": false,
+   "persistent_storage_path": null,
+   "persistent_time_interval": 100,
+   "num_of_version_in_retention": 2,
+   "enable_nebula_load": true,
+   "load_path": null
+ }
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] optimizer_legacy_fusion ...... False
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] optimizer_name ............... None
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] optimizer_params ............. None
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0}
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] pld_enabled .................. False
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] pld_params ................... False
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] prescale_gradients ........... False
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] scheduler_name ............... None
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] scheduler_params ............. None
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] seq_parallel_communication_data_type torch.float32
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] sparse_attention ............. None
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] sparse_gradients_enabled ..... False
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] steps_per_print .............. inf
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] train_batch_size ............. 64
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] train_micro_batch_size_per_gpu 8
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] use_node_local_storage ....... False
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] wall_clock_breakdown ......... False
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] weight_quantization_config ... None
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] world_size ................... 4
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] zero_allow_untested_optimizer False
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] zero_config .................. stage=3 contiguous_gradients=True reduce_scatter=True reduce_bucket_size=500,000,000 allgather_partitions=True allgather_bucket_size=500,000,000 overlap_comm=True load_from_fp32_weights=True elastic_checkpoint=False offload_param=DeepSpeedZeroOffloadParamConfig(device='none', nvme_path=None, buffer_count=5, buffer_size=100,000,000, max_in_cpu=1,000,000,000, pin_memory=False) offload_optimizer=DeepSpeedZeroOffloadOptimizerConfig(device='none', nvme_path=None, buffer_count=4, pin_memory=False, pipeline=False, pipeline_read=False, pipeline_write=False, fast_init=False) sub_group_size=1,000,000,000 cpu_offload_param=None cpu_offload_use_pin_memory=None cpu_offload=None prefetch_bucket_size=50,000,000 param_persistence_threshold=100,000 model_persistence_threshold=sys.maxsize max_live_parameters=1,000,000,000 max_reuse_distance=1,000,000,000 gather_16bit_weights_on_model_save=True stage3_gather_fp16_weights_on_model_save=False ignore_unused_parameters=True legacy_stage1=False round_robin_gradients=False zero_hpz_partition_size=1 zero_quantized_weights=False zero_quantized_nontrainable_weights=False zero_quantized_gradients=False mics_shard_size=-1 mics_hierarchical_params_gather=False memory_efficient_linear=True pipeline_loading_checkpoint=False override_module_apply=True
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] zero_enabled ................. True
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] zero_force_ds_cpu_optimizer .. True
+ [2024-07-15 08:12:22,241] [INFO] [config.py:976:print] zero_optimization_stage ...... 3
+ [2024-07-15 08:12:22,241] [INFO] [config.py:962:print_user_config] json = {
+   "train_batch_size": 64,
+   "train_micro_batch_size_per_gpu": 8,
+   "gradient_accumulation_steps": 2,
+   "zero_optimization": {
+     "stage": 3,
+     "offload_optimizer": {
+       "device": "none",
+       "nvme_path": null
+     },
+     "offload_param": {
+       "device": "none",
+       "nvme_path": null
+     },
+     "stage3_gather_16bit_weights_on_model_save": true
+   },
+   "gradient_clipping": 1.0,
+   "steps_per_print": inf,
+   "bf16": {
+     "enabled": true
+   },
+   "fp16": {
+     "enabled": false
+   },
+   "zero_optimization.reduce_bucket_size": 1.677722e+07,
+   "zero_optimization.stage3_param_persistence_threshold": 4.096000e+04,
+   "zero_optimization.stage3_prefetch_bucket_size": 1.509949e+07
+ }
603
+ [INFO 20240712-1747] ☆☆☆
604
+ [DEBUG 20240712-1804] checkpoint = None
605
+ [INFO 20240712-1747] ☆☆☆
606
+ [DEBUG 20240712-1804] checkpoint = None
607
+ [INFO 20240712-1747] ☆☆☆
608
+ [DEBUG 20240712-1804] checkpoint = None
609
+ [INFO 20240712-1747] ☆☆☆
610
+ [DEBUG 20240712-1804] checkpoint = None
611
+ [2024-07-15 08:12:47,906] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed info: version=0.12.2, git-hash=unknown, git-branch=unknown
612
+ [2024-07-15 08:12:47,913] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Flops Profiler Enabled: False
613
+ [2024-07-15 08:12:47,914] [INFO] [logging.py:96:log_dist] [Rank 0] Using client Optimizer as basic optimizer
614
+ [2024-07-15 08:12:47,914] [INFO] [logging.py:96:log_dist] [Rank 0] Removing param_group that has no 'params' in the basic Optimizer
615
+ [2024-07-15 08:12:47,924] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Basic Optimizer = AdamW
616
+ [2024-07-15 08:12:47,924] [INFO] [utils.py:56:is_zero_supported_optimizer] Checking ZeRO support for optimizer=AdamW type=<class 'torch.optim.adamw.AdamW'>
617
+ [2024-07-15 08:12:47,924] [INFO] [logging.py:96:log_dist] [Rank 0] Creating fp16 ZeRO stage 3 optimizer, MiCS is enabled False, Hierarchical params gather False
618
+ [2024-07-15 08:12:47,924] [INFO] [logging.py:96:log_dist] [Rank 0] Creating torch.bfloat16 ZeRO stage 3 optimizer
619
+ [2024-07-15 08:12:48,039] [INFO] [utils.py:802:see_memory_usage] Stage 3 initialize beginning
620
+ [2024-07-15 08:12:48,040] [INFO] [utils.py:803:see_memory_usage] MA 6.74 GB Max_MA 6.74 GB CA 6.94 GB Max_CA 8 GB
621
+ [2024-07-15 08:12:48,040] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.79 GB, percent = 2.2%
622
+ [2024-07-15 08:12:48,041] [INFO] [stage3.py:126:__init__] Reduce bucket size 500,000,000
623
+ [2024-07-15 08:12:48,042] [INFO] [stage3.py:127:__init__] Prefetch bucket size 50,000,000
624
+ [2024-07-15 08:12:48,153] [INFO] [utils.py:802:see_memory_usage] DeepSpeedZeRoOffload initialize [begin]
625
+ [2024-07-15 08:12:48,154] [INFO] [utils.py:803:see_memory_usage] MA 6.74 GB Max_MA 6.74 GB CA 6.94 GB Max_CA 7 GB
626
+ [2024-07-15 08:12:48,154] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.79 GB, percent = 2.2%
627
+ Parameter Offload: Total persistent parameters: 266240 in 65 params
628
+ [2024-07-15 08:12:48,277] [INFO] [utils.py:802:see_memory_usage] DeepSpeedZeRoOffload initialize [end]
629
+ [2024-07-15 08:12:48,278] [INFO] [utils.py:803:see_memory_usage] MA 6.74 GB Max_MA 6.74 GB CA 6.94 GB Max_CA 7 GB
630
+ [2024-07-15 08:12:48,278] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.79 GB, percent = 2.2%
631
+ [2024-07-15 08:12:48,385] [INFO] [utils.py:802:see_memory_usage] Before creating fp16 partitions
632
+ [2024-07-15 08:12:48,386] [INFO] [utils.py:803:see_memory_usage] MA 6.74 GB Max_MA 6.74 GB CA 6.94 GB Max_CA 7 GB
633
+ [2024-07-15 08:12:48,386] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.79 GB, percent = 2.2%
634
+ [2024-07-15 08:12:49,843] [INFO] [utils.py:802:see_memory_usage] After creating fp16 partitions: 2
635
+ [2024-07-15 08:12:49,844] [INFO] [utils.py:803:see_memory_usage] MA 6.75 GB Max_MA 6.75 GB CA 7.28 GB Max_CA 7 GB
636
+ [2024-07-15 08:12:49,844] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.8 GB, percent = 2.2%
637
+ [2024-07-15 08:12:49,946] [INFO] [utils.py:802:see_memory_usage] Before creating fp32 partitions
638
+ [2024-07-15 08:12:49,946] [INFO] [utils.py:803:see_memory_usage] MA 6.75 GB Max_MA 6.75 GB CA 7.28 GB Max_CA 7 GB
639
+ [2024-07-15 08:12:49,946] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.8 GB, percent = 2.2%
640
+ [2024-07-15 08:12:50,055] [INFO] [utils.py:802:see_memory_usage] After creating fp32 partitions
641
+ [2024-07-15 08:12:50,055] [INFO] [utils.py:803:see_memory_usage] MA 13.49 GB Max_MA 14.97 GB CA 15.92 GB Max_CA 16 GB
642
+ [2024-07-15 08:12:50,055] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.8 GB, percent = 2.2%
643
+ [2024-07-15 08:12:50,175] [INFO] [utils.py:802:see_memory_usage] Before initializing optimizer states
644
+ [2024-07-15 08:12:50,175] [INFO] [utils.py:803:see_memory_usage] MA 13.49 GB Max_MA 13.49 GB CA 15.92 GB Max_CA 16 GB
645
+ [2024-07-15 08:12:50,176] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.81 GB, percent = 2.2%
646
+ [2024-07-15 08:12:50,322] [INFO] [utils.py:802:see_memory_usage] After initializing optimizer states
647
+ [2024-07-15 08:12:50,323] [INFO] [utils.py:803:see_memory_usage] MA 26.98 GB Max_MA 33.73 GB CA 36.97 GB Max_CA 37 GB
648
+ [2024-07-15 08:12:50,323] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.81 GB, percent = 2.2%
649
+ [2024-07-15 08:12:50,324] [INFO] [stage3.py:460:_setup_for_real_optimizer] optimizer state initialized
650
+ [2024-07-15 08:12:50,635] [INFO] [utils.py:802:see_memory_usage] After initializing ZeRO optimizer
651
+ [2024-07-15 08:12:50,636] [INFO] [utils.py:803:see_memory_usage] MA 31.29 GB Max_MA 31.77 GB CA 51.01 GB Max_CA 51 GB
652
+ [2024-07-15 08:12:50,636] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.81 GB, percent = 2.2%
653
+ [2024-07-15 08:12:50,636] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Final Optimizer = AdamW
654
+ [2024-07-15 08:12:50,636] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed using client LR scheduler
655
+ [2024-07-15 08:12:50,636] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed LR Scheduler = None
656
+ [2024-07-15 08:12:50,636] [INFO] [logging.py:96:log_dist] [Rank 0] step=0, skipped=0, lr=[0.0], mom=[(0.9, 0.999)]
657
+ [2024-07-15 08:12:50,637] [INFO] [config.py:972:print] DeepSpeedEngine configuration:
658
+ [2024-07-15 08:12:50,637] [INFO] [config.py:976:print] activation_checkpointing_config {
659
+ "partition_activations": false,
660
+ "contiguous_memory_optimization": false,
661
+ "cpu_checkpointing": false,
662
+ "number_checkpoints": null,
663
+ "synchronize_checkpoint_boundary": false,
664
+ "profile": false
665
+ }
666
+ [2024-07-15 08:12:50,637] [INFO] [config.py:976:print] aio_config ................... {'block_size': 1048576, 'queue_depth': 8, 'thread_count': 1, 'single_submit': False, 'overlap_events': True}
667
+ [2024-07-15 08:12:50,637] [INFO] [config.py:976:print] amp_enabled .................. False
668
+ [2024-07-15 08:12:50,637] [INFO] [config.py:976:print] amp_params ................... False
669
+ [2024-07-15 08:12:50,637] [INFO] [config.py:976:print] autotuning_config ............ {
670
+ "enabled": false,
671
+ "start_step": null,
672
+ "end_step": null,
673
+ "metric_path": null,
674
+ "arg_mappings": null,
675
+ "metric": "throughput",
676
+ "model_info": null,
677
+ "results_dir": "autotuning_results",
678
+ "exps_dir": "autotuning_exps",
679
+ "overwrite": true,
680
+ "fast": true,
681
+ "start_profile_step": 3,
682
+ "end_profile_step": 5,
683
+ "tuner_type": "gridsearch",
684
+ "tuner_early_stopping": 5,
685
+ "tuner_num_trials": 50,
686
+ "model_info_path": null,
687
+ "mp_size": 1,
688
+ "max_train_batch_size": null,
689
+ "min_train_batch_size": 1,
690
+ "max_train_micro_batch_size_per_gpu": 1.024000e+03,
691
+ "min_train_micro_batch_size_per_gpu": 1,
692
+ "num_tuning_micro_batch_sizes": 3
693
+ }
694
+ [2024-07-15 08:12:50,637] [INFO] [config.py:976:print] bfloat16_enabled ............. True
695
+ [2024-07-15 08:12:50,637] [INFO] [config.py:976:print] checkpoint_parallel_write_pipeline False
696
+ [2024-07-15 08:12:50,637] [INFO] [config.py:976:print] checkpoint_tag_validation_enabled True
697
+ [2024-07-15 08:12:50,637] [INFO] [config.py:976:print] checkpoint_tag_validation_fail False
698
+ [2024-07-15 08:12:50,637] [INFO] [config.py:976:print] comms_config ................. <deepspeed.comm.config.DeepSpeedCommsConfig object at 0x7f73f34eb520>
699
+ [2024-07-15 08:12:50,637] [INFO] [config.py:976:print] communication_data_type ...... None
700
+ [2024-07-15 08:12:50,637] [INFO] [config.py:976:print] compression_config ........... {'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}}
701
+ [2024-07-15 08:12:50,637] [INFO] [config.py:976:print] curriculum_enabled_legacy .... False
702
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] curriculum_params_legacy ..... False
703
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] data_efficiency_config ....... {'enabled': False, 'seed': 1234, 'data_sampling': {'enabled': False, 'num_epochs': 1000, 'num_workers': 0, 'curriculum_learning': {'enabled': False}}, 'data_routing': {'enabled': False, 'random_ltd': {'enabled': False, 'layer_token_lr_schedule': {'enabled': False}}}}
704
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] data_efficiency_enabled ...... False
705
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] dataloader_drop_last ......... False
706
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] disable_allgather ............ False
707
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] dump_state ................... False
708
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] dynamic_loss_scale_args ...... None
709
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] eigenvalue_enabled ........... False
710
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] eigenvalue_gas_boundary_resolution 1
711
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] eigenvalue_layer_name ........ bert.encoder.layer
712
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] eigenvalue_layer_num ......... 0
713
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] eigenvalue_max_iter .......... 100
714
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] eigenvalue_stability ......... 1e-06
715
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] eigenvalue_tol ............... 0.01
716
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] eigenvalue_verbose ........... False
717
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] elasticity_enabled ........... False
718
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] flops_profiler_config ........ {
719
+ "enabled": false,
720
+ "recompute_fwd_factor": 0.0,
721
+ "profile_step": 1,
722
+ "module_depth": -1,
723
+ "top_modules": 1,
724
+ "detailed": true,
725
+ "output_file": null
726
+ }
727
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] fp16_auto_cast ............... None
728
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] fp16_enabled ................. False
729
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] fp16_master_weights_and_gradients False
730
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] global_rank .................. 0
731
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] grad_accum_dtype ............. None
732
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] gradient_accumulation_steps .. 2
733
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] gradient_clipping ............ 1.0
734
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] gradient_predivide_factor .... 1.0
735
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] hybrid_engine ................ enabled=False max_out_tokens=512 inference_tp_size=1 release_inference_cache=False pin_parameters=True tp_gather_partition_size=8
736
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] initial_dynamic_scale ........ 1
737
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] load_universal_checkpoint .... False
738
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] loss_scale ................... 1.0
739
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] memory_breakdown ............. False
740
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] mics_hierarchial_params_gather False
741
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] mics_shard_size .............. -1
742
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] monitor_config ............... tensorboard=TensorBoardConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') wandb=WandbConfig(enabled=False, group=None, team=None, project='deepspeed') csv_monitor=CSVConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') enabled=False
743
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] nebula_config ................ {
744
+ "enabled": false,
745
+ "persistent_storage_path": null,
746
+ "persistent_time_interval": 100,
747
+ "num_of_version_in_retention": 2,
748
+ "enable_nebula_load": true,
749
+ "load_path": null
750
+ }
751
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] optimizer_legacy_fusion ...... False
752
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] optimizer_name ............... None
753
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] optimizer_params ............. None
754
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0}
755
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] pld_enabled .................. False
756
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] pld_params ................... False
757
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] prescale_gradients ........... False
758
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] scheduler_name ............... None
759
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] scheduler_params ............. None
760
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] seq_parallel_communication_data_type torch.float32
761
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] sparse_attention ............. None
762
+ [2024-07-15 08:12:50,638] [INFO] [config.py:976:print] sparse_gradients_enabled ..... False
763
+ [2024-07-15 08:12:50,639] [INFO] [config.py:976:print] steps_per_print .............. inf
764
+ [2024-07-15 08:12:50,639] [INFO] [config.py:976:print] train_batch_size ............. 64
765
+ [2024-07-15 08:12:50,639] [INFO] [config.py:976:print] train_micro_batch_size_per_gpu 8
766
+ [2024-07-15 08:12:50,639] [INFO] [config.py:976:print] use_node_local_storage ....... False
767
+ [2024-07-15 08:12:50,639] [INFO] [config.py:976:print] wall_clock_breakdown ......... False
768
+ [2024-07-15 08:12:50,639] [INFO] [config.py:976:print] weight_quantization_config ... None
769
+ [2024-07-15 08:12:50,639] [INFO] [config.py:976:print] world_size ................... 4
770
+ [2024-07-15 08:12:50,639] [INFO] [config.py:976:print] zero_allow_untested_optimizer True
771
+ [2024-07-15 08:12:50,639] [INFO] [config.py:976:print] zero_config .................. stage=3 contiguous_gradients=True reduce_scatter=True reduce_bucket_size=500,000,000 allgather_partitions=True allgather_bucket_size=500,000,000 overlap_comm=True load_from_fp32_weights=True elastic_checkpoint=False offload_param=DeepSpeedZeroOffloadParamConfig(device='none', nvme_path=None, buffer_count=5, buffer_size=100,000,000, max_in_cpu=1,000,000,000, pin_memory=False) offload_optimizer=DeepSpeedZeroOffloadOptimizerConfig(device='none', nvme_path=None, buffer_count=4, pin_memory=False, pipeline=False, pipeline_read=False, pipeline_write=False, fast_init=False) sub_group_size=1,000,000,000 cpu_offload_param=None cpu_offload_use_pin_memory=None cpu_offload=None prefetch_bucket_size=50,000,000 param_persistence_threshold=100,000 model_persistence_threshold=sys.maxsize max_live_parameters=1,000,000,000 max_reuse_distance=1,000,000,000 gather_16bit_weights_on_model_save=True stage3_gather_fp16_weights_on_model_save=False ignore_unused_parameters=True legacy_stage1=False round_robin_gradients=False zero_hpz_partition_size=1 zero_quantized_weights=False zero_quantized_nontrainable_weights=False zero_quantized_gradients=False mics_shard_size=-1 mics_hierarchical_params_gather=False memory_efficient_linear=True pipeline_loading_checkpoint=False override_module_apply=True
772
+ [2024-07-15 08:12:50,639] [INFO] [config.py:976:print] zero_enabled ................. True
773
+ [2024-07-15 08:12:50,639] [INFO] [config.py:976:print] zero_force_ds_cpu_optimizer .. True
774
+ [2024-07-15 08:12:50,639] [INFO] [config.py:976:print] zero_optimization_stage ...... 3
775
+ [2024-07-15 08:12:50,639] [INFO] [config.py:962:print_user_config] json = {
776
+ "train_batch_size": 64,
777
+ "train_micro_batch_size_per_gpu": 8,
778
+ "gradient_accumulation_steps": 2,
779
+ "zero_optimization": {
780
+ "stage": 3,
781
+ "offload_optimizer": {
782
+ "device": "none",
783
+ "nvme_path": null
784
+ },
785
+ "offload_param": {
786
+ "device": "none",
787
+ "nvme_path": null
788
+ },
789
+ "stage3_gather_16bit_weights_on_model_save": true
790
+ },
791
+ "gradient_clipping": 1.0,
792
+ "steps_per_print": inf,
793
+ "bf16": {
794
+ "enabled": true
795
+ },
796
+ "fp16": {
797
+ "enabled": false
798
+ },
799
+ "zero_allow_untested_optimizer": true
800
+ }
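
Worth a sanity check: DeepSpeed requires `train_batch_size = train_micro_batch_size_per_gpu × gradient_accumulation_steps × world_size`, and the dump above is consistent — 8 × 2 × 4 = 64. A minimal sketch with the values copied straight from the printed config:

```python
# Sanity check of the DeepSpeed batch identity:
#   train_batch_size == micro_batch_per_gpu * grad_accum_steps * world_size
# All four values are taken from the config dump above.
train_batch_size = 64
train_micro_batch_size_per_gpu = 8
gradient_accumulation_steps = 2
world_size = 4

assert train_batch_size == (
    train_micro_batch_size_per_gpu * gradient_accumulation_steps * world_size
), "inconsistent batch configuration"
print("effective batch size:", train_batch_size)
```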
801
+ [2024-07-15 08:13:21,659] [WARNING] [stage3.py:1949:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
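
The warning fires only once here, at the first step, so it is likely harmless; DeepSpeed's own suggested mitigation is to flush the allocator cache on all ranks at the same point in the loop. A hedged sketch of what that would look like — `model_engine` and `train_dataloader` are placeholders for whatever test_dpo.py actually builds, not code from this repo:

```python
from deepspeed.accelerator import get_accelerator


def train_epoch(model_engine, train_dataloader):
    """Hypothetical loop body; `model_engine` and `train_dataloader` stand in
    for objects this repo's test_dpo.py constructs -- they are assumptions."""
    for batch in train_dataloader:
        loss = model_engine(batch)
        model_engine.backward(loss)
        model_engine.step()
        # Flush the PyTorch allocator cache on every rank at the same point,
        # as the DeepSpeed warning above recommends under memory pressure.
        get_accelerator().empty_cache()
```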
802
+ {'loss': 0.6931, 'grad_norm': 21.959526805466417, 'learning_rate': 1.6666666666666665e-07, 'rewards/chosen': 0.0, 'rewards/rejected': 0.0, 'rewards/accuracies': 0.0, 'rewards/margins': 0.0, 'logps/rejected': -430.01824951171875, 'logps/chosen': -522.2332763671875, 'logits/rejected': -1.5321521759033203, 'logits/chosen': -1.5251624584197998, 'epoch': 0.04}
803
+ {'loss': 0.6931, 'grad_norm': 20.292728976465323, 'learning_rate': 3.333333333333333e-07, 'rewards/chosen': 0.0, 'rewards/rejected': 0.0, 'rewards/accuracies': 0.0, 'rewards/margins': 0.0, 'logps/rejected': -407.6622314453125, 'logps/chosen': -442.01617431640625, 'logits/rejected': -1.5653201341629028, 'logits/chosen': -1.58231520652771, 'epoch': 0.07}
804
+ {'loss': 0.6925, 'grad_norm': 21.101947369321714, 'learning_rate': 5e-07, 'rewards/chosen': -0.001064786920323968, 'rewards/rejected': -0.0005854797782376409, 'rewards/accuracies': 0.5625, 'rewards/margins': -0.000479307200293988, 'logps/rejected': -416.7240905761719, 'logps/chosen': -491.49005126953125, 'logits/rejected': -1.5687460899353027, 'logits/chosen': -1.5684146881103516, 'epoch': 0.11}
805
+ {'loss': 0.6911, 'grad_norm': 21.29780296562762, 'learning_rate': 4.978612153434526e-07, 'rewards/chosen': -0.0003022289602085948, 'rewards/rejected': -0.003410081844776869, 'rewards/accuracies': 0.6875, 'rewards/margins': 0.003107852768152952, 'logps/rejected': -379.46832275390625, 'logps/chosen': -509.3629150390625, 'logits/rejected': -1.6125160455703735, 'logits/chosen': -1.5377585887908936, 'epoch': 0.15}
806
+ {'loss': 0.6852, 'grad_norm': 20.48824994961677, 'learning_rate': 4.91481456572267e-07, 'rewards/chosen': 0.001991434022784233, 'rewards/rejected': -0.012363090179860592, 'rewards/accuracies': 0.875, 'rewards/margins': 0.0143545251339674, 'logps/rejected': -403.9041748046875, 'logps/chosen': -488.50262451171875, 'logits/rejected': -1.5993618965148926, 'logits/chosen': -1.548508644104004, 'epoch': 0.19}
807
+ {'loss': 0.6695, 'grad_norm': 21.06721548722646, 'learning_rate': 4.809698831278217e-07, 'rewards/chosen': 0.0072067356668412685, 'rewards/rejected': -0.03501763194799423, 'rewards/accuracies': 0.9375, 'rewards/margins': 0.04222436994314194, 'logps/rejected': -404.1998596191406, 'logps/chosen': -471.6202087402344, 'logits/rejected': -1.5432801246643066, 'logits/chosen': -1.4877359867095947, 'epoch': 0.22}
808
+ {'loss': 0.6585, 'grad_norm': 19.330099709889698, 'learning_rate': 4.6650635094610966e-07, 'rewards/chosen': 0.004273919388651848, 'rewards/rejected': -0.057606834918260574, 'rewards/accuracies': 1.0, 'rewards/margins': 0.06188075616955757, 'logps/rejected': -342.83990478515625, 'logps/chosen': -442.44384765625, 'logits/rejected': -1.5952730178833008, 'logits/chosen': -1.5653460025787354, 'epoch': 0.26}
809
+ {'loss': 0.6266, 'grad_norm': 20.582435449086052, 'learning_rate': 4.483383350728088e-07, 'rewards/chosen': 0.014045600779354572, 'rewards/rejected': -0.10960347950458527, 'rewards/accuracies': 0.9375, 'rewards/margins': 0.12364907562732697, 'logps/rejected': -454.7938232421875, 'logps/chosen': -556.8931884765625, 'logits/rejected': -1.496585488319397, 'logits/chosen': -1.4436156749725342, 'epoch': 0.3}
810
+ {'loss': 0.6093, 'grad_norm': 19.02137727505308, 'learning_rate': 4.2677669529663686e-07, 'rewards/chosen': 0.0047954656183719635, 'rewards/rejected': -0.2064724564552307, 'rewards/accuracies': 1.0, 'rewards/margins': 0.21126791834831238, 'logps/rejected': -464.00732421875, 'logps/chosen': -545.1778564453125, 'logits/rejected': -1.5011253356933594, 'logits/chosen': -1.4762550592422485, 'epoch': 0.33}
811
+ {'loss': 0.5892, 'grad_norm': 18.29916228129018, 'learning_rate': 4.0219035725218013e-07, 'rewards/chosen': 0.006784859113395214, 'rewards/rejected': -0.2541770935058594, 'rewards/accuracies': 1.0, 'rewards/margins': 0.2609619200229645, 'logps/rejected': -449.64532470703125, 'logps/chosen': -547.4830322265625, 'logits/rejected': -1.5759844779968262, 'logits/chosen': -1.576920986175537, 'epoch': 0.37}
812
+ {'loss': 0.5794, 'grad_norm': 17.619368905828807, 'learning_rate': 3.75e-07, 'rewards/chosen': -0.055743396282196045, 'rewards/rejected': -0.24957191944122314, 'rewards/accuracies': 1.0, 'rewards/margins': 0.1938285231590271, 'logps/rejected': -381.874267578125, 'logps/chosen': -413.1097106933594, 'logits/rejected': -1.5630524158477783, 'logits/chosen': -1.5285149812698364, 'epoch': 0.41}
813
+ {'loss': 0.5319, 'grad_norm': 19.01708114625534, 'learning_rate': 3.4567085809127245e-07, 'rewards/chosen': -0.08890116214752197, 'rewards/rejected': -0.40977126359939575, 'rewards/accuracies': 0.9375, 'rewards/margins': 0.3208701014518738, 'logps/rejected': -390.45758056640625, 'logps/chosen': -429.37237548828125, 'logits/rejected': -1.504837989807129, 'logits/chosen': -1.5181918144226074, 'epoch': 0.44}
814
+ {'loss': 0.4825, 'grad_norm': 19.102770950198305, 'learning_rate': 3.147047612756302e-07, 'rewards/chosen': -0.1315421462059021, 'rewards/rejected': -0.6144382953643799, 'rewards/accuracies': 1.0, 'rewards/margins': 0.4828961193561554, 'logps/rejected': -425.3389892578125, 'logps/chosen': -497.2473449707031, 'logits/rejected': -1.4981393814086914, 'logits/chosen': -1.4961140155792236, 'epoch': 0.48}
815
+ {'loss': 0.4697, 'grad_norm': 19.301810836402147, 'learning_rate': 2.826315480550129e-07, 'rewards/chosen': -0.24572713673114777, 'rewards/rejected': -0.7290294170379639, 'rewards/accuracies': 1.0, 'rewards/margins': 0.4833022952079773, 'logps/rejected': -439.08197021484375, 'logps/chosen': -486.4429016113281, 'logits/rejected': -1.4528899192810059, 'logits/chosen': -1.4483075141906738, 'epoch': 0.52}
816
+ {'loss': 0.4387, 'grad_norm': 18.71279520294138, 'learning_rate': 2.5e-07, 'rewards/chosen': -0.3652060627937317, 'rewards/rejected': -0.9309228658676147, 'rewards/accuracies': 0.9375, 'rewards/margins': 0.5657168030738831, 'logps/rejected': -494.8599548339844, 'logps/chosen': -521.008544921875, 'logits/rejected': -1.5090162754058838, 'logits/chosen': -1.5292141437530518, 'epoch': 0.56}
817
+ {'loss': 0.4174, 'grad_norm': 18.793537566198992, 'learning_rate': 2.1736845194498716e-07, 'rewards/chosen': -0.5173332691192627, 'rewards/rejected': -1.037455439567566, 'rewards/accuracies': 0.875, 'rewards/margins': 0.5201221108436584, 'logps/rejected': -369.700927734375, 'logps/chosen': -459.7350158691406, 'logits/rejected': -1.4832723140716553, 'logits/chosen': -1.4446516036987305, 'epoch': 0.59}
818
+ {'loss': 0.4033, 'grad_norm': 16.745079618493, 'learning_rate': 1.8529523872436977e-07, 'rewards/chosen': -0.6878727078437805, 'rewards/rejected': -1.3357090950012207, 'rewards/accuracies': 1.0, 'rewards/margins': 0.6478363275527954, 'logps/rejected': -611.6802368164062, 'logps/chosen': -628.3623046875, 'logits/rejected': -1.4667479991912842, 'logits/chosen': -1.4263123273849487, 'epoch': 0.63}
819
+ {'loss': 0.4154, 'grad_norm': 16.635280686124094, 'learning_rate': 1.5432914190872756e-07, 'rewards/chosen': -0.7533223628997803, 'rewards/rejected': -1.3541579246520996, 'rewards/accuracies': 0.875, 'rewards/margins': 0.6008355617523193, 'logps/rejected': -625.437255859375, 'logps/chosen': -662.1283569335938, 'logits/rejected': -1.4638080596923828, 'logits/chosen': -1.4674986600875854, 'epoch': 0.67}
820
+ {'loss': 0.3555, 'grad_norm': 20.71677350667776, 'learning_rate': 1.2500000000000005e-07, 'rewards/chosen': -0.7397876977920532, 'rewards/rejected': -1.7132179737091064, 'rewards/accuracies': 1.0, 'rewards/margins': 0.9734303951263428, 'logps/rejected': -571.26123046875, 'logps/chosen': -588.1524047851562, 'logits/rejected': -1.5262463092803955, 'logits/chosen': -1.522387981414795, 'epoch': 0.7}
821
+ {'loss': 0.3821, 'grad_norm': 15.4400444773846, 'learning_rate': 9.780964274781983e-08, 'rewards/chosen': -0.7622807025909424, 'rewards/rejected': -1.6570687294006348, 'rewards/accuracies': 1.0, 'rewards/margins': 0.8947880268096924, 'logps/rejected': -557.1793212890625, 'logps/chosen': -621.7742919921875, 'logits/rejected': -1.3520491123199463, 'logits/chosen': -1.3689525127410889, 'epoch': 0.74}
822
+ {'loss': 0.3535, 'grad_norm': 15.898172432048854, 'learning_rate': 7.322330470336313e-08, 'rewards/chosen': -0.9690157771110535, 'rewards/rejected': -1.7651585340499878, 'rewards/accuracies': 1.0, 'rewards/margins': 0.7961427569389343, 'logps/rejected': -677.109619140625, 'logps/chosen': -700.77294921875, 'logits/rejected': -1.4618651866912842, 'logits/chosen': -1.4822262525558472, 'epoch': 0.78}
823
+ {'loss': 0.3546, 'grad_norm': 15.739285942357705, 'learning_rate': 5.166166492719124e-08, 'rewards/chosen': -0.9611536264419556, 'rewards/rejected': -1.9216368198394775, 'rewards/accuracies': 1.0, 'rewards/margins': 0.960483193397522, 'logps/rejected': -537.1988525390625, 'logps/chosen': -577.4271240234375, 'logits/rejected': -1.4116517305374146, 'logits/chosen': -1.437910556793213, 'epoch': 0.81}
824
+ {'loss': 0.3301, 'grad_norm': 14.542054521548646, 'learning_rate': 3.349364905389032e-08, 'rewards/chosen': -0.7476925849914551, 'rewards/rejected': -2.160238265991211, 'rewards/accuracies': 1.0, 'rewards/margins': 1.4125456809997559, 'logps/rejected': -686.080322265625, 'logps/chosen': -638.280517578125, 'logits/rejected': -1.379716396331787, 'logits/chosen': -1.3872032165527344, 'epoch': 0.85}
825
+ {'loss': 0.3299, 'grad_norm': 17.78699135059973, 'learning_rate': 1.9030116872178314e-08, 'rewards/chosen': -0.8232169151306152, 'rewards/rejected': -1.8412718772888184, 'rewards/accuracies': 1.0, 'rewards/margins': 1.0180549621582031, 'logps/rejected': -591.6486206054688, 'logps/chosen': -697.1530151367188, 'logits/rejected': -1.4145452976226807, 'logits/chosen': -1.3574109077453613, 'epoch': 0.89}
826
+ {'loss': 0.3283, 'grad_norm': 15.532122481306784, 'learning_rate': 8.518543427732949e-09, 'rewards/chosen': -0.9585639834403992, 'rewards/rejected': -1.9474496841430664, 'rewards/accuracies': 0.9375, 'rewards/margins': 0.9888858199119568, 'logps/rejected': -653.23974609375, 'logps/chosen': -600.0244140625, 'logits/rejected': -1.4114274978637695, 'logits/chosen': -1.4619381427764893, 'epoch': 0.93}
827
+ {'loss': 0.3018, 'grad_norm': 16.641777908742164, 'learning_rate': 2.1387846565474044e-09, 'rewards/chosen': -1.0871095657348633, 'rewards/rejected': -2.2172341346740723, 'rewards/accuracies': 0.9375, 'rewards/margins': 1.1301246881484985, 'logps/rejected': -615.214599609375, 'logps/chosen': -623.5850830078125, 'logits/rejected': -1.3277167081832886, 'logits/chosen': -1.3785361051559448, 'epoch': 0.96}
828
+ {'loss': 0.3359, 'grad_norm': 13.468548124945492, 'learning_rate': 0.0, 'rewards/chosen': -0.978149950504303, 'rewards/rejected': -2.006209373474121, 'rewards/accuracies': 0.9375, 'rewards/margins': 1.0280592441558838, 'logps/rejected': -536.807861328125, 'logps/chosen': -570.6109619140625, 'logits/rejected': -1.4331750869750977, 'logits/chosen': -1.451808214187622, 'epoch': 1.0}
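
The loss trajectory reads as expected for DPO: the policy starts as a copy of the reference model, so the implicit reward margin is zero and the loss begins at -log σ(0) = ln 2 ≈ 0.6931 — exactly the first two logged steps. As `rewards/margins` grows, the loss falls toward -log σ(margin). A small sketch relating the two logged columns (the final values match only approximately, because the logged loss averages per-example terms rather than being evaluated at the mean margin):

```python
import math


def dpo_loss_from_margin(reward_margin: float) -> float:
    # DPO loss as a function of the logged 'rewards/margins'
    # (the beta-scaled log-ratio difference): -log(sigmoid(margin)).
    return -math.log(1.0 / (1.0 + math.exp(-reward_margin)))


print(dpo_loss_from_margin(0.0))    # 0.6931... -- the first two logged steps
print(dpo_loss_from_margin(1.028))  # ~0.306 -- near the final step's 0.3359
```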
829
+ [2024-07-15 08:21:03,617] [INFO] [logging.py:96:log_dist] [Rank 0] [Torch] Checkpoint global_step27 is about to be saved!
830
+ [2024-07-15 08:21:03,645] [INFO] [logging.py:96:log_dist] [Rank 0] Saving model checkpoint: DPO/CHT-6_20240715-1605/checkpoint-27/global_step27/zero_pp_rank_0_mp_rank_00_model_states.pt
831
+ [2024-07-15 08:21:03,645] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving DPO/CHT-6_20240715-1605/checkpoint-27/global_step27/zero_pp_rank_0_mp_rank_00_model_states.pt...
832
+ [2024-07-15 08:21:03,733] [INFO] [torch_checkpoint_engine.py:23:save] [Torch] Saved DPO/CHT-6_20240715-1605/checkpoint-27/global_step27/zero_pp_rank_0_mp_rank_00_model_states.pt.
833
+ [2024-07-15 08:21:03,834] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving DPO/CHT-6_20240715-1605/checkpoint-27/global_step27/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt...
834
+ [2024-07-15 08:26:30,541] [INFO] [torch_checkpoint_engine.py:23:save] [Torch] Saved DPO/CHT-6_20240715-1605/checkpoint-27/global_step27/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt.
835
+ [2024-07-15 08:26:30,762] [INFO] [engine.py:3393:_save_zero_checkpoint] zero checkpoint saved DPO/CHT-6_20240715-1605/checkpoint-27/global_step27/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
836
+ [2024-07-15 08:26:38,789] [INFO] [torch_checkpoint_engine.py:33:commit] [Torch] Checkpoint global_step27 is ready now!
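
The files saved above are per-rank ZeRO-3 shards (note the `zero_pp_rank_0` naming), not a directly loadable model. DeepSpeed provides a consolidation utility for this; a sketch, assuming it is run next to the checkpoint directory from the log:

```python
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# Consolidate the per-rank ZeRO-3 shards saved above into one fp32 state dict.
# Path and tag are copied from the checkpoint log lines above.
state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "DPO/CHT-6_20240715-1605/checkpoint-27", tag="global_step27"
)
print(sum(p.numel() for p in state_dict.values()), "parameters consolidated")
```

Since the config sets `stage3_gather_16bit_weights_on_model_save: true`, a consolidated 16-bit model is also written at save time, so the fp32 reconstruction is only needed when full precision matters.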
837
+ [INFO 20240712-1747] ☆☆☆☆☆
838
+ [INFO 20240712-1747] ☆☆☆☆☆
+ [INFO 20240712-1747] ☆☆☆☆☆
839
+
840
+ {'train_runtime': 828.5595, 'train_samples_per_second': 2.086, 'train_steps_per_second': 0.033, 'train_loss': 0.4969717639463919, 'epoch': 1.0}
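
The summary line is internally consistent with the config: 0.033 steps/s over 828.56 s is the 27 optimizer steps this commit is named for, and 2.086 samples/s works out to 1728 samples, i.e. 27 steps at the effective batch of 64. A quick cross-check:

```python
# Cross-check the summary line against the config: 27 optimizer steps at an
# effective batch size of 64 is 1728 training samples.
train_runtime = 828.5595
steps = round(0.033 * train_runtime)    # ~27, the logged step count
samples = round(2.086 * train_runtime)  # ~1728
print(steps, samples, 27 * 64)          # 27 1728 1728
```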
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ee60d1e797d4d1493b6c5402f48f0cfdc58d845b7d8580bce0ca15deff98f6f
3
+ size 7352
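
For completeness: `training_args.bin` is the torch-pickled arguments object the Hugging Face Trainer writes alongside its outputs, so the exact run configuration can be recovered from it. A minimal sketch, assuming the file has been pulled from LFS:

```python
import torch

# training_args.bin is a pickled dataclass, not a tensor file, so newer torch
# versions need weights_only=False to unpickle it.
args = torch.load("training_args.bin", weights_only=False)
print(args)
```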