diff --git a/LLM-Detector-V1-4w/README.md b/LLM-Detector-V1-4w/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6d11b4c9e52483400eda176f7f8fc7b5c8cd1b62
--- /dev/null
+++ b/LLM-Detector-V1-4w/README.md
@@ -0,0 +1,66 @@
+---
+base_model: ../Baichuan2-7B-Chat
+tags:
+- llama-factory
+- lora
+- generated_from_trainer
+model-index:
+- name: hc3zh
+ results: []
+---
+
+
+
+# hc3zh
+
+This model is a fine-tuned version of [Baichuan2-7B-Chat](https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat) on the hc3zh dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.0150
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-05
+- train_batch_size: 8
+- eval_batch_size: 8
+- seed: 42
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 32
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: cosine
+- num_epochs: 3.0
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss |
+|:-------------:|:-----:|:----:|:---------------:|
+| 0.0199 | 0.42 | 500 | 0.0105 |
+| 0.0011 | 0.85 | 1000 | 0.0118 |
+| 0.0001 | 1.27 | 1500 | 0.0110 |
+| 0.0143 | 1.7 | 2000 | 0.0135 |
+| 0.0001 | 2.12 | 2500 | 0.0129 |
+| 0.0001 | 2.55 | 3000 | 0.0145 |
+| 0.002 | 2.97 | 3500 | 0.0150 |
+
+
+### Framework versions
+
+- Transformers 4.32.1
+- Pytorch 2.1.0+cu121
+- Datasets 2.14.6
+- Tokenizers 0.13.2
diff --git a/LLM-Detector-V1-4w/adapter_config.json b/LLM-Detector-V1-4w/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..54acbac2c279e46331f75c340af98595a8683d48
--- /dev/null
+++ b/LLM-Detector-V1-4w/adapter_config.json
@@ -0,0 +1,22 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "../Baichuan2-7B-Chat",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "W_pack"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/LLM-Detector-V1-4w/adapter_model.bin b/LLM-Detector-V1-4w/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..73a4f4766fd048ad38964019279ee9b86a50644e
--- /dev/null
+++ b/LLM-Detector-V1-4w/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43824a5d6a7ba3851d5cdec7eaebba477ebc4dc160eeeb85afd21cc987ec7440
+size 16800430
diff --git a/LLM-Detector-V1-4w/all_results.json b/LLM-Detector-V1-4w/all_results.json
new file mode 100644
index 0000000000000000000000000000000000000000..1dd35543727978998f481967dd84990c16c23c6a
--- /dev/null
+++ b/LLM-Detector-V1-4w/all_results.json
@@ -0,0 +1,11 @@
+{
+ "epoch": 3.0,
+ "eval_loss": 0.014986271038651466,
+ "eval_runtime": 87.9616,
+ "eval_samples_per_second": 22.544,
+ "eval_steps_per_second": 2.819,
+ "train_loss": 0.06714861565509712,
+ "train_runtime": 17560.0547,
+ "train_samples_per_second": 6.434,
+ "train_steps_per_second": 0.201
+}
\ No newline at end of file
diff --git a/LLM-Detector-V1-4w/checkpoint-1000/README.md b/LLM-Detector-V1-4w/checkpoint-1000/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a04450aa7d792898a89dd2c6093050ffd3808789
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-1000/README.md
@@ -0,0 +1,219 @@
+---
+library_name: peft
+base_model: ../Baichuan2-7B-Chat
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: QuantizationMethod.BITS_AND_BYTES
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: float16
+
+### Framework versions
+
+
+- PEFT 0.6.0
diff --git a/LLM-Detector-V1-4w/checkpoint-1000/adapter_config.json b/LLM-Detector-V1-4w/checkpoint-1000/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..54acbac2c279e46331f75c340af98595a8683d48
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-1000/adapter_config.json
@@ -0,0 +1,22 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "../Baichuan2-7B-Chat",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "W_pack"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/LLM-Detector-V1-4w/checkpoint-1000/adapter_model.bin b/LLM-Detector-V1-4w/checkpoint-1000/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..48e31281cb0edcb978597b91fe27fbb7af76c293
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-1000/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe40450b2fbd3f10782fef8e66d9acb4e5cac016892e806376a3af80925fad96
+size 16800430
diff --git a/LLM-Detector-V1-4w/checkpoint-1000/optimizer.pt b/LLM-Detector-V1-4w/checkpoint-1000/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..01dea075500b0cf47cdfe72f930ddf0cab0b595d
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-1000/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18bddeeaa98c46d5d2497ed51b747e5fe4c7ee27d855dfe46460ce899dc2bf53
+size 33608634
diff --git a/LLM-Detector-V1-4w/checkpoint-1000/rng_state.pth b/LLM-Detector-V1-4w/checkpoint-1000/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..baf6cd28c0c99223285a28a993673248a150699b
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-1000/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83da651fcd152de69564c4b12041693577b33619275f061f47eaa1672c885e33
+size 14244
diff --git a/LLM-Detector-V1-4w/checkpoint-1000/scheduler.pt b/LLM-Detector-V1-4w/checkpoint-1000/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1076148955872286d0608f89698a3a76a1c630e9
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-1000/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4bd3b1194329dcc74504173cf2ff77c083a42ad7882c159146ad4a1df92ffee3
+size 1064
diff --git a/LLM-Detector-V1-4w/checkpoint-1000/special_tokens_map.json b/LLM-Detector-V1-4w/checkpoint-1000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..5819ea25d7b1b6340063a0629c2143c44b0452da
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-1000/special_tokens_map.json
@@ -0,0 +1,30 @@
+{
+ "bos_token": {
+    "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+    "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+    "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+    "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/LLM-Detector-V1-4w/checkpoint-1000/tokenization_baichuan.py b/LLM-Detector-V1-4w/checkpoint-1000/tokenization_baichuan.py
new file mode 100644
index 0000000000000000000000000000000000000000..256544b45542d2f5dcd12a65e2f0ddaeeb9def25
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-1000/tokenization_baichuan.py
@@ -0,0 +1,251 @@
+# Copyright 2023 Baichuan Inc. All Rights Reserved.
+
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {},
+ "tokenizer_file": {},
+}
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
+
+
+class BaichuanTokenizer(PreTrainedTokenizer):
+ """
+ Construct a Baichuan tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+        unk_token="<unk>",
+        bos_token="<s>",
+        eos_token="</s>",
+ pad_token=None,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ add_bos_token=True,
+ add_eos_token=False,
+ clean_up_tokenization_spaces=False,
+ **kwargs,
+ ):
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ add_bos_token=add_bos_token,
+ add_eos_token=add_eos_token,
+ sp_model_kwargs=self.sp_model_kwargs,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ **kwargs,
+ )
+ self.vocab_file = vocab_file
+ self.add_bos_token = add_bos_token
+ self.add_eos_token = add_eos_token
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(vocab_file)
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.vocab_file)
+
+ @property
+ def vocab_size(self):
+ """Returns vocab size"""
+ return self.sp_model.get_piece_size()
+
+ def get_vocab(self):
+ """Returns vocab as a dict"""
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def _tokenize(self, text):
+ """Returns a tokenized string."""
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.sp_model.piece_to_id(token)
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ token = self.sp_model.IdToPiece(index)
+ return token
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ current_sub_tokens = []
+ out_string = ""
+ prev_is_special = False
+ for i, token in enumerate(tokens):
+ # make sure that special tokens are not decoded using sentencepiece model
+ if token in self.all_special_tokens:
+ if not prev_is_special and i != 0:
+ out_string += " "
+ out_string += self.sp_model.decode(current_sub_tokens) + token
+ prev_is_special = True
+ current_sub_tokens = []
+ else:
+ current_sub_tokens.append(token)
+ prev_is_special = False
+ out_string += self.sp_model.decode(current_sub_tokens)
+ return out_string
+
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ """
+ Save the vocabulary and special tokens file to a directory.
+
+ Args:
+ save_directory (`str`):
+ The directory in which to save the vocabulary.
+
+ Returns:
+ `Tuple(str)`: Paths to the files saved.
+ """
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+ output = bos_token_id + token_ids_0 + eos_token_id
+
+ if token_ids_1 is not None:
+ output = output + bos_token_id + token_ids_1 + eos_token_id
+
+ return output
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ bos_token_id = [1] if self.add_bos_token else []
+ eos_token_id = [1] if self.add_eos_token else []
+
+ if token_ids_1 is None:
+ return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+ return (
+ bos_token_id
+ + ([0] * len(token_ids_0))
+ + eos_token_id
+ + bos_token_id
+ + ([0] * len(token_ids_1))
+ + eos_token_id
+ )
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ if token_ids_1 is None, only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of ids.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+ output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+ if token_ids_1 is not None:
+ output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+ return output
diff --git a/LLM-Detector-V1-4w/checkpoint-1000/tokenizer.model b/LLM-Detector-V1-4w/checkpoint-1000/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..b3902c4521d7f34868ac76dd16150ff5ca41b000
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-1000/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79452955be6b419a65984273a9f08af86042e1c2a75ee3ba989cbf620a133cc2
+size 2001107
diff --git a/LLM-Detector-V1-4w/checkpoint-1000/tokenizer_config.json b/LLM-Detector-V1-4w/checkpoint-1000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..d14486d6a5be1135bdda779a8ffcde1b77155302
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-1000/tokenizer_config.json
@@ -0,0 +1,49 @@
+{
+ "add_bos_token": false,
+ "add_eos_token": false,
+ "auto_map": {
+ "AutoTokenizer": [
+ "tokenization_baichuan.BaichuanTokenizer",
+ null
+ ]
+ },
+ "bos_token": {
+ "__type": "AddedToken",
+    "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+    "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "model_max_length": 4096,
+ "pad_token": {
+ "__type": "AddedToken",
+    "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "split_special_tokens": false,
+ "tokenizer_class": "BaichuanTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+    "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "use_fast": false
+}
diff --git a/LLM-Detector-V1-4w/checkpoint-1000/trainer_state.json b/LLM-Detector-V1-4w/checkpoint-1000/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..5964090063211849e71697a54cca820ce7604779
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-1000/trainer_state.json
@@ -0,0 +1,635 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.8496176720475785,
+ "eval_steps": 500,
+ "global_step": 1000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.999919851200522e-05,
+ "loss": 9.9461,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 4.9996428002198536e-05,
+ "loss": 6.4908,
+ "step": 20
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4.9992242747551964e-05,
+ "loss": 3.708,
+ "step": 30
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4.99857130295276e-05,
+ "loss": 0.8908,
+ "step": 40
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 4.997720546222574e-05,
+ "loss": 0.2454,
+ "step": 50
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 4.996672071909866e-05,
+ "loss": 0.1348,
+ "step": 60
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 4.995425963011034e-05,
+ "loss": 0.0487,
+ "step": 70
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.993982318167074e-05,
+ "loss": 0.0282,
+ "step": 80
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.992341251655768e-05,
+ "loss": 0.0455,
+ "step": 90
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.9905028933826435e-05,
+ "loss": 0.0472,
+ "step": 100
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 4.988467388870688e-05,
+ "loss": 0.0526,
+ "step": 110
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 4.986234899248826e-05,
+ "loss": 0.0679,
+ "step": 120
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 4.983805601239172e-05,
+ "loss": 0.0314,
+ "step": 130
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 4.981179687143034e-05,
+ "loss": 0.0136,
+ "step": 140
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 4.978357364825695e-05,
+ "loss": 0.0409,
+ "step": 150
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 4.975338857699956e-05,
+ "loss": 0.0284,
+ "step": 160
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 4.972124404708454e-05,
+ "loss": 0.0364,
+ "step": 170
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 4.968714260304743e-05,
+ "loss": 0.0147,
+ "step": 180
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 4.965108694433159e-05,
+ "loss": 0.0174,
+ "step": 190
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 4.961307992507443e-05,
+ "loss": 0.0244,
+ "step": 200
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 4.957312455388152e-05,
+ "loss": 0.0387,
+ "step": 210
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 4.953122399358845e-05,
+ "loss": 0.0264,
+ "step": 220
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.948738156101042e-05,
+ "loss": 0.0291,
+ "step": 230
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.9441600726679694e-05,
+ "loss": 0.0214,
+ "step": 240
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 4.939388511457092e-05,
+ "loss": 0.0116,
+ "step": 250
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 4.934423850181419e-05,
+ "loss": 0.0191,
+ "step": 260
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 4.9292664818396117e-05,
+ "loss": 0.0064,
+ "step": 270
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 4.9239168146848666e-05,
+ "loss": 0.0184,
+ "step": 280
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 4.9183752721926036e-05,
+ "loss": 0.0026,
+ "step": 290
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 4.912642293026942e-05,
+ "loss": 0.0223,
+ "step": 300
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 4.906718331005979e-05,
+ "loss": 0.0405,
+ "step": 310
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 4.900603855065861e-05,
+ "loss": 0.0461,
+ "step": 320
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 4.894299349223665e-05,
+ "loss": 0.0199,
+ "step": 330
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 4.8878053125390875e-05,
+ "loss": 0.0193,
+ "step": 340
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 4.881122259074935e-05,
+ "loss": 0.004,
+ "step": 350
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.874250717856433e-05,
+ "loss": 0.0018,
+ "step": 360
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.867191232829348e-05,
+ "loss": 0.0021,
+ "step": 370
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 4.8599443628169295e-05,
+ "loss": 0.018,
+ "step": 380
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 4.8525106814756754e-05,
+ "loss": 0.0261,
+ "step": 390
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 4.84489077724992e-05,
+ "loss": 0.016,
+ "step": 400
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 4.8370852533252536e-05,
+ "loss": 0.0402,
+ "step": 410
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 4.8290947275807755e-05,
+ "loss": 0.0038,
+ "step": 420
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 4.8209198325401815e-05,
+ "loss": 0.008,
+ "step": 430
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 4.8125612153216976e-05,
+ "loss": 0.0296,
+ "step": 440
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 4.804019537586849e-05,
+ "loss": 0.0012,
+ "step": 450
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 4.7952954754880886e-05,
+ "loss": 0.0142,
+ "step": 460
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 4.7863897196152704e-05,
+ "loss": 0.0163,
+ "step": 470
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 4.7773029749409836e-05,
+ "loss": 0.0021,
+ "step": 480
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.76803596076475e-05,
+ "loss": 0.0355,
+ "step": 490
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.758589410656078e-05,
+ "loss": 0.0199,
+ "step": 500
+ },
+ {
+ "epoch": 0.42,
+ "eval_loss": 0.010466881096363068,
+ "eval_runtime": 88.037,
+ "eval_samples_per_second": 22.525,
+ "eval_steps_per_second": 2.817,
+ "step": 500
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 4.748964072396403e-05,
+ "loss": 0.0341,
+ "step": 510
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 4.7391607079198876e-05,
+ "loss": 0.0137,
+ "step": 520
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 4.7291800932531064e-05,
+ "loss": 0.0138,
+ "step": 530
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 4.719023018453623e-05,
+ "loss": 0.0063,
+ "step": 540
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 4.708690287547441e-05,
+ "loss": 0.0376,
+ "step": 550
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 4.698182718465368e-05,
+ "loss": 0.006,
+ "step": 560
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 4.687501142978258e-05,
+ "loss": 0.0371,
+ "step": 570
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 4.6766464066311765e-05,
+ "loss": 0.0322,
+ "step": 580
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 4.665619368676466e-05,
+ "loss": 0.0086,
+ "step": 590
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 4.6544209020057285e-05,
+ "loss": 0.002,
+ "step": 600
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 4.643051893080725e-05,
+ "loss": 0.0147,
+ "step": 610
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 4.631513241863209e-05,
+ "loss": 0.0038,
+ "step": 620
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 4.619805861743683e-05,
+ "loss": 0.0187,
+ "step": 630
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 4.607930679469096e-05,
+ "loss": 0.0063,
+ "step": 640
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 4.595888635069481e-05,
+ "loss": 0.0109,
+ "step": 650
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 4.5836806817835475e-05,
+ "loss": 0.005,
+ "step": 660
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 4.57130778598322e-05,
+ "loss": 0.0167,
+ "step": 670
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 4.5587709270971425e-05,
+ "loss": 0.0143,
+ "step": 680
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 4.546071097533145e-05,
+ "loss": 0.0015,
+ "step": 690
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 4.533209302599691e-05,
+ "loss": 0.0003,
+ "step": 700
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 4.520186560426292e-05,
+ "loss": 0.006,
+ "step": 710
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 4.507003901882915e-05,
+ "loss": 0.0093,
+ "step": 720
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 4.493662370498383e-05,
+ "loss": 0.0046,
+ "step": 730
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 4.4801630223777665e-05,
+ "loss": 0.0147,
+ "step": 740
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 4.466506926118782e-05,
+ "loss": 0.0102,
+ "step": 750
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 4.4526951627272074e-05,
+ "loss": 0.017,
+ "step": 760
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 4.438728825531305e-05,
+ "loss": 0.0033,
+ "step": 770
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 4.4246090200952816e-05,
+ "loss": 0.0061,
+ "step": 780
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 4.410336864131762e-05,
+ "loss": 0.0032,
+ "step": 790
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 4.395913487413324e-05,
+ "loss": 0.0043,
+ "step": 800
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 4.3813400316830576e-05,
+ "loss": 0.0063,
+ "step": 810
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 4.36661765056419e-05,
+ "loss": 0.0273,
+ "step": 820
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.351747509468763e-05,
+ "loss": 0.0125,
+ "step": 830
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.336730785505382e-05,
+ "loss": 0.0076,
+ "step": 840
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 4.3215686673860384e-05,
+ "loss": 0.0127,
+ "step": 850
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.306262355332006e-05,
+ "loss": 0.0161,
+ "step": 860
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.290813060978839e-05,
+ "loss": 0.0169,
+ "step": 870
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.2752220072804564e-05,
+ "loss": 0.0081,
+ "step": 880
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 4.259490428412335e-05,
+ "loss": 0.0131,
+ "step": 890
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 4.243619569673814e-05,
+ "loss": 0.0205,
+ "step": 900
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.2276106873895143e-05,
+ "loss": 0.0026,
+ "step": 910
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.2114650488098936e-05,
+ "loss": 0.018,
+ "step": 920
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.19518393201093e-05,
+ "loss": 0.0083,
+ "step": 930
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 4.178768625792949e-05,
+ "loss": 0.0291,
+ "step": 940
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.162220429578605e-05,
+ "loss": 0.0226,
+ "step": 950
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.145540653310018e-05,
+ "loss": 0.0042,
+ "step": 960
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.128730617345084e-05,
+ "loss": 0.0078,
+ "step": 970
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 4.111791652352952e-05,
+ "loss": 0.0084,
+ "step": 980
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 4.094725099208688e-05,
+ "loss": 0.0044,
+ "step": 990
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 4.077532308887141e-05,
+ "loss": 0.0011,
+ "step": 1000
+ },
+ {
+ "epoch": 0.85,
+ "eval_loss": 0.01175768580287695,
+ "eval_runtime": 88.0904,
+ "eval_samples_per_second": 22.511,
+ "eval_steps_per_second": 2.815,
+ "step": 1000
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 3531,
+ "num_train_epochs": 3,
+ "save_steps": 1000,
+ "total_flos": 1.5022667366085427e+17,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/LLM-Detector-V1-4w/checkpoint-1000/training_args.bin b/LLM-Detector-V1-4w/checkpoint-1000/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e37f40cb61d45b6c2efd87b62a5ee72c12d2b4c2
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-1000/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c31eb820fabf5021fa0eda935da3d201c65c7331d3ce4ce4ad4631151a6068e9
+size 4664
diff --git a/LLM-Detector-V1-4w/checkpoint-2000/README.md b/LLM-Detector-V1-4w/checkpoint-2000/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a04450aa7d792898a89dd2c6093050ffd3808789
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-2000/README.md
@@ -0,0 +1,219 @@
+---
+library_name: peft
+base_model: ../Baichuan2-7B-Chat
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: QuantizationMethod.BITS_AND_BYTES
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: float16
+
+### Framework versions
+
+
+- PEFT 0.6.0
diff --git a/LLM-Detector-V1-4w/checkpoint-2000/adapter_config.json b/LLM-Detector-V1-4w/checkpoint-2000/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..54acbac2c279e46331f75c340af98595a8683d48
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-2000/adapter_config.json
@@ -0,0 +1,22 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "../Baichuan2-7B-Chat",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "W_pack"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/LLM-Detector-V1-4w/checkpoint-2000/adapter_model.bin b/LLM-Detector-V1-4w/checkpoint-2000/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..ab9e0c0667cf231e11af4e220fac12e4258bba8b
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-2000/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9664fcda9f9455692ac077930f9807e40a74bbb1391a6cc8dff6f1da2753d7b7
+size 16800430
diff --git a/LLM-Detector-V1-4w/checkpoint-2000/optimizer.pt b/LLM-Detector-V1-4w/checkpoint-2000/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e107858a7f26f61a5bcbd4afc61168f27ba1f607
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-2000/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19e1df2c7ebe4ba45177d9926132b2249e61306c5a47e8594117807499496934
+size 33608634
diff --git a/LLM-Detector-V1-4w/checkpoint-2000/rng_state.pth b/LLM-Detector-V1-4w/checkpoint-2000/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3eee998d83421bb7a3d62dcc64800a817b09a27a
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-2000/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efd859252932a9e3ea8978d62ba6b8ca255ea2df13637ed0a28deb7bd5f76e91
+size 14244
diff --git a/LLM-Detector-V1-4w/checkpoint-2000/scheduler.pt b/LLM-Detector-V1-4w/checkpoint-2000/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e5983ff4c0dbb8323a0ad5b5cf969f7468627ca3
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-2000/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3dd0fc895c7505f36c0d10a7fb566f688f4529581ce3e22f1659966dcc265a99
+size 1064
diff --git a/LLM-Detector-V1-4w/checkpoint-2000/special_tokens_map.json b/LLM-Detector-V1-4w/checkpoint-2000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..5819ea25d7b1b6340063a0629c2143c44b0452da
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-2000/special_tokens_map.json
@@ -0,0 +1,30 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/LLM-Detector-V1-4w/checkpoint-2000/tokenization_baichuan.py b/LLM-Detector-V1-4w/checkpoint-2000/tokenization_baichuan.py
new file mode 100644
index 0000000000000000000000000000000000000000..256544b45542d2f5dcd12a65e2f0ddaeeb9def25
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-2000/tokenization_baichuan.py
@@ -0,0 +1,251 @@
+# Copyright 2023 Baichuan Inc. All Rights Reserved.
+
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {},
+ "tokenizer_file": {},
+}
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
+
+
+class BaichuanTokenizer(PreTrainedTokenizer):
+ """
+ Construct a Baichuan tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ unk_token="<unk>",
+ bos_token="<s>",
+ eos_token="</s>",
+ pad_token=None,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ add_bos_token=True,
+ add_eos_token=False,
+ clean_up_tokenization_spaces=False,
+ **kwargs,
+ ):
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ add_bos_token=add_bos_token,
+ add_eos_token=add_eos_token,
+ sp_model_kwargs=self.sp_model_kwargs,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ **kwargs,
+ )
+ self.vocab_file = vocab_file
+ self.add_bos_token = add_bos_token
+ self.add_eos_token = add_eos_token
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(vocab_file)
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.vocab_file)
+
+ @property
+ def vocab_size(self):
+ """Returns vocab size"""
+ return self.sp_model.get_piece_size()
+
+ def get_vocab(self):
+ """Returns vocab as a dict"""
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def _tokenize(self, text):
+ """Returns a tokenized string."""
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.sp_model.piece_to_id(token)
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ token = self.sp_model.IdToPiece(index)
+ return token
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ current_sub_tokens = []
+ out_string = ""
+ prev_is_special = False
+ for i, token in enumerate(tokens):
+ # make sure that special tokens are not decoded using sentencepiece model
+ if token in self.all_special_tokens:
+ if not prev_is_special and i != 0:
+ out_string += " "
+ out_string += self.sp_model.decode(current_sub_tokens) + token
+ prev_is_special = True
+ current_sub_tokens = []
+ else:
+ current_sub_tokens.append(token)
+ prev_is_special = False
+ out_string += self.sp_model.decode(current_sub_tokens)
+ return out_string
+
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ """
+ Save the vocabulary and special tokens file to a directory.
+
+ Args:
+ save_directory (`str`):
+ The directory in which to save the vocabulary.
+
+ Returns:
+ `Tuple(str)`: Paths to the files saved.
+ """
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+ output = bos_token_id + token_ids_0 + eos_token_id
+
+ if token_ids_1 is not None:
+ output = output + bos_token_id + token_ids_1 + eos_token_id
+
+ return output
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ bos_token_id = [1] if self.add_bos_token else []
+ eos_token_id = [1] if self.add_eos_token else []
+
+ if token_ids_1 is None:
+ return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+ return (
+ bos_token_id
+ + ([0] * len(token_ids_0))
+ + eos_token_id
+ + bos_token_id
+ + ([0] * len(token_ids_1))
+ + eos_token_id
+ )
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ if token_ids_1 is None, only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of ids.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+ output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+ if token_ids_1 is not None:
+ output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+ return output
diff --git a/LLM-Detector-V1-4w/checkpoint-2000/tokenizer.model b/LLM-Detector-V1-4w/checkpoint-2000/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..b3902c4521d7f34868ac76dd16150ff5ca41b000
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-2000/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79452955be6b419a65984273a9f08af86042e1c2a75ee3ba989cbf620a133cc2
+size 2001107
diff --git a/LLM-Detector-V1-4w/checkpoint-2000/tokenizer_config.json b/LLM-Detector-V1-4w/checkpoint-2000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..d14486d6a5be1135bdda779a8ffcde1b77155302
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-2000/tokenizer_config.json
@@ -0,0 +1,49 @@
+{
+ "add_bos_token": false,
+ "add_eos_token": false,
+ "auto_map": {
+ "AutoTokenizer": [
+ "tokenization_baichuan.BaichuanTokenizer",
+ null
+ ]
+ },
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "model_max_length": 4096,
+ "pad_token": {
+ "__type": "AddedToken",
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "split_special_tokens": false,
+ "tokenizer_class": "BaichuanTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "use_fast": false
+}
diff --git a/LLM-Detector-V1-4w/checkpoint-2000/trainer_state.json b/LLM-Detector-V1-4w/checkpoint-2000/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..c900c83e19df6d25ce7c2d09c3dff91205869977
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-2000/trainer_state.json
@@ -0,0 +1,1251 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.699235344095157,
+ "eval_steps": 500,
+ "global_step": 2000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.999919851200522e-05,
+ "loss": 9.9461,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 4.9996428002198536e-05,
+ "loss": 6.4908,
+ "step": 20
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4.9992242747551964e-05,
+ "loss": 3.708,
+ "step": 30
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4.99857130295276e-05,
+ "loss": 0.8908,
+ "step": 40
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 4.997720546222574e-05,
+ "loss": 0.2454,
+ "step": 50
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 4.996672071909866e-05,
+ "loss": 0.1348,
+ "step": 60
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 4.995425963011034e-05,
+ "loss": 0.0487,
+ "step": 70
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.993982318167074e-05,
+ "loss": 0.0282,
+ "step": 80
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.992341251655768e-05,
+ "loss": 0.0455,
+ "step": 90
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.9905028933826435e-05,
+ "loss": 0.0472,
+ "step": 100
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 4.988467388870688e-05,
+ "loss": 0.0526,
+ "step": 110
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 4.986234899248826e-05,
+ "loss": 0.0679,
+ "step": 120
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 4.983805601239172e-05,
+ "loss": 0.0314,
+ "step": 130
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 4.981179687143034e-05,
+ "loss": 0.0136,
+ "step": 140
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 4.978357364825695e-05,
+ "loss": 0.0409,
+ "step": 150
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 4.975338857699956e-05,
+ "loss": 0.0284,
+ "step": 160
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 4.972124404708454e-05,
+ "loss": 0.0364,
+ "step": 170
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 4.968714260304743e-05,
+ "loss": 0.0147,
+ "step": 180
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 4.965108694433159e-05,
+ "loss": 0.0174,
+ "step": 190
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 4.961307992507443e-05,
+ "loss": 0.0244,
+ "step": 200
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 4.957312455388152e-05,
+ "loss": 0.0387,
+ "step": 210
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 4.953122399358845e-05,
+ "loss": 0.0264,
+ "step": 220
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.948738156101042e-05,
+ "loss": 0.0291,
+ "step": 230
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.9441600726679694e-05,
+ "loss": 0.0214,
+ "step": 240
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 4.939388511457092e-05,
+ "loss": 0.0116,
+ "step": 250
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 4.934423850181419e-05,
+ "loss": 0.0191,
+ "step": 260
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 4.9292664818396117e-05,
+ "loss": 0.0064,
+ "step": 270
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 4.9239168146848666e-05,
+ "loss": 0.0184,
+ "step": 280
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 4.9183752721926036e-05,
+ "loss": 0.0026,
+ "step": 290
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 4.912642293026942e-05,
+ "loss": 0.0223,
+ "step": 300
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 4.906718331005979e-05,
+ "loss": 0.0405,
+ "step": 310
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 4.900603855065861e-05,
+ "loss": 0.0461,
+ "step": 320
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 4.894299349223665e-05,
+ "loss": 0.0199,
+ "step": 330
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 4.8878053125390875e-05,
+ "loss": 0.0193,
+ "step": 340
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 4.881122259074935e-05,
+ "loss": 0.004,
+ "step": 350
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.874250717856433e-05,
+ "loss": 0.0018,
+ "step": 360
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.867191232829348e-05,
+ "loss": 0.0021,
+ "step": 370
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 4.8599443628169295e-05,
+ "loss": 0.018,
+ "step": 380
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 4.8525106814756754e-05,
+ "loss": 0.0261,
+ "step": 390
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 4.84489077724992e-05,
+ "loss": 0.016,
+ "step": 400
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 4.8370852533252536e-05,
+ "loss": 0.0402,
+ "step": 410
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 4.8290947275807755e-05,
+ "loss": 0.0038,
+ "step": 420
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 4.8209198325401815e-05,
+ "loss": 0.008,
+ "step": 430
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 4.8125612153216976e-05,
+ "loss": 0.0296,
+ "step": 440
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 4.804019537586849e-05,
+ "loss": 0.0012,
+ "step": 450
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 4.7952954754880886e-05,
+ "loss": 0.0142,
+ "step": 460
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 4.7863897196152704e-05,
+ "loss": 0.0163,
+ "step": 470
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 4.7773029749409836e-05,
+ "loss": 0.0021,
+ "step": 480
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.76803596076475e-05,
+ "loss": 0.0355,
+ "step": 490
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.758589410656078e-05,
+ "loss": 0.0199,
+ "step": 500
+ },
+ {
+ "epoch": 0.42,
+ "eval_loss": 0.010466881096363068,
+ "eval_runtime": 88.037,
+ "eval_samples_per_second": 22.525,
+ "eval_steps_per_second": 2.817,
+ "step": 500
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 4.748964072396403e-05,
+ "loss": 0.0341,
+ "step": 510
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 4.7391607079198876e-05,
+ "loss": 0.0137,
+ "step": 520
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 4.7291800932531064e-05,
+ "loss": 0.0138,
+ "step": 530
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 4.719023018453623e-05,
+ "loss": 0.0063,
+ "step": 540
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 4.708690287547441e-05,
+ "loss": 0.0376,
+ "step": 550
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 4.698182718465368e-05,
+ "loss": 0.006,
+ "step": 560
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 4.687501142978258e-05,
+ "loss": 0.0371,
+ "step": 570
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 4.6766464066311765e-05,
+ "loss": 0.0322,
+ "step": 580
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 4.665619368676466e-05,
+ "loss": 0.0086,
+ "step": 590
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 4.6544209020057285e-05,
+ "loss": 0.002,
+ "step": 600
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 4.643051893080725e-05,
+ "loss": 0.0147,
+ "step": 610
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 4.631513241863209e-05,
+ "loss": 0.0038,
+ "step": 620
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 4.619805861743683e-05,
+ "loss": 0.0187,
+ "step": 630
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 4.607930679469096e-05,
+ "loss": 0.0063,
+ "step": 640
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 4.595888635069481e-05,
+ "loss": 0.0109,
+ "step": 650
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 4.5836806817835475e-05,
+ "loss": 0.005,
+ "step": 660
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 4.57130778598322e-05,
+ "loss": 0.0167,
+ "step": 670
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 4.5587709270971425e-05,
+ "loss": 0.0143,
+ "step": 680
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 4.546071097533145e-05,
+ "loss": 0.0015,
+ "step": 690
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 4.533209302599691e-05,
+ "loss": 0.0003,
+ "step": 700
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 4.520186560426292e-05,
+ "loss": 0.006,
+ "step": 710
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 4.507003901882915e-05,
+ "loss": 0.0093,
+ "step": 720
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 4.493662370498383e-05,
+ "loss": 0.0046,
+ "step": 730
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 4.4801630223777665e-05,
+ "loss": 0.0147,
+ "step": 740
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 4.466506926118782e-05,
+ "loss": 0.0102,
+ "step": 750
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 4.4526951627272074e-05,
+ "loss": 0.017,
+ "step": 760
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 4.438728825531305e-05,
+ "loss": 0.0033,
+ "step": 770
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 4.4246090200952816e-05,
+ "loss": 0.0061,
+ "step": 780
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 4.410336864131762e-05,
+ "loss": 0.0032,
+ "step": 790
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 4.395913487413324e-05,
+ "loss": 0.0043,
+ "step": 800
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 4.3813400316830576e-05,
+ "loss": 0.0063,
+ "step": 810
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 4.36661765056419e-05,
+ "loss": 0.0273,
+ "step": 820
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.351747509468763e-05,
+ "loss": 0.0125,
+ "step": 830
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.336730785505382e-05,
+ "loss": 0.0076,
+ "step": 840
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 4.3215686673860384e-05,
+ "loss": 0.0127,
+ "step": 850
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.306262355332006e-05,
+ "loss": 0.0161,
+ "step": 860
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.290813060978839e-05,
+ "loss": 0.0169,
+ "step": 870
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.2752220072804564e-05,
+ "loss": 0.0081,
+ "step": 880
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 4.259490428412335e-05,
+ "loss": 0.0131,
+ "step": 890
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 4.243619569673814e-05,
+ "loss": 0.0205,
+ "step": 900
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.2276106873895143e-05,
+ "loss": 0.0026,
+ "step": 910
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.2114650488098936e-05,
+ "loss": 0.018,
+ "step": 920
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.19518393201093e-05,
+ "loss": 0.0083,
+ "step": 930
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 4.178768625792949e-05,
+ "loss": 0.0291,
+ "step": 940
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.162220429578605e-05,
+ "loss": 0.0226,
+ "step": 950
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.145540653310018e-05,
+ "loss": 0.0042,
+ "step": 960
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.128730617345084e-05,
+ "loss": 0.0078,
+ "step": 970
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 4.111791652352952e-05,
+ "loss": 0.0084,
+ "step": 980
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 4.094725099208688e-05,
+ "loss": 0.0044,
+ "step": 990
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 4.077532308887141e-05,
+ "loss": 0.0011,
+ "step": 1000
+ },
+ {
+ "epoch": 0.85,
+ "eval_loss": 0.01175768580287695,
+ "eval_runtime": 88.0904,
+ "eval_samples_per_second": 22.511,
+ "eval_steps_per_second": 2.815,
+ "step": 1000
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.060214642355989e-05,
+ "loss": 0.0011,
+ "step": 1010
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 4.042773470468016e-05,
+ "loss": 0.021,
+ "step": 1020
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 4.0252101738525916e-05,
+ "loss": 0.0424,
+ "step": 1030
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 4.0075261428063806e-05,
+ "loss": 0.0194,
+ "step": 1040
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 3.9897227771832924e-05,
+ "loss": 0.0025,
+ "step": 1050
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 3.971801486283665e-05,
+ "loss": 0.0044,
+ "step": 1060
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 3.953763688742708e-05,
+ "loss": 0.0051,
+ "step": 1070
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 3.9356108124182067e-05,
+ "loss": 0.0071,
+ "step": 1080
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.9173442942774885e-05,
+ "loss": 0.0145,
+ "step": 1090
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.898965580283681e-05,
+ "loss": 0.0371,
+ "step": 1100
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 3.880476125281244e-05,
+ "loss": 0.0076,
+ "step": 1110
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 3.861877392880808e-05,
+ "loss": 0.0035,
+ "step": 1120
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.843170855343317e-05,
+ "loss": 0.008,
+ "step": 1130
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 3.8243579934634846e-05,
+ "loss": 0.0089,
+ "step": 1140
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 3.805440296452574e-05,
+ "loss": 0.0034,
+ "step": 1150
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 3.786419261820514e-05,
+ "loss": 0.0019,
+ "step": 1160
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 3.7672963952573614e-05,
+ "loss": 0.0164,
+ "step": 1170
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 3.748073210514102e-05,
+ "loss": 0.0012,
+ "step": 1180
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 3.728751229282836e-05,
+ "loss": 0.0072,
+ "step": 1190
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 3.70933198107631e-05,
+ "loss": 0.0041,
+ "step": 1200
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 3.689817003106852e-05,
+ "loss": 0.0021,
+ "step": 1210
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 3.670207840164678e-05,
+ "loss": 0.0024,
+ "step": 1220
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.650506044495615e-05,
+ "loss": 0.0026,
+ "step": 1230
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.630713175678222e-05,
+ "loss": 0.005,
+ "step": 1240
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 3.610830800500335e-05,
+ "loss": 0.0069,
+ "step": 1250
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 3.590860492835046e-05,
+ "loss": 0.0081,
+ "step": 1260
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 3.5708038335161134e-05,
+ "loss": 0.0107,
+ "step": 1270
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 3.550662410212819e-05,
+ "loss": 0.0074,
+ "step": 1280
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 3.5304378173043e-05,
+ "loss": 0.0048,
+ "step": 1290
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 3.5101316557533294e-05,
+ "loss": 0.0006,
+ "step": 1300
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 3.489745532979593e-05,
+ "loss": 0.0076,
+ "step": 1310
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 3.469281062732442e-05,
+ "loss": 0.0002,
+ "step": 1320
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 3.448739864963154e-05,
+ "loss": 0.0073,
+ "step": 1330
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 3.4281235656966915e-05,
+ "loss": 0.0008,
+ "step": 1340
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 3.4074337969029965e-05,
+ "loss": 0.001,
+ "step": 1350
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 3.386672196367799e-05,
+ "loss": 0.0047,
+ "step": 1360
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 3.365840407562974e-05,
+ "loss": 0.0131,
+ "step": 1370
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 3.3449400795164416e-05,
+ "loss": 0.001,
+ "step": 1380
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 3.323972866681637e-05,
+ "loss": 0.0058,
+ "step": 1390
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 3.3029404288065426e-05,
+ "loss": 0.0047,
+ "step": 1400
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 3.2818444308023e-05,
+ "loss": 0.0029,
+ "step": 1410
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 3.2606865426114234e-05,
+ "loss": 0.0073,
+ "step": 1420
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 3.239468439075604e-05,
+ "loss": 0.0006,
+ "step": 1430
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 3.2181917998031326e-05,
+ "loss": 0.0028,
+ "step": 1440
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 3.196858309035941e-05,
+ "loss": 0.0003,
+ "step": 1450
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 3.175469655516284e-05,
+ "loss": 0.0007,
+ "step": 1460
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 3.154027532353052e-05,
+ "loss": 0.0037,
+ "step": 1470
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 3.132533636887753e-05,
+ "loss": 0.0065,
+ "step": 1480
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 3.1109896705601485e-05,
+ "loss": 0.0092,
+ "step": 1490
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 3.0893973387735687e-05,
+ "loss": 0.0001,
+ "step": 1500
+ },
+ {
+ "epoch": 1.27,
+ "eval_loss": 0.010954583063721657,
+ "eval_runtime": 88.0029,
+ "eval_samples_per_second": 22.533,
+ "eval_steps_per_second": 2.818,
+ "step": 1500
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 3.067758350759917e-05,
+ "loss": 0.0002,
+ "step": 1510
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 3.046074419444366e-05,
+ "loss": 0.0004,
+ "step": 1520
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 3.0243472613097656e-05,
+ "loss": 0.001,
+ "step": 1530
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 3.002578596260765e-05,
+ "loss": 0.0001,
+ "step": 1540
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 2.980770147487668e-05,
+ "loss": 0.0086,
+ "step": 1550
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 2.958923641330028e-05,
+ "loss": 0.0021,
+ "step": 1560
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 2.9370408071399898e-05,
+ "loss": 0.0001,
+ "step": 1570
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 2.9151233771453956e-05,
+ "loss": 0.0076,
+ "step": 1580
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 2.8931730863126666e-05,
+ "loss": 0.0001,
+ "step": 1590
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 2.871191672209459e-05,
+ "loss": 0.0001,
+ "step": 1600
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 2.8491808748671255e-05,
+ "loss": 0.0001,
+ "step": 1610
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 2.8271424366429706e-05,
+ "loss": 0.0115,
+ "step": 1620
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 2.8050781020823296e-05,
+ "loss": 0.0001,
+ "step": 1630
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 2.7829896177804716e-05,
+ "loss": 0.0003,
+ "step": 1640
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 2.760878732244339e-05,
+ "loss": 0.0003,
+ "step": 1650
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 2.7387471957541405e-05,
+ "loss": 0.0024,
+ "step": 1660
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 2.7165967602247964e-05,
+ "loss": 0.0005,
+ "step": 1670
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 2.694429179067261e-05,
+ "loss": 0.0018,
+ "step": 1680
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 2.6744651468034758e-05,
+ "loss": 0.002,
+ "step": 1690
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 2.6522698243485527e-05,
+ "loss": 0.0001,
+ "step": 1700
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 2.6300624483347926e-05,
+ "loss": 0.0058,
+ "step": 1710
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 2.607844776680513e-05,
+ "loss": 0.0001,
+ "step": 1720
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 2.585618568119027e-05,
+ "loss": 0.0001,
+ "step": 1730
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 2.56338558205942e-05,
+ "loss": 0.0008,
+ "step": 1740
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 2.5411475784472805e-05,
+ "loss": 0.0002,
+ "step": 1750
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 2.5189063176253825e-05,
+ "loss": 0.0001,
+ "step": 1760
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 2.496663560194338e-05,
+ "loss": 0.0001,
+ "step": 1770
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.4744210668732295e-05,
+ "loss": 0.0001,
+ "step": 1780
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.452180598360232e-05,
+ "loss": 0.0001,
+ "step": 1790
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.429943915193239e-05,
+ "loss": 0.0,
+ "step": 1800
+ },
+ {
+ "epoch": 1.54,
+ "learning_rate": 2.4077127776104984e-05,
+ "loss": 0.0146,
+ "step": 1810
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.3854889454112748e-05,
+ "loss": 0.0017,
+ "step": 1820
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.3632741778165442e-05,
+ "loss": 0.0001,
+ "step": 1830
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 2.3410702333297356e-05,
+ "loss": 0.0001,
+ "step": 1840
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 2.318878869597528e-05,
+ "loss": 0.0001,
+ "step": 1850
+ },
+ {
+ "epoch": 1.58,
+ "learning_rate": 2.2967018432707213e-05,
+ "loss": 0.0073,
+ "step": 1860
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 2.2745409098651744e-05,
+ "loss": 0.0001,
+ "step": 1870
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 2.2523978236228442e-05,
+ "loss": 0.0001,
+ "step": 1880
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 2.2302743373729205e-05,
+ "loss": 0.0,
+ "step": 1890
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 2.2081722023930743e-05,
+ "loss": 0.0136,
+ "step": 1900
+ },
+ {
+ "epoch": 1.62,
+ "learning_rate": 2.1860931682708248e-05,
+ "loss": 0.0051,
+ "step": 1910
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 2.164038982765047e-05,
+ "loss": 0.0004,
+ "step": 1920
+ },
+ {
+ "epoch": 1.64,
+ "learning_rate": 2.1420113916676183e-05,
+ "loss": 0.0002,
+ "step": 1930
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 2.1200121386652246e-05,
+ "loss": 0.0001,
+ "step": 1940
+ },
+ {
+ "epoch": 1.66,
+ "learning_rate": 2.0980429652013297e-05,
+ "loss": 0.0001,
+ "step": 1950
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 2.0761056103383258e-05,
+ "loss": 0.0001,
+ "step": 1960
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 2.0542018106198697e-05,
+ "loss": 0.0,
+ "step": 1970
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 2.0323332999334198e-05,
+ "loss": 0.005,
+ "step": 1980
+ },
+ {
+ "epoch": 1.69,
+ "learning_rate": 2.010501809372981e-05,
+ "loss": 0.0149,
+ "step": 1990
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 1.988709067102076e-05,
+ "loss": 0.0143,
+ "step": 2000
+ },
+ {
+ "epoch": 1.7,
+ "eval_loss": 0.013543435372412205,
+ "eval_runtime": 87.9904,
+ "eval_samples_per_second": 22.537,
+ "eval_steps_per_second": 2.818,
+ "step": 2000
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 3531,
+ "num_train_epochs": 3,
+ "save_steps": 1000,
+ "total_flos": 3.0084561090831974e+17,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/LLM-Detector-V1-4w/checkpoint-2000/training_args.bin b/LLM-Detector-V1-4w/checkpoint-2000/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e37f40cb61d45b6c2efd87b62a5ee72c12d2b4c2
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-2000/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c31eb820fabf5021fa0eda935da3d201c65c7331d3ce4ce4ad4631151a6068e9
+size 4664
diff --git a/LLM-Detector-V1-4w/checkpoint-3000/README.md b/LLM-Detector-V1-4w/checkpoint-3000/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a04450aa7d792898a89dd2c6093050ffd3808789
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-3000/README.md
@@ -0,0 +1,219 @@
+---
+library_name: peft
+base_model: ../Baichuan2-7B-Chat
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: QuantizationMethod.BITS_AND_BYTES
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: float16
+
+### Framework versions
+
+
+- PEFT 0.6.0
diff --git a/LLM-Detector-V1-4w/checkpoint-3000/adapter_config.json b/LLM-Detector-V1-4w/checkpoint-3000/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..54acbac2c279e46331f75c340af98595a8683d48
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-3000/adapter_config.json
@@ -0,0 +1,22 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "../Baichuan2-7B-Chat",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "W_pack"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/LLM-Detector-V1-4w/checkpoint-3000/adapter_model.bin b/LLM-Detector-V1-4w/checkpoint-3000/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..88c1068195e1badd2d1c588064a8825f786847ef
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-3000/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95683a3f84e8898c5638dc27af4722d83e15011e94d2d5b3dc5e5df5fb5f2957
+size 16800430
diff --git a/LLM-Detector-V1-4w/checkpoint-3000/optimizer.pt b/LLM-Detector-V1-4w/checkpoint-3000/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8184f12059703df68f525ee7ee2d707fdba3a06d
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-3000/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:382ed77666dc683727826a32b567726fa61aa8ce9683d06554f029848bbbbbe2
+size 33608634
diff --git a/LLM-Detector-V1-4w/checkpoint-3000/rng_state.pth b/LLM-Detector-V1-4w/checkpoint-3000/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..eeae7fc4f8ace532a304d5224c02624469e27e01
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-3000/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:940027b051c66fe81320f03ac39a79253b4b99e23f81f20a7b419a3e9c536ca7
+size 14244
diff --git a/LLM-Detector-V1-4w/checkpoint-3000/scheduler.pt b/LLM-Detector-V1-4w/checkpoint-3000/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3a5eceb75e0aa089efffef76bff1277ceed1b0d8
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-3000/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d39fa5eb0d60aa11f89119712f921058fcd340118e8e922310dd30bb99e28ee
+size 1064
diff --git a/LLM-Detector-V1-4w/checkpoint-3000/special_tokens_map.json b/LLM-Detector-V1-4w/checkpoint-3000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..5819ea25d7b1b6340063a0629c2143c44b0452da
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-3000/special_tokens_map.json
@@ -0,0 +1,30 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/LLM-Detector-V1-4w/checkpoint-3000/tokenization_baichuan.py b/LLM-Detector-V1-4w/checkpoint-3000/tokenization_baichuan.py
new file mode 100644
index 0000000000000000000000000000000000000000..256544b45542d2f5dcd12a65e2f0ddaeeb9def25
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-3000/tokenization_baichuan.py
@@ -0,0 +1,251 @@
+# Copyright 2023 Baichuan Inc. All Rights Reserved.
+
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {},
+ "tokenizer_file": {},
+}
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
+
+
class BaichuanTokenizer(PreTrainedTokenizer):
    """
    Construct a Baichuan tokenizer. Based on byte-level Byte-Pair-Encoding,
    backed by a SentencePiece model loaded from `vocab_file`.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file (a SentencePiece ``.model`` file).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        # NOTE(review): the special-token default strings were empty ("") in
        # this copy of the file — the angle-bracket tokens were evidently
        # stripped during export (the same corruption appears in the sibling
        # special_tokens_map.json). Restored to the upstream Baichuan defaults.
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        add_bos_token=True,
        add_eos_token=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Wrap plain strings as AddedToken so lstrip/rstrip behavior is pinned
        # and survives save/load round-trips.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            add_bos_token=add_bos_token,
            add_eos_token=add_eos_token,
            sp_model_kwargs=self.sp_model_kwargs,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        # NOTE(review): vocab_file/sp_model are assigned *after*
        # super().__init__(). Transformers >= 4.34 calls get_vocab() from the
        # base constructor, which would fail here; this is fine for the pinned
        # Transformers 4.32.1 this repo was trained with — confirm before
        # upgrading the library.
        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it here and
        # reload it from `vocab_file` in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    def vocab_size(self):
        """Returns vocab size (size of the underlying SentencePiece model)."""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Returns the full vocabulary (token -> id), including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        """Returns the text split into SentencePiece string pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for i, token in enumerate(tokens):
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special and i != 0:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # Prefer copying the original model file; fall back to serializing the
        # in-memory SentencePiece proto when the source file is gone.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Wrap each sequence with BOS/EOS according to the add_*_token flags.
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = bos_token_id + token_ids_0 + eos_token_id

        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id

        return output

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        bos_token_id = [1] if self.add_bos_token else []
        eos_token_id = [1] if self.add_eos_token else []

        if token_ids_1 is None:
            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
        return (
            bos_token_id
            + ([0] * len(token_ids_0))
            + eos_token_id
            + bos_token_id
            + ([0] * len(token_ids_1))
            + eos_token_id
        )

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        if token_ids_1 is None, only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of ids.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)

        if token_ids_1 is not None:
            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)

        return output
diff --git a/LLM-Detector-V1-4w/checkpoint-3000/tokenizer.model b/LLM-Detector-V1-4w/checkpoint-3000/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..b3902c4521d7f34868ac76dd16150ff5ca41b000
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-3000/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79452955be6b419a65984273a9f08af86042e1c2a75ee3ba989cbf620a133cc2
+size 2001107
diff --git a/LLM-Detector-V1-4w/checkpoint-3000/tokenizer_config.json b/LLM-Detector-V1-4w/checkpoint-3000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..d14486d6a5be1135bdda779a8ffcde1b77155302
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-3000/tokenizer_config.json
@@ -0,0 +1,49 @@
+{
+ "add_bos_token": false,
+ "add_eos_token": false,
+ "auto_map": {
+ "AutoTokenizer": [
+ "tokenization_baichuan.BaichuanTokenizer",
+ null
+ ]
+ },
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "model_max_length": 4096,
+ "pad_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "split_special_tokens": false,
+ "tokenizer_class": "BaichuanTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "use_fast": false
+}
diff --git a/LLM-Detector-V1-4w/checkpoint-3000/trainer_state.json b/LLM-Detector-V1-4w/checkpoint-3000/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..ca65a752a0b0bd07bc781bc44a79a403388fb5dc
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-3000/trainer_state.json
@@ -0,0 +1,1867 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.548853016142736,
+ "eval_steps": 500,
+ "global_step": 3000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.999919851200522e-05,
+ "loss": 9.9461,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 4.9996428002198536e-05,
+ "loss": 6.4908,
+ "step": 20
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4.9992242747551964e-05,
+ "loss": 3.708,
+ "step": 30
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4.99857130295276e-05,
+ "loss": 0.8908,
+ "step": 40
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 4.997720546222574e-05,
+ "loss": 0.2454,
+ "step": 50
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 4.996672071909866e-05,
+ "loss": 0.1348,
+ "step": 60
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 4.995425963011034e-05,
+ "loss": 0.0487,
+ "step": 70
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.993982318167074e-05,
+ "loss": 0.0282,
+ "step": 80
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.992341251655768e-05,
+ "loss": 0.0455,
+ "step": 90
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.9905028933826435e-05,
+ "loss": 0.0472,
+ "step": 100
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 4.988467388870688e-05,
+ "loss": 0.0526,
+ "step": 110
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 4.986234899248826e-05,
+ "loss": 0.0679,
+ "step": 120
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 4.983805601239172e-05,
+ "loss": 0.0314,
+ "step": 130
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 4.981179687143034e-05,
+ "loss": 0.0136,
+ "step": 140
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 4.978357364825695e-05,
+ "loss": 0.0409,
+ "step": 150
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 4.975338857699956e-05,
+ "loss": 0.0284,
+ "step": 160
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 4.972124404708454e-05,
+ "loss": 0.0364,
+ "step": 170
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 4.968714260304743e-05,
+ "loss": 0.0147,
+ "step": 180
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 4.965108694433159e-05,
+ "loss": 0.0174,
+ "step": 190
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 4.961307992507443e-05,
+ "loss": 0.0244,
+ "step": 200
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 4.957312455388152e-05,
+ "loss": 0.0387,
+ "step": 210
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 4.953122399358845e-05,
+ "loss": 0.0264,
+ "step": 220
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.948738156101042e-05,
+ "loss": 0.0291,
+ "step": 230
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.9441600726679694e-05,
+ "loss": 0.0214,
+ "step": 240
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 4.939388511457092e-05,
+ "loss": 0.0116,
+ "step": 250
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 4.934423850181419e-05,
+ "loss": 0.0191,
+ "step": 260
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 4.9292664818396117e-05,
+ "loss": 0.0064,
+ "step": 270
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 4.9239168146848666e-05,
+ "loss": 0.0184,
+ "step": 280
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 4.9183752721926036e-05,
+ "loss": 0.0026,
+ "step": 290
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 4.912642293026942e-05,
+ "loss": 0.0223,
+ "step": 300
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 4.906718331005979e-05,
+ "loss": 0.0405,
+ "step": 310
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 4.900603855065861e-05,
+ "loss": 0.0461,
+ "step": 320
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 4.894299349223665e-05,
+ "loss": 0.0199,
+ "step": 330
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 4.8878053125390875e-05,
+ "loss": 0.0193,
+ "step": 340
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 4.881122259074935e-05,
+ "loss": 0.004,
+ "step": 350
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.874250717856433e-05,
+ "loss": 0.0018,
+ "step": 360
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.867191232829348e-05,
+ "loss": 0.0021,
+ "step": 370
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 4.8599443628169295e-05,
+ "loss": 0.018,
+ "step": 380
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 4.8525106814756754e-05,
+ "loss": 0.0261,
+ "step": 390
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 4.84489077724992e-05,
+ "loss": 0.016,
+ "step": 400
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 4.8370852533252536e-05,
+ "loss": 0.0402,
+ "step": 410
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 4.8290947275807755e-05,
+ "loss": 0.0038,
+ "step": 420
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 4.8209198325401815e-05,
+ "loss": 0.008,
+ "step": 430
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 4.8125612153216976e-05,
+ "loss": 0.0296,
+ "step": 440
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 4.804019537586849e-05,
+ "loss": 0.0012,
+ "step": 450
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 4.7952954754880886e-05,
+ "loss": 0.0142,
+ "step": 460
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 4.7863897196152704e-05,
+ "loss": 0.0163,
+ "step": 470
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 4.7773029749409836e-05,
+ "loss": 0.0021,
+ "step": 480
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.76803596076475e-05,
+ "loss": 0.0355,
+ "step": 490
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.758589410656078e-05,
+ "loss": 0.0199,
+ "step": 500
+ },
+ {
+ "epoch": 0.42,
+ "eval_loss": 0.010466881096363068,
+ "eval_runtime": 88.037,
+ "eval_samples_per_second": 22.525,
+ "eval_steps_per_second": 2.817,
+ "step": 500
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 4.748964072396403e-05,
+ "loss": 0.0341,
+ "step": 510
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 4.7391607079198876e-05,
+ "loss": 0.0137,
+ "step": 520
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 4.7291800932531064e-05,
+ "loss": 0.0138,
+ "step": 530
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 4.719023018453623e-05,
+ "loss": 0.0063,
+ "step": 540
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 4.708690287547441e-05,
+ "loss": 0.0376,
+ "step": 550
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 4.698182718465368e-05,
+ "loss": 0.006,
+ "step": 560
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 4.687501142978258e-05,
+ "loss": 0.0371,
+ "step": 570
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 4.6766464066311765e-05,
+ "loss": 0.0322,
+ "step": 580
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 4.665619368676466e-05,
+ "loss": 0.0086,
+ "step": 590
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 4.6544209020057285e-05,
+ "loss": 0.002,
+ "step": 600
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 4.643051893080725e-05,
+ "loss": 0.0147,
+ "step": 610
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 4.631513241863209e-05,
+ "loss": 0.0038,
+ "step": 620
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 4.619805861743683e-05,
+ "loss": 0.0187,
+ "step": 630
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 4.607930679469096e-05,
+ "loss": 0.0063,
+ "step": 640
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 4.595888635069481e-05,
+ "loss": 0.0109,
+ "step": 650
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 4.5836806817835475e-05,
+ "loss": 0.005,
+ "step": 660
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 4.57130778598322e-05,
+ "loss": 0.0167,
+ "step": 670
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 4.5587709270971425e-05,
+ "loss": 0.0143,
+ "step": 680
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 4.546071097533145e-05,
+ "loss": 0.0015,
+ "step": 690
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 4.533209302599691e-05,
+ "loss": 0.0003,
+ "step": 700
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 4.520186560426292e-05,
+ "loss": 0.006,
+ "step": 710
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 4.507003901882915e-05,
+ "loss": 0.0093,
+ "step": 720
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 4.493662370498383e-05,
+ "loss": 0.0046,
+ "step": 730
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 4.4801630223777665e-05,
+ "loss": 0.0147,
+ "step": 740
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 4.466506926118782e-05,
+ "loss": 0.0102,
+ "step": 750
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 4.4526951627272074e-05,
+ "loss": 0.017,
+ "step": 760
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 4.438728825531305e-05,
+ "loss": 0.0033,
+ "step": 770
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 4.4246090200952816e-05,
+ "loss": 0.0061,
+ "step": 780
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 4.410336864131762e-05,
+ "loss": 0.0032,
+ "step": 790
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 4.395913487413324e-05,
+ "loss": 0.0043,
+ "step": 800
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 4.3813400316830576e-05,
+ "loss": 0.0063,
+ "step": 810
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 4.36661765056419e-05,
+ "loss": 0.0273,
+ "step": 820
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.351747509468763e-05,
+ "loss": 0.0125,
+ "step": 830
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.336730785505382e-05,
+ "loss": 0.0076,
+ "step": 840
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 4.3215686673860384e-05,
+ "loss": 0.0127,
+ "step": 850
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.306262355332006e-05,
+ "loss": 0.0161,
+ "step": 860
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.290813060978839e-05,
+ "loss": 0.0169,
+ "step": 870
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.2752220072804564e-05,
+ "loss": 0.0081,
+ "step": 880
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 4.259490428412335e-05,
+ "loss": 0.0131,
+ "step": 890
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 4.243619569673814e-05,
+ "loss": 0.0205,
+ "step": 900
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.2276106873895143e-05,
+ "loss": 0.0026,
+ "step": 910
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.2114650488098936e-05,
+ "loss": 0.018,
+ "step": 920
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.19518393201093e-05,
+ "loss": 0.0083,
+ "step": 930
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 4.178768625792949e-05,
+ "loss": 0.0291,
+ "step": 940
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.162220429578605e-05,
+ "loss": 0.0226,
+ "step": 950
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.145540653310018e-05,
+ "loss": 0.0042,
+ "step": 960
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.128730617345084e-05,
+ "loss": 0.0078,
+ "step": 970
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 4.111791652352952e-05,
+ "loss": 0.0084,
+ "step": 980
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 4.094725099208688e-05,
+ "loss": 0.0044,
+ "step": 990
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 4.077532308887141e-05,
+ "loss": 0.0011,
+ "step": 1000
+ },
+ {
+ "epoch": 0.85,
+ "eval_loss": 0.01175768580287695,
+ "eval_runtime": 88.0904,
+ "eval_samples_per_second": 22.511,
+ "eval_steps_per_second": 2.815,
+ "step": 1000
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.060214642355989e-05,
+ "loss": 0.0011,
+ "step": 1010
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 4.042773470468016e-05,
+ "loss": 0.021,
+ "step": 1020
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 4.0252101738525916e-05,
+ "loss": 0.0424,
+ "step": 1030
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 4.0075261428063806e-05,
+ "loss": 0.0194,
+ "step": 1040
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 3.9897227771832924e-05,
+ "loss": 0.0025,
+ "step": 1050
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 3.971801486283665e-05,
+ "loss": 0.0044,
+ "step": 1060
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 3.953763688742708e-05,
+ "loss": 0.0051,
+ "step": 1070
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 3.9356108124182067e-05,
+ "loss": 0.0071,
+ "step": 1080
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.9173442942774885e-05,
+ "loss": 0.0145,
+ "step": 1090
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.898965580283681e-05,
+ "loss": 0.0371,
+ "step": 1100
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 3.880476125281244e-05,
+ "loss": 0.0076,
+ "step": 1110
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 3.861877392880808e-05,
+ "loss": 0.0035,
+ "step": 1120
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.843170855343317e-05,
+ "loss": 0.008,
+ "step": 1130
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 3.8243579934634846e-05,
+ "loss": 0.0089,
+ "step": 1140
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 3.805440296452574e-05,
+ "loss": 0.0034,
+ "step": 1150
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 3.786419261820514e-05,
+ "loss": 0.0019,
+ "step": 1160
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 3.7672963952573614e-05,
+ "loss": 0.0164,
+ "step": 1170
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 3.748073210514102e-05,
+ "loss": 0.0012,
+ "step": 1180
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 3.728751229282836e-05,
+ "loss": 0.0072,
+ "step": 1190
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 3.70933198107631e-05,
+ "loss": 0.0041,
+ "step": 1200
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 3.689817003106852e-05,
+ "loss": 0.0021,
+ "step": 1210
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 3.670207840164678e-05,
+ "loss": 0.0024,
+ "step": 1220
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.650506044495615e-05,
+ "loss": 0.0026,
+ "step": 1230
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.630713175678222e-05,
+ "loss": 0.005,
+ "step": 1240
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 3.610830800500335e-05,
+ "loss": 0.0069,
+ "step": 1250
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 3.590860492835046e-05,
+ "loss": 0.0081,
+ "step": 1260
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 3.5708038335161134e-05,
+ "loss": 0.0107,
+ "step": 1270
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 3.550662410212819e-05,
+ "loss": 0.0074,
+ "step": 1280
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 3.5304378173043e-05,
+ "loss": 0.0048,
+ "step": 1290
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 3.5101316557533294e-05,
+ "loss": 0.0006,
+ "step": 1300
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 3.489745532979593e-05,
+ "loss": 0.0076,
+ "step": 1310
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 3.469281062732442e-05,
+ "loss": 0.0002,
+ "step": 1320
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 3.448739864963154e-05,
+ "loss": 0.0073,
+ "step": 1330
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 3.4281235656966915e-05,
+ "loss": 0.0008,
+ "step": 1340
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 3.4074337969029965e-05,
+ "loss": 0.001,
+ "step": 1350
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 3.386672196367799e-05,
+ "loss": 0.0047,
+ "step": 1360
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 3.365840407562974e-05,
+ "loss": 0.0131,
+ "step": 1370
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 3.3449400795164416e-05,
+ "loss": 0.001,
+ "step": 1380
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 3.323972866681637e-05,
+ "loss": 0.0058,
+ "step": 1390
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 3.3029404288065426e-05,
+ "loss": 0.0047,
+ "step": 1400
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 3.2818444308023e-05,
+ "loss": 0.0029,
+ "step": 1410
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 3.2606865426114234e-05,
+ "loss": 0.0073,
+ "step": 1420
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 3.239468439075604e-05,
+ "loss": 0.0006,
+ "step": 1430
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 3.2181917998031326e-05,
+ "loss": 0.0028,
+ "step": 1440
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 3.196858309035941e-05,
+ "loss": 0.0003,
+ "step": 1450
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 3.175469655516284e-05,
+ "loss": 0.0007,
+ "step": 1460
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 3.154027532353052e-05,
+ "loss": 0.0037,
+ "step": 1470
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 3.132533636887753e-05,
+ "loss": 0.0065,
+ "step": 1480
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 3.1109896705601485e-05,
+ "loss": 0.0092,
+ "step": 1490
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 3.0893973387735687e-05,
+ "loss": 0.0001,
+ "step": 1500
+ },
+ {
+ "epoch": 1.27,
+ "eval_loss": 0.010954583063721657,
+ "eval_runtime": 88.0029,
+ "eval_samples_per_second": 22.533,
+ "eval_steps_per_second": 2.818,
+ "step": 1500
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 3.067758350759917e-05,
+ "loss": 0.0002,
+ "step": 1510
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 3.046074419444366e-05,
+ "loss": 0.0004,
+ "step": 1520
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 3.0243472613097656e-05,
+ "loss": 0.001,
+ "step": 1530
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 3.002578596260765e-05,
+ "loss": 0.0001,
+ "step": 1540
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 2.980770147487668e-05,
+ "loss": 0.0086,
+ "step": 1550
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 2.958923641330028e-05,
+ "loss": 0.0021,
+ "step": 1560
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 2.9370408071399898e-05,
+ "loss": 0.0001,
+ "step": 1570
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 2.9151233771453956e-05,
+ "loss": 0.0076,
+ "step": 1580
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 2.8931730863126666e-05,
+ "loss": 0.0001,
+ "step": 1590
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 2.871191672209459e-05,
+ "loss": 0.0001,
+ "step": 1600
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 2.8491808748671255e-05,
+ "loss": 0.0001,
+ "step": 1610
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 2.8271424366429706e-05,
+ "loss": 0.0115,
+ "step": 1620
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 2.8050781020823296e-05,
+ "loss": 0.0001,
+ "step": 1630
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 2.7829896177804716e-05,
+ "loss": 0.0003,
+ "step": 1640
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 2.760878732244339e-05,
+ "loss": 0.0003,
+ "step": 1650
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 2.7387471957541405e-05,
+ "loss": 0.0024,
+ "step": 1660
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 2.7165967602247964e-05,
+ "loss": 0.0005,
+ "step": 1670
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 2.694429179067261e-05,
+ "loss": 0.0018,
+ "step": 1680
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 2.6744651468034758e-05,
+ "loss": 0.002,
+ "step": 1690
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 2.6522698243485527e-05,
+ "loss": 0.0001,
+ "step": 1700
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 2.6300624483347926e-05,
+ "loss": 0.0058,
+ "step": 1710
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 2.607844776680513e-05,
+ "loss": 0.0001,
+ "step": 1720
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 2.585618568119027e-05,
+ "loss": 0.0001,
+ "step": 1730
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 2.56338558205942e-05,
+ "loss": 0.0008,
+ "step": 1740
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 2.5411475784472805e-05,
+ "loss": 0.0002,
+ "step": 1750
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 2.5189063176253825e-05,
+ "loss": 0.0001,
+ "step": 1760
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 2.496663560194338e-05,
+ "loss": 0.0001,
+ "step": 1770
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.4744210668732295e-05,
+ "loss": 0.0001,
+ "step": 1780
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.452180598360232e-05,
+ "loss": 0.0001,
+ "step": 1790
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.429943915193239e-05,
+ "loss": 0.0,
+ "step": 1800
+ },
+ {
+ "epoch": 1.54,
+ "learning_rate": 2.4077127776104984e-05,
+ "loss": 0.0146,
+ "step": 1810
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.3854889454112748e-05,
+ "loss": 0.0017,
+ "step": 1820
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.3632741778165442e-05,
+ "loss": 0.0001,
+ "step": 1830
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 2.3410702333297356e-05,
+ "loss": 0.0001,
+ "step": 1840
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 2.318878869597528e-05,
+ "loss": 0.0001,
+ "step": 1850
+ },
+ {
+ "epoch": 1.58,
+ "learning_rate": 2.2967018432707213e-05,
+ "loss": 0.0073,
+ "step": 1860
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 2.2745409098651744e-05,
+ "loss": 0.0001,
+ "step": 1870
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 2.2523978236228442e-05,
+ "loss": 0.0001,
+ "step": 1880
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 2.2302743373729205e-05,
+ "loss": 0.0,
+ "step": 1890
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 2.2081722023930743e-05,
+ "loss": 0.0136,
+ "step": 1900
+ },
+ {
+ "epoch": 1.62,
+ "learning_rate": 2.1860931682708248e-05,
+ "loss": 0.0051,
+ "step": 1910
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 2.164038982765047e-05,
+ "loss": 0.0004,
+ "step": 1920
+ },
+ {
+ "epoch": 1.64,
+ "learning_rate": 2.1420113916676183e-05,
+ "loss": 0.0002,
+ "step": 1930
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 2.1200121386652246e-05,
+ "loss": 0.0001,
+ "step": 1940
+ },
+ {
+ "epoch": 1.66,
+ "learning_rate": 2.0980429652013297e-05,
+ "loss": 0.0001,
+ "step": 1950
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 2.0761056103383258e-05,
+ "loss": 0.0001,
+ "step": 1960
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 2.0542018106198697e-05,
+ "loss": 0.0,
+ "step": 1970
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 2.0323332999334198e-05,
+ "loss": 0.005,
+ "step": 1980
+ },
+ {
+ "epoch": 1.69,
+ "learning_rate": 2.010501809372981e-05,
+ "loss": 0.0149,
+ "step": 1990
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 1.988709067102076e-05,
+ "loss": 0.0143,
+ "step": 2000
+ },
+ {
+ "epoch": 1.7,
+ "eval_loss": 0.013543435372412205,
+ "eval_runtime": 87.9904,
+ "eval_samples_per_second": 22.537,
+ "eval_steps_per_second": 2.818,
+ "step": 2000
+ },
+ {
+ "epoch": 1.71,
+ "learning_rate": 1.966956798216943e-05,
+ "loss": 0.0017,
+ "step": 2010
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 1.945246724609978e-05,
+ "loss": 0.0038,
+ "step": 2020
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 1.9235805648334342e-05,
+ "loss": 0.0003,
+ "step": 2030
+ },
+ {
+ "epoch": 1.73,
+ "learning_rate": 1.9019600339633798e-05,
+ "loss": 0.0005,
+ "step": 2040
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 1.8803868434639345e-05,
+ "loss": 0.0005,
+ "step": 2050
+ },
+ {
+ "epoch": 1.75,
+ "learning_rate": 1.858862701051791e-05,
+ "loss": 0.0009,
+ "step": 2060
+ },
+ {
+ "epoch": 1.76,
+ "learning_rate": 1.8373893105610356e-05,
+ "loss": 0.0002,
+ "step": 2070
+ },
+ {
+ "epoch": 1.77,
+ "learning_rate": 1.815968371808273e-05,
+ "loss": 0.0036,
+ "step": 2080
+ },
+ {
+ "epoch": 1.78,
+ "learning_rate": 1.7946015804580688e-05,
+ "loss": 0.0138,
+ "step": 2090
+ },
+ {
+ "epoch": 1.78,
+ "learning_rate": 1.7732906278887225e-05,
+ "loss": 0.0005,
+ "step": 2100
+ },
+ {
+ "epoch": 1.79,
+ "learning_rate": 1.7520372010583815e-05,
+ "loss": 0.0001,
+ "step": 2110
+ },
+ {
+ "epoch": 1.8,
+ "learning_rate": 1.7308429823714995e-05,
+ "loss": 0.0001,
+ "step": 2120
+ },
+ {
+ "epoch": 1.81,
+ "learning_rate": 1.709709649545662e-05,
+ "loss": 0.0001,
+ "step": 2130
+ },
+ {
+ "epoch": 1.82,
+ "learning_rate": 1.688638875478777e-05,
+ "loss": 0.0001,
+ "step": 2140
+ },
+ {
+ "epoch": 1.83,
+ "learning_rate": 1.66763232811665e-05,
+ "loss": 0.0,
+ "step": 2150
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 1.6466916703209535e-05,
+ "loss": 0.012,
+ "step": 2160
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 1.625818559737592e-05,
+ "loss": 0.0,
+ "step": 2170
+ },
+ {
+ "epoch": 1.85,
+ "learning_rate": 1.605014648665486e-05,
+ "loss": 0.0005,
+ "step": 2180
+ },
+ {
+ "epoch": 1.86,
+ "learning_rate": 1.584281583925779e-05,
+ "loss": 0.0047,
+ "step": 2190
+ },
+ {
+ "epoch": 1.87,
+ "learning_rate": 1.5636210067314744e-05,
+ "loss": 0.0126,
+ "step": 2200
+ },
+ {
+ "epoch": 1.88,
+ "learning_rate": 1.5430345525575186e-05,
+ "loss": 0.0015,
+ "step": 2210
+ },
+ {
+ "epoch": 1.89,
+ "learning_rate": 1.5225238510113377e-05,
+ "loss": 0.0018,
+ "step": 2220
+ },
+ {
+ "epoch": 1.89,
+ "learning_rate": 1.5020905257038403e-05,
+ "loss": 0.0057,
+ "step": 2230
+ },
+ {
+ "epoch": 1.9,
+ "learning_rate": 1.481736194120894e-05,
+ "loss": 0.0036,
+ "step": 2240
+ },
+ {
+ "epoch": 1.91,
+ "learning_rate": 1.4614624674952842e-05,
+ "loss": 0.0006,
+ "step": 2250
+ },
+ {
+ "epoch": 1.92,
+ "learning_rate": 1.4412709506791725e-05,
+ "loss": 0.0054,
+ "step": 2260
+ },
+ {
+ "epoch": 1.93,
+ "learning_rate": 1.4211632420170558e-05,
+ "loss": 0.0039,
+ "step": 2270
+ },
+ {
+ "epoch": 1.94,
+ "learning_rate": 1.4011409332192472e-05,
+ "loss": 0.0017,
+ "step": 2280
+ },
+ {
+ "epoch": 1.95,
+ "learning_rate": 1.3812056092358686e-05,
+ "loss": 0.0181,
+ "step": 2290
+ },
+ {
+ "epoch": 1.95,
+ "learning_rate": 1.3613588481313977e-05,
+ "loss": 0.0035,
+ "step": 2300
+ },
+ {
+ "epoch": 1.96,
+ "learning_rate": 1.3416022209597429e-05,
+ "loss": 0.0001,
+ "step": 2310
+ },
+ {
+ "epoch": 1.97,
+ "learning_rate": 1.3219372916398826e-05,
+ "loss": 0.0005,
+ "step": 2320
+ },
+ {
+ "epoch": 1.98,
+ "learning_rate": 1.302365616832063e-05,
+ "loss": 0.0012,
+ "step": 2330
+ },
+ {
+ "epoch": 1.99,
+ "learning_rate": 1.2828887458145806e-05,
+ "loss": 0.0052,
+ "step": 2340
+ },
+ {
+ "epoch": 2.0,
+ "learning_rate": 1.2635082203611375e-05,
+ "loss": 0.0008,
+ "step": 2350
+ },
+ {
+ "epoch": 2.01,
+ "learning_rate": 1.2442255746187954e-05,
+ "loss": 0.0002,
+ "step": 2360
+ },
+ {
+ "epoch": 2.01,
+ "learning_rate": 1.2250423349865387e-05,
+ "loss": 0.0009,
+ "step": 2370
+ },
+ {
+ "epoch": 2.02,
+ "learning_rate": 1.2059600199944388e-05,
+ "loss": 0.0002,
+ "step": 2380
+ },
+ {
+ "epoch": 2.03,
+ "learning_rate": 1.1869801401834564e-05,
+ "loss": 0.0001,
+ "step": 2390
+ },
+ {
+ "epoch": 2.04,
+ "learning_rate": 1.1681041979858626e-05,
+ "loss": 0.0001,
+ "step": 2400
+ },
+ {
+ "epoch": 2.05,
+ "learning_rate": 1.1493336876063071e-05,
+ "loss": 0.0001,
+ "step": 2410
+ },
+ {
+ "epoch": 2.06,
+ "learning_rate": 1.1306700949035462e-05,
+ "loss": 0.0,
+ "step": 2420
+ },
+ {
+ "epoch": 2.06,
+ "learning_rate": 1.1121148972728104e-05,
+ "loss": 0.0001,
+ "step": 2430
+ },
+ {
+ "epoch": 2.07,
+ "learning_rate": 1.0936695635288674e-05,
+ "loss": 0.0001,
+ "step": 2440
+ },
+ {
+ "epoch": 2.08,
+ "learning_rate": 1.0753355537897427e-05,
+ "loss": 0.0001,
+ "step": 2450
+ },
+ {
+ "epoch": 2.09,
+ "learning_rate": 1.0571143193611444e-05,
+ "loss": 0.0,
+ "step": 2460
+ },
+ {
+ "epoch": 2.1,
+ "learning_rate": 1.039007302621576e-05,
+ "loss": 0.0001,
+ "step": 2470
+ },
+ {
+ "epoch": 2.11,
+ "learning_rate": 1.0210159369081568e-05,
+ "loss": 0.0003,
+ "step": 2480
+ },
+ {
+ "epoch": 2.12,
+ "learning_rate": 1.0031416464031654e-05,
+ "loss": 0.0,
+ "step": 2490
+ },
+ {
+ "epoch": 2.12,
+ "learning_rate": 9.853858460212962e-06,
+ "loss": 0.0001,
+ "step": 2500
+ },
+ {
+ "epoch": 2.12,
+ "eval_loss": 0.012909023091197014,
+ "eval_runtime": 87.9835,
+ "eval_samples_per_second": 22.538,
+ "eval_steps_per_second": 2.819,
+ "step": 2500
+ },
+ {
+ "epoch": 2.13,
+ "learning_rate": 9.677499412976632e-06,
+ "loss": 0.0,
+ "step": 2510
+ },
+ {
+ "epoch": 2.14,
+ "learning_rate": 9.502353282765306e-06,
+ "loss": 0.0001,
+ "step": 2520
+ },
+ {
+ "epoch": 2.15,
+ "learning_rate": 9.328433934008107e-06,
+ "loss": 0.0002,
+ "step": 2530
+ },
+ {
+ "epoch": 2.16,
+ "learning_rate": 9.155755134023097e-06,
+ "loss": 0.0,
+ "step": 2540
+ },
+ {
+ "epoch": 2.17,
+ "learning_rate": 8.984330551927475e-06,
+ "loss": 0.0,
+ "step": 2550
+ },
+ {
+ "epoch": 2.18,
+ "learning_rate": 8.81417375755556e-06,
+ "loss": 0.0002,
+ "step": 2560
+ },
+ {
+ "epoch": 2.18,
+ "learning_rate": 8.645298220384567e-06,
+ "loss": 0.0001,
+ "step": 2570
+ },
+ {
+ "epoch": 2.19,
+ "learning_rate": 8.477717308468442e-06,
+ "loss": 0.0008,
+ "step": 2580
+ },
+ {
+ "epoch": 2.2,
+ "learning_rate": 8.31144428737958e-06,
+ "loss": 0.0,
+ "step": 2590
+ },
+ {
+ "epoch": 2.21,
+ "learning_rate": 8.146492319158805e-06,
+ "loss": 0.0,
+ "step": 2600
+ },
+ {
+ "epoch": 2.22,
+ "learning_rate": 7.982874461273438e-06,
+ "loss": 0.0001,
+ "step": 2610
+ },
+ {
+ "epoch": 2.23,
+ "learning_rate": 7.820603665583654e-06,
+ "loss": 0.0002,
+ "step": 2620
+ },
+ {
+ "epoch": 2.23,
+ "learning_rate": 7.659692777317288e-06,
+ "loss": 0.0001,
+ "step": 2630
+ },
+ {
+ "epoch": 2.24,
+ "learning_rate": 7.500154534052933e-06,
+ "loss": 0.0001,
+ "step": 2640
+ },
+ {
+ "epoch": 2.25,
+ "learning_rate": 7.342001564711756e-06,
+ "loss": 0.0001,
+ "step": 2650
+ },
+ {
+ "epoch": 2.26,
+ "learning_rate": 7.185246388557665e-06,
+ "loss": 0.0,
+ "step": 2660
+ },
+ {
+ "epoch": 2.27,
+ "learning_rate": 7.0299014142064106e-06,
+ "loss": 0.0,
+ "step": 2670
+ },
+ {
+ "epoch": 2.28,
+ "learning_rate": 6.875978938643277e-06,
+ "loss": 0.0001,
+ "step": 2680
+ },
+ {
+ "epoch": 2.29,
+ "learning_rate": 6.723491146249647e-06,
+ "loss": 0.0,
+ "step": 2690
+ },
+ {
+ "epoch": 2.29,
+ "learning_rate": 6.572450107838551e-06,
+ "loss": 0.0001,
+ "step": 2700
+ },
+ {
+ "epoch": 2.3,
+ "learning_rate": 6.422867779699088e-06,
+ "loss": 0.0002,
+ "step": 2710
+ },
+ {
+ "epoch": 2.31,
+ "learning_rate": 6.274756002650034e-06,
+ "loss": 0.0001,
+ "step": 2720
+ },
+ {
+ "epoch": 2.32,
+ "learning_rate": 6.128126501102479e-06,
+ "loss": 0.0001,
+ "step": 2730
+ },
+ {
+ "epoch": 2.33,
+ "learning_rate": 5.982990882131775e-06,
+ "loss": 0.0001,
+ "step": 2740
+ },
+ {
+ "epoch": 2.34,
+ "learning_rate": 5.83936063455871e-06,
+ "loss": 0.0001,
+ "step": 2750
+ },
+ {
+ "epoch": 2.34,
+ "learning_rate": 5.697247128040037e-06,
+ "loss": 0.0,
+ "step": 2760
+ },
+ {
+ "epoch": 2.35,
+ "learning_rate": 5.556661612168537e-06,
+ "loss": 0.0003,
+ "step": 2770
+ },
+ {
+ "epoch": 2.36,
+ "learning_rate": 5.417615215582408e-06,
+ "loss": 0.0001,
+ "step": 2780
+ },
+ {
+ "epoch": 2.37,
+ "learning_rate": 5.280118945084422e-06,
+ "loss": 0.0001,
+ "step": 2790
+ },
+ {
+ "epoch": 2.38,
+ "learning_rate": 5.144183684770565e-06,
+ "loss": 0.0,
+ "step": 2800
+ },
+ {
+ "epoch": 2.39,
+ "learning_rate": 5.00982019516851e-06,
+ "loss": 0.0,
+ "step": 2810
+ },
+ {
+ "epoch": 2.4,
+ "learning_rate": 4.877039112385815e-06,
+ "loss": 0.0001,
+ "step": 2820
+ },
+ {
+ "epoch": 2.4,
+ "learning_rate": 4.74585094726793e-06,
+ "loss": 0.0003,
+ "step": 2830
+ },
+ {
+ "epoch": 2.41,
+ "learning_rate": 4.616266084566243e-06,
+ "loss": 0.0,
+ "step": 2840
+ },
+ {
+ "epoch": 2.42,
+ "learning_rate": 4.488294782115957e-06,
+ "loss": 0.0001,
+ "step": 2850
+ },
+ {
+ "epoch": 2.43,
+ "learning_rate": 4.361947170024144e-06,
+ "loss": 0.0001,
+ "step": 2860
+ },
+ {
+ "epoch": 2.44,
+ "learning_rate": 4.2372332498678256e-06,
+ "loss": 0.0007,
+ "step": 2870
+ },
+ {
+ "epoch": 2.45,
+ "learning_rate": 4.11416289390226e-06,
+ "loss": 0.0061,
+ "step": 2880
+ },
+ {
+ "epoch": 2.46,
+ "learning_rate": 3.992745844279475e-06,
+ "loss": 0.0,
+ "step": 2890
+ },
+ {
+ "epoch": 2.46,
+ "learning_rate": 3.872991712277052e-06,
+ "loss": 0.0001,
+ "step": 2900
+ },
+ {
+ "epoch": 2.47,
+ "learning_rate": 3.7549099775373576e-06,
+ "loss": 0.0002,
+ "step": 2910
+ },
+ {
+ "epoch": 2.48,
+ "learning_rate": 3.6385099873170875e-06,
+ "loss": 0.0,
+ "step": 2920
+ },
+ {
+ "epoch": 2.49,
+ "learning_rate": 3.5238009557473946e-06,
+ "loss": 0.0,
+ "step": 2930
+ },
+ {
+ "epoch": 2.5,
+ "learning_rate": 3.4107919631044732e-06,
+ "loss": 0.0028,
+ "step": 2940
+ },
+ {
+ "epoch": 2.51,
+ "learning_rate": 3.299491955090775e-06,
+ "loss": 0.0003,
+ "step": 2950
+ },
+ {
+ "epoch": 2.51,
+ "learning_rate": 3.1899097421268924e-06,
+ "loss": 0.0001,
+ "step": 2960
+ },
+ {
+ "epoch": 2.52,
+ "learning_rate": 3.0820539986541054e-06,
+ "loss": 0.0,
+ "step": 2970
+ },
+ {
+ "epoch": 2.53,
+ "learning_rate": 2.97593326244775e-06,
+ "loss": 0.0013,
+ "step": 2980
+ },
+ {
+ "epoch": 2.54,
+ "learning_rate": 2.871555933941353e-06,
+ "loss": 0.0001,
+ "step": 2990
+ },
+ {
+ "epoch": 2.55,
+ "learning_rate": 2.7689302755616736e-06,
+ "loss": 0.0001,
+ "step": 3000
+ },
+ {
+ "epoch": 2.55,
+ "eval_loss": 0.014460938051342964,
+ "eval_runtime": 87.9924,
+ "eval_samples_per_second": 22.536,
+ "eval_steps_per_second": 2.818,
+ "step": 3000
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 3531,
+ "num_train_epochs": 3,
+ "save_steps": 1000,
+ "total_flos": 4.510363947053875e+17,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/LLM-Detector-V1-4w/checkpoint-3000/training_args.bin b/LLM-Detector-V1-4w/checkpoint-3000/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e37f40cb61d45b6c2efd87b62a5ee72c12d2b4c2
--- /dev/null
+++ b/LLM-Detector-V1-4w/checkpoint-3000/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c31eb820fabf5021fa0eda935da3d201c65c7331d3ce4ce4ad4631151a6068e9
+size 4664
diff --git a/LLM-Detector-V1-4w/eval_results.json b/LLM-Detector-V1-4w/eval_results.json
new file mode 100644
index 0000000000000000000000000000000000000000..1cd58a4a1f8aa93461714bdbab830cf7e18af255
--- /dev/null
+++ b/LLM-Detector-V1-4w/eval_results.json
@@ -0,0 +1,7 @@
+{
+ "epoch": 3.0,
+ "eval_loss": 0.014986271038651466,
+ "eval_runtime": 87.9616,
+ "eval_samples_per_second": 22.544,
+ "eval_steps_per_second": 2.819
+}
\ No newline at end of file
diff --git a/LLM-Detector-V1-4w/special_tokens_map.json b/LLM-Detector-V1-4w/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..5819ea25d7b1b6340063a0629c2143c44b0452da
--- /dev/null
+++ b/LLM-Detector-V1-4w/special_tokens_map.json
@@ -0,0 +1,30 @@
+{
+ "bos_token": {
+    "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+    "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+    "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+    "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/LLM-Detector-V1-4w/tokenization_baichuan.py b/LLM-Detector-V1-4w/tokenization_baichuan.py
new file mode 100644
index 0000000000000000000000000000000000000000..256544b45542d2f5dcd12a65e2f0ddaeeb9def25
--- /dev/null
+++ b/LLM-Detector-V1-4w/tokenization_baichuan.py
@@ -0,0 +1,251 @@
+# Copyright 2023 Baichuan Inc. All Rights Reserved.
+
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {},
+ "tokenizer_file": {},
+}
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
+
+
+class BaichuanTokenizer(PreTrainedTokenizer):
+ """
+ Construct a Baichuan tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ unk_token="",
+ bos_token="",
+ eos_token="",
+ pad_token=None,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ add_bos_token=True,
+ add_eos_token=False,
+ clean_up_tokenization_spaces=False,
+ **kwargs,
+ ):
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ add_bos_token=add_bos_token,
+ add_eos_token=add_eos_token,
+ sp_model_kwargs=self.sp_model_kwargs,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ **kwargs,
+ )
+ self.vocab_file = vocab_file
+ self.add_bos_token = add_bos_token
+ self.add_eos_token = add_eos_token
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(vocab_file)
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.vocab_file)
+
+ @property
+ def vocab_size(self):
+ """Returns vocab size"""
+ return self.sp_model.get_piece_size()
+
+ def get_vocab(self):
+ """Returns vocab as a dict"""
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def _tokenize(self, text):
+ """Returns a tokenized string."""
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.sp_model.piece_to_id(token)
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ token = self.sp_model.IdToPiece(index)
+ return token
+
+    def convert_tokens_to_string(self, tokens):
+        """Join a list of token strings back into plain text.
+
+        Special tokens are appended verbatim (they must not pass through the
+        sentencepiece decoder); runs of ordinary tokens between them are
+        buffered and decoded in one call. A single space is inserted before a
+        special token when it directly follows ordinary text.
+        """
+        current_sub_tokens = []
+        out_string = ""
+        prev_is_special = False
+        for i, token in enumerate(tokens):
+            # make sure that special tokens are not decoded using sentencepiece model
+            if token in self.all_special_tokens:
+                # Separate the special token from preceding decoded text,
+                # except at the very start or after another special token.
+                if not prev_is_special and i != 0:
+                    out_string += " "
+                # Flush the buffered ordinary tokens, then append the special
+                # token as-is.
+                out_string += self.sp_model.decode(current_sub_tokens) + token
+                prev_is_special = True
+                current_sub_tokens = []
+            else:
+                current_sub_tokens.append(token)
+                prev_is_special = False
+        # Flush any trailing ordinary tokens.
+        out_string += self.sp_model.decode(current_sub_tokens)
+        return out_string
+
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ """
+ Save the vocabulary and special tokens file to a directory.
+
+ Args:
+ save_directory (`str`):
+ The directory in which to save the vocabulary.
+
+ Returns:
+ `Tuple(str)`: Paths to the files saved.
+ """
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+ output = bos_token_id + token_ids_0 + eos_token_id
+
+ if token_ids_1 is not None:
+ output = output + bos_token_id + token_ids_1 + eos_token_id
+
+ return output
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ bos_token_id = [1] if self.add_bos_token else []
+ eos_token_id = [1] if self.add_eos_token else []
+
+ if token_ids_1 is None:
+ return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+ return (
+ bos_token_id
+ + ([0] * len(token_ids_0))
+ + eos_token_id
+ + bos_token_id
+ + ([0] * len(token_ids_1))
+ + eos_token_id
+ )
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ if token_ids_1 is None, only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of ids.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+ output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+ if token_ids_1 is not None:
+ output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+ return output
diff --git a/LLM-Detector-V1-4w/tokenizer.model b/LLM-Detector-V1-4w/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..b3902c4521d7f34868ac76dd16150ff5ca41b000
--- /dev/null
+++ b/LLM-Detector-V1-4w/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79452955be6b419a65984273a9f08af86042e1c2a75ee3ba989cbf620a133cc2
+size 2001107
diff --git a/LLM-Detector-V1-4w/tokenizer_config.json b/LLM-Detector-V1-4w/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..d14486d6a5be1135bdda779a8ffcde1b77155302
--- /dev/null
+++ b/LLM-Detector-V1-4w/tokenizer_config.json
@@ -0,0 +1,49 @@
+{
+ "add_bos_token": false,
+ "add_eos_token": false,
+ "auto_map": {
+ "AutoTokenizer": [
+ "tokenization_baichuan.BaichuanTokenizer",
+ null
+ ]
+ },
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "model_max_length": 4096,
+ "pad_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "split_special_tokens": false,
+ "tokenizer_class": "BaichuanTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "use_fast": false
+}
diff --git a/LLM-Detector-V1-4w/train_results.json b/LLM-Detector-V1-4w/train_results.json
new file mode 100644
index 0000000000000000000000000000000000000000..189495d4cc94b12504663e66226f82663fb61226
--- /dev/null
+++ b/LLM-Detector-V1-4w/train_results.json
@@ -0,0 +1,7 @@
+{
+ "epoch": 3.0,
+ "train_loss": 0.06714861565509712,
+ "train_runtime": 17560.0547,
+ "train_samples_per_second": 6.434,
+ "train_steps_per_second": 0.201
+}
\ No newline at end of file
diff --git a/LLM-Detector-V1-4w/trainer_log.jsonl b/LLM-Detector-V1-4w/trainer_log.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..4355f412a004abd57fafd49d7b4e7e7920a350f0
--- /dev/null
+++ b/LLM-Detector-V1-4w/trainer_log.jsonl
@@ -0,0 +1,362 @@
+{"current_steps": 10, "total_steps": 3531, "loss": 9.9461, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.999919851200522e-05, "epoch": 0.01, "percentage": 0.28, "elapsed_time": "0:00:50", "remaining_time": "4:59:13"}
+{"current_steps": 20, "total_steps": 3531, "loss": 6.4908, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9996428002198536e-05, "epoch": 0.02, "percentage": 0.57, "elapsed_time": "0:01:40", "remaining_time": "4:52:40"}
+{"current_steps": 30, "total_steps": 3531, "loss": 3.708, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9992242747551964e-05, "epoch": 0.03, "percentage": 0.85, "elapsed_time": "0:02:26", "remaining_time": "4:44:13"}
+{"current_steps": 40, "total_steps": 3531, "loss": 0.8908, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.99857130295276e-05, "epoch": 0.03, "percentage": 1.13, "elapsed_time": "0:03:14", "remaining_time": "4:42:21"}
+{"current_steps": 50, "total_steps": 3531, "loss": 0.2454, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.997720546222574e-05, "epoch": 0.04, "percentage": 1.42, "elapsed_time": "0:04:00", "remaining_time": "4:38:33"}
+{"current_steps": 60, "total_steps": 3531, "loss": 0.1348, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.996672071909866e-05, "epoch": 0.05, "percentage": 1.7, "elapsed_time": "0:04:47", "remaining_time": "4:36:52"}
+{"current_steps": 70, "total_steps": 3531, "loss": 0.0487, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.995425963011034e-05, "epoch": 0.06, "percentage": 1.98, "elapsed_time": "0:05:35", "remaining_time": "4:36:09"}
+{"current_steps": 80, "total_steps": 3531, "loss": 0.0282, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.993982318167074e-05, "epoch": 0.07, "percentage": 2.27, "elapsed_time": "0:06:17", "remaining_time": "4:31:36"}
+{"current_steps": 90, "total_steps": 3531, "loss": 0.0455, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.992341251655768e-05, "epoch": 0.08, "percentage": 2.55, "elapsed_time": "0:07:16", "remaining_time": "4:38:08"}
+{"current_steps": 100, "total_steps": 3531, "loss": 0.0472, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9905028933826435e-05, "epoch": 0.08, "percentage": 2.83, "elapsed_time": "0:08:09", "remaining_time": "4:39:57"}
+{"current_steps": 110, "total_steps": 3531, "loss": 0.0526, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.988467388870688e-05, "epoch": 0.09, "percentage": 3.12, "elapsed_time": "0:08:55", "remaining_time": "4:37:32"}
+{"current_steps": 120, "total_steps": 3531, "loss": 0.0679, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.986234899248826e-05, "epoch": 0.1, "percentage": 3.4, "elapsed_time": "0:09:42", "remaining_time": "4:35:57"}
+{"current_steps": 130, "total_steps": 3531, "loss": 0.0314, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.983805601239172e-05, "epoch": 0.11, "percentage": 3.68, "elapsed_time": "0:10:31", "remaining_time": "4:35:12"}
+{"current_steps": 140, "total_steps": 3531, "loss": 0.0136, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.981179687143034e-05, "epoch": 0.12, "percentage": 3.96, "elapsed_time": "0:11:15", "remaining_time": "4:32:50"}
+{"current_steps": 150, "total_steps": 3531, "loss": 0.0409, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.978357364825695e-05, "epoch": 0.13, "percentage": 4.25, "elapsed_time": "0:12:02", "remaining_time": "4:31:18"}
+{"current_steps": 160, "total_steps": 3531, "loss": 0.0284, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.975338857699956e-05, "epoch": 0.14, "percentage": 4.53, "elapsed_time": "0:12:51", "remaining_time": "4:30:46"}
+{"current_steps": 170, "total_steps": 3531, "loss": 0.0364, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.972124404708454e-05, "epoch": 0.14, "percentage": 4.81, "elapsed_time": "0:13:35", "remaining_time": "4:28:44"}
+{"current_steps": 180, "total_steps": 3531, "loss": 0.0147, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.968714260304743e-05, "epoch": 0.15, "percentage": 5.1, "elapsed_time": "0:14:27", "remaining_time": "4:29:04"}
+{"current_steps": 190, "total_steps": 3531, "loss": 0.0174, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.965108694433159e-05, "epoch": 0.16, "percentage": 5.38, "elapsed_time": "0:15:14", "remaining_time": "4:27:51"}
+{"current_steps": 200, "total_steps": 3531, "loss": 0.0244, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.961307992507443e-05, "epoch": 0.17, "percentage": 5.66, "elapsed_time": "0:16:09", "remaining_time": "4:29:05"}
+{"current_steps": 210, "total_steps": 3531, "loss": 0.0387, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.957312455388152e-05, "epoch": 0.18, "percentage": 5.95, "elapsed_time": "0:16:54", "remaining_time": "4:27:22"}
+{"current_steps": 220, "total_steps": 3531, "loss": 0.0264, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.953122399358845e-05, "epoch": 0.19, "percentage": 6.23, "elapsed_time": "0:17:38", "remaining_time": "4:25:37"}
+{"current_steps": 230, "total_steps": 3531, "loss": 0.0291, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.948738156101042e-05, "epoch": 0.2, "percentage": 6.51, "elapsed_time": "0:18:21", "remaining_time": "4:23:32"}
+{"current_steps": 240, "total_steps": 3531, "loss": 0.0214, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9441600726679694e-05, "epoch": 0.2, "percentage": 6.8, "elapsed_time": "0:19:03", "remaining_time": "4:21:27"}
+{"current_steps": 250, "total_steps": 3531, "loss": 0.0116, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.939388511457092e-05, "epoch": 0.21, "percentage": 7.08, "elapsed_time": "0:19:46", "remaining_time": "4:19:35"}
+{"current_steps": 260, "total_steps": 3531, "loss": 0.0191, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.934423850181419e-05, "epoch": 0.22, "percentage": 7.36, "elapsed_time": "0:20:32", "remaining_time": "4:18:26"}
+{"current_steps": 270, "total_steps": 3531, "loss": 0.0064, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9292664818396117e-05, "epoch": 0.23, "percentage": 7.65, "elapsed_time": "0:21:13", "remaining_time": "4:16:15"}
+{"current_steps": 280, "total_steps": 3531, "loss": 0.0184, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9239168146848666e-05, "epoch": 0.24, "percentage": 7.93, "elapsed_time": "0:22:03", "remaining_time": "4:16:06"}
+{"current_steps": 290, "total_steps": 3531, "loss": 0.0026, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9183752721926036e-05, "epoch": 0.25, "percentage": 8.21, "elapsed_time": "0:22:49", "remaining_time": "4:15:10"}
+{"current_steps": 300, "total_steps": 3531, "loss": 0.0223, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.912642293026942e-05, "epoch": 0.25, "percentage": 8.5, "elapsed_time": "0:23:32", "remaining_time": "4:13:37"}
+{"current_steps": 310, "total_steps": 3531, "loss": 0.0405, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.906718331005979e-05, "epoch": 0.26, "percentage": 8.78, "elapsed_time": "0:24:20", "remaining_time": "4:12:50"}
+{"current_steps": 320, "total_steps": 3531, "loss": 0.0461, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.900603855065861e-05, "epoch": 0.27, "percentage": 9.06, "elapsed_time": "0:25:14", "remaining_time": "4:13:13"}
+{"current_steps": 330, "total_steps": 3531, "loss": 0.0199, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.894299349223665e-05, "epoch": 0.28, "percentage": 9.35, "elapsed_time": "0:25:58", "remaining_time": "4:12:00"}
+{"current_steps": 340, "total_steps": 3531, "loss": 0.0193, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8878053125390875e-05, "epoch": 0.29, "percentage": 9.63, "elapsed_time": "0:26:45", "remaining_time": "4:11:11"}
+{"current_steps": 350, "total_steps": 3531, "loss": 0.004, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.881122259074935e-05, "epoch": 0.3, "percentage": 9.91, "elapsed_time": "0:27:27", "remaining_time": "4:09:36"}
+{"current_steps": 360, "total_steps": 3531, "loss": 0.0018, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.874250717856433e-05, "epoch": 0.31, "percentage": 10.2, "elapsed_time": "0:28:12", "remaining_time": "4:08:30"}
+{"current_steps": 370, "total_steps": 3531, "loss": 0.0021, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.867191232829348e-05, "epoch": 0.31, "percentage": 10.48, "elapsed_time": "0:29:00", "remaining_time": "4:07:51"}
+{"current_steps": 380, "total_steps": 3531, "loss": 0.018, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8599443628169295e-05, "epoch": 0.32, "percentage": 10.76, "elapsed_time": "0:29:44", "remaining_time": "4:06:37"}
+{"current_steps": 390, "total_steps": 3531, "loss": 0.0261, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8525106814756754e-05, "epoch": 0.33, "percentage": 11.05, "elapsed_time": "0:30:35", "remaining_time": "4:06:20"}
+{"current_steps": 400, "total_steps": 3531, "loss": 0.016, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.84489077724992e-05, "epoch": 0.34, "percentage": 11.33, "elapsed_time": "0:31:26", "remaining_time": "4:06:09"}
+{"current_steps": 410, "total_steps": 3531, "loss": 0.0402, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8370852533252536e-05, "epoch": 0.35, "percentage": 11.61, "elapsed_time": "0:32:16", "remaining_time": "4:05:39"}
+{"current_steps": 420, "total_steps": 3531, "loss": 0.0038, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8290947275807755e-05, "epoch": 0.36, "percentage": 11.89, "elapsed_time": "0:33:03", "remaining_time": "4:04:54"}
+{"current_steps": 430, "total_steps": 3531, "loss": 0.008, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8209198325401815e-05, "epoch": 0.37, "percentage": 12.18, "elapsed_time": "0:33:50", "remaining_time": "4:04:01"}
+{"current_steps": 440, "total_steps": 3531, "loss": 0.0296, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8125612153216976e-05, "epoch": 0.37, "percentage": 12.46, "elapsed_time": "0:34:40", "remaining_time": "4:03:34"}
+{"current_steps": 450, "total_steps": 3531, "loss": 0.0012, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.804019537586849e-05, "epoch": 0.38, "percentage": 12.74, "elapsed_time": "0:35:25", "remaining_time": "4:02:33"}
+{"current_steps": 460, "total_steps": 3531, "loss": 0.0142, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.7952954754880886e-05, "epoch": 0.39, "percentage": 13.03, "elapsed_time": "0:36:13", "remaining_time": "4:01:52"}
+{"current_steps": 470, "total_steps": 3531, "loss": 0.0163, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.7863897196152704e-05, "epoch": 0.4, "percentage": 13.31, "elapsed_time": "0:37:04", "remaining_time": "4:01:27"}
+{"current_steps": 480, "total_steps": 3531, "loss": 0.0021, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.7773029749409836e-05, "epoch": 0.41, "percentage": 13.59, "elapsed_time": "0:37:53", "remaining_time": "4:00:52"}
+{"current_steps": 490, "total_steps": 3531, "loss": 0.0355, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.76803596076475e-05, "epoch": 0.42, "percentage": 13.88, "elapsed_time": "0:38:40", "remaining_time": "4:00:00"}
+{"current_steps": 500, "total_steps": 3531, "loss": 0.0199, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.758589410656078e-05, "epoch": 0.42, "percentage": 14.16, "elapsed_time": "0:39:24", "remaining_time": "3:58:55"}
+{"current_steps": 500, "total_steps": 3531, "loss": null, "eval_loss": 0.010466881096363068, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.42, "percentage": 14.16, "elapsed_time": "0:39:24", "remaining_time": "3:58:55"}
+{"current_steps": 510, "total_steps": 3531, "loss": 0.0341, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.748964072396403e-05, "epoch": 0.43, "percentage": 14.44, "elapsed_time": "0:41:49", "remaining_time": "4:07:42"}
+{"current_steps": 520, "total_steps": 3531, "loss": 0.0137, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.7391607079198876e-05, "epoch": 0.44, "percentage": 14.73, "elapsed_time": "0:42:32", "remaining_time": "4:06:17"}
+{"current_steps": 530, "total_steps": 3531, "loss": 0.0138, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.7291800932531064e-05, "epoch": 0.45, "percentage": 15.01, "elapsed_time": "0:43:25", "remaining_time": "4:05:55"}
+{"current_steps": 540, "total_steps": 3531, "loss": 0.0063, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.719023018453623e-05, "epoch": 0.46, "percentage": 15.29, "elapsed_time": "0:44:16", "remaining_time": "4:05:12"}
+{"current_steps": 550, "total_steps": 3531, "loss": 0.0376, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.708690287547441e-05, "epoch": 0.47, "percentage": 15.58, "elapsed_time": "0:45:07", "remaining_time": "4:04:36"}
+{"current_steps": 560, "total_steps": 3531, "loss": 0.006, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.698182718465368e-05, "epoch": 0.48, "percentage": 15.86, "elapsed_time": "0:45:54", "remaining_time": "4:03:30"}
+{"current_steps": 570, "total_steps": 3531, "loss": 0.0371, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.687501142978258e-05, "epoch": 0.48, "percentage": 16.14, "elapsed_time": "0:46:42", "remaining_time": "4:02:38"}
+{"current_steps": 580, "total_steps": 3531, "loss": 0.0322, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.6766464066311765e-05, "epoch": 0.49, "percentage": 16.43, "elapsed_time": "0:47:39", "remaining_time": "4:02:27"}
+{"current_steps": 590, "total_steps": 3531, "loss": 0.0086, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.665619368676466e-05, "epoch": 0.5, "percentage": 16.71, "elapsed_time": "0:48:24", "remaining_time": "4:01:20"}
+{"current_steps": 600, "total_steps": 3531, "loss": 0.002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.6544209020057285e-05, "epoch": 0.51, "percentage": 16.99, "elapsed_time": "0:49:11", "remaining_time": "4:00:18"}
+{"current_steps": 610, "total_steps": 3531, "loss": 0.0147, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.643051893080725e-05, "epoch": 0.52, "percentage": 17.28, "elapsed_time": "0:49:55", "remaining_time": "3:59:02"}
+{"current_steps": 620, "total_steps": 3531, "loss": 0.0038, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.631513241863209e-05, "epoch": 0.53, "percentage": 17.56, "elapsed_time": "0:50:44", "remaining_time": "3:58:13"}
+{"current_steps": 630, "total_steps": 3531, "loss": 0.0187, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.619805861743683e-05, "epoch": 0.54, "percentage": 17.84, "elapsed_time": "0:51:31", "remaining_time": "3:57:14"}
+{"current_steps": 640, "total_steps": 3531, "loss": 0.0063, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.607930679469096e-05, "epoch": 0.54, "percentage": 18.13, "elapsed_time": "0:52:23", "remaining_time": "3:56:39"}
+{"current_steps": 650, "total_steps": 3531, "loss": 0.0109, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.595888635069481e-05, "epoch": 0.55, "percentage": 18.41, "elapsed_time": "0:53:13", "remaining_time": "3:55:53"}
+{"current_steps": 660, "total_steps": 3531, "loss": 0.005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.5836806817835475e-05, "epoch": 0.56, "percentage": 18.69, "elapsed_time": "0:54:05", "remaining_time": "3:55:19"}
+{"current_steps": 670, "total_steps": 3531, "loss": 0.0167, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.57130778598322e-05, "epoch": 0.57, "percentage": 18.97, "elapsed_time": "0:54:52", "remaining_time": "3:54:19"}
+{"current_steps": 680, "total_steps": 3531, "loss": 0.0143, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.5587709270971425e-05, "epoch": 0.58, "percentage": 19.26, "elapsed_time": "0:55:42", "remaining_time": "3:53:35"}
+{"current_steps": 690, "total_steps": 3531, "loss": 0.0015, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.546071097533145e-05, "epoch": 0.59, "percentage": 19.54, "elapsed_time": "0:56:31", "remaining_time": "3:52:42"}
+{"current_steps": 700, "total_steps": 3531, "loss": 0.0003, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.533209302599691e-05, "epoch": 0.59, "percentage": 19.82, "elapsed_time": "0:57:19", "remaining_time": "3:51:51"}
+{"current_steps": 710, "total_steps": 3531, "loss": 0.006, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.520186560426292e-05, "epoch": 0.6, "percentage": 20.11, "elapsed_time": "0:58:08", "remaining_time": "3:50:59"}
+{"current_steps": 720, "total_steps": 3531, "loss": 0.0093, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.507003901882915e-05, "epoch": 0.61, "percentage": 20.39, "elapsed_time": "0:59:02", "remaining_time": "3:50:31"}
+{"current_steps": 730, "total_steps": 3531, "loss": 0.0046, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.493662370498383e-05, "epoch": 0.62, "percentage": 20.67, "elapsed_time": "0:59:49", "remaining_time": "3:49:31"}
+{"current_steps": 740, "total_steps": 3531, "loss": 0.0147, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.4801630223777665e-05, "epoch": 0.63, "percentage": 20.96, "elapsed_time": "1:00:38", "remaining_time": "3:48:43"}
+{"current_steps": 750, "total_steps": 3531, "loss": 0.0102, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.466506926118782e-05, "epoch": 0.64, "percentage": 21.24, "elapsed_time": "1:01:26", "remaining_time": "3:47:51"}
+{"current_steps": 760, "total_steps": 3531, "loss": 0.017, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.4526951627272074e-05, "epoch": 0.65, "percentage": 21.52, "elapsed_time": "1:02:18", "remaining_time": "3:47:10"}
+{"current_steps": 770, "total_steps": 3531, "loss": 0.0033, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.438728825531305e-05, "epoch": 0.65, "percentage": 21.81, "elapsed_time": "1:03:04", "remaining_time": "3:46:11"}
+{"current_steps": 780, "total_steps": 3531, "loss": 0.0061, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.4246090200952816e-05, "epoch": 0.66, "percentage": 22.09, "elapsed_time": "1:03:46", "remaining_time": "3:44:57"}
+{"current_steps": 790, "total_steps": 3531, "loss": 0.0032, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.410336864131762e-05, "epoch": 0.67, "percentage": 22.37, "elapsed_time": "1:04:35", "remaining_time": "3:44:07"}
+{"current_steps": 800, "total_steps": 3531, "loss": 0.0043, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.395913487413324e-05, "epoch": 0.68, "percentage": 22.66, "elapsed_time": "1:05:23", "remaining_time": "3:43:13"}
+{"current_steps": 810, "total_steps": 3531, "loss": 0.0063, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.3813400316830576e-05, "epoch": 0.69, "percentage": 22.94, "elapsed_time": "1:06:14", "remaining_time": "3:42:32"}
+{"current_steps": 820, "total_steps": 3531, "loss": 0.0273, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.36661765056419e-05, "epoch": 0.7, "percentage": 23.22, "elapsed_time": "1:07:01", "remaining_time": "3:41:34"}
+{"current_steps": 830, "total_steps": 3531, "loss": 0.0125, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.351747509468763e-05, "epoch": 0.71, "percentage": 23.51, "elapsed_time": "1:07:50", "remaining_time": "3:40:45"}
+{"current_steps": 840, "total_steps": 3531, "loss": 0.0076, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.336730785505382e-05, "epoch": 0.71, "percentage": 23.79, "elapsed_time": "1:08:46", "remaining_time": "3:40:18"}
+{"current_steps": 850, "total_steps": 3531, "loss": 0.0127, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.3215686673860384e-05, "epoch": 0.72, "percentage": 24.07, "elapsed_time": "1:09:39", "remaining_time": "3:39:42"}
+{"current_steps": 860, "total_steps": 3531, "loss": 0.0161, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.306262355332006e-05, "epoch": 0.73, "percentage": 24.36, "elapsed_time": "1:10:26", "remaining_time": "3:38:45"}
+{"current_steps": 870, "total_steps": 3531, "loss": 0.0169, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.290813060978839e-05, "epoch": 0.74, "percentage": 24.64, "elapsed_time": "1:11:10", "remaining_time": "3:37:41"}
+{"current_steps": 880, "total_steps": 3531, "loss": 0.0081, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.2752220072804564e-05, "epoch": 0.75, "percentage": 24.92, "elapsed_time": "1:12:00", "remaining_time": "3:36:56"}
+{"current_steps": 890, "total_steps": 3531, "loss": 0.0131, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.259490428412335e-05, "epoch": 0.76, "percentage": 25.21, "elapsed_time": "1:12:43", "remaining_time": "3:35:47"}
+{"current_steps": 900, "total_steps": 3531, "loss": 0.0205, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.243619569673814e-05, "epoch": 0.76, "percentage": 25.49, "elapsed_time": "1:13:33", "remaining_time": "3:35:01"}
+{"current_steps": 910, "total_steps": 3531, "loss": 0.0026, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.2276106873895143e-05, "epoch": 0.77, "percentage": 25.77, "elapsed_time": "1:14:18", "remaining_time": "3:34:02"}
+{"current_steps": 920, "total_steps": 3531, "loss": 0.018, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.2114650488098936e-05, "epoch": 0.78, "percentage": 26.05, "elapsed_time": "1:15:02", "remaining_time": "3:32:59"}
+{"current_steps": 930, "total_steps": 3531, "loss": 0.0083, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.19518393201093e-05, "epoch": 0.79, "percentage": 26.34, "elapsed_time": "1:15:46", "remaining_time": "3:31:56"}
+{"current_steps": 940, "total_steps": 3531, "loss": 0.0291, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.178768625792949e-05, "epoch": 0.8, "percentage": 26.62, "elapsed_time": "1:16:35", "remaining_time": "3:31:06"}
+{"current_steps": 950, "total_steps": 3531, "loss": 0.0226, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.162220429578605e-05, "epoch": 0.81, "percentage": 26.9, "elapsed_time": "1:17:20", "remaining_time": "3:30:06"}
+{"current_steps": 960, "total_steps": 3531, "loss": 0.0042, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.145540653310018e-05, "epoch": 0.82, "percentage": 27.19, "elapsed_time": "1:18:15", "remaining_time": "3:29:34"}
+{"current_steps": 970, "total_steps": 3531, "loss": 0.0078, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.128730617345084e-05, "epoch": 0.82, "percentage": 27.47, "elapsed_time": "1:19:05", "remaining_time": "3:28:47"}
+{"current_steps": 980, "total_steps": 3531, "loss": 0.0084, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.111791652352952e-05, "epoch": 0.83, "percentage": 27.75, "elapsed_time": "1:19:54", "remaining_time": "3:28:00"}
+{"current_steps": 990, "total_steps": 3531, "loss": 0.0044, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.094725099208688e-05, "epoch": 0.84, "percentage": 28.04, "elapsed_time": "1:20:38", "remaining_time": "3:26:58"}
+{"current_steps": 1000, "total_steps": 3531, "loss": 0.0011, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.077532308887141e-05, "epoch": 0.85, "percentage": 28.32, "elapsed_time": "1:21:29", "remaining_time": "3:26:14"}
+{"current_steps": 1000, "total_steps": 3531, "loss": null, "eval_loss": 0.01175768580287695, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.85, "percentage": 28.32, "elapsed_time": "1:21:29", "remaining_time": "3:26:14"}
+{"current_steps": 1010, "total_steps": 3531, "loss": 0.0011, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.060214642355989e-05, "epoch": 0.86, "percentage": 28.6, "elapsed_time": "1:23:41", "remaining_time": "3:28:53"}
+{"current_steps": 1020, "total_steps": 3531, "loss": 0.021, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.042773470468016e-05, "epoch": 0.87, "percentage": 28.89, "elapsed_time": "1:24:23", "remaining_time": "3:27:43"}
+{"current_steps": 1030, "total_steps": 3531, "loss": 0.0424, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.0252101738525916e-05, "epoch": 0.88, "percentage": 29.17, "elapsed_time": "1:25:17", "remaining_time": "3:27:05"}
+{"current_steps": 1040, "total_steps": 3531, "loss": 0.0194, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.0075261428063806e-05, "epoch": 0.88, "percentage": 29.45, "elapsed_time": "1:26:08", "remaining_time": "3:26:18"}
+{"current_steps": 1050, "total_steps": 3531, "loss": 0.0025, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.9897227771832924e-05, "epoch": 0.89, "percentage": 29.74, "elapsed_time": "1:26:56", "remaining_time": "3:25:25"}
+{"current_steps": 1060, "total_steps": 3531, "loss": 0.0044, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.971801486283665e-05, "epoch": 0.9, "percentage": 30.02, "elapsed_time": "1:27:46", "remaining_time": "3:24:36"}
+{"current_steps": 1070, "total_steps": 3531, "loss": 0.0051, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.953763688742708e-05, "epoch": 0.91, "percentage": 30.3, "elapsed_time": "1:28:37", "remaining_time": "3:23:50"}
+{"current_steps": 1080, "total_steps": 3531, "loss": 0.0071, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.9356108124182067e-05, "epoch": 0.92, "percentage": 30.59, "elapsed_time": "1:29:21", "remaining_time": "3:22:48"}
+{"current_steps": 1090, "total_steps": 3531, "loss": 0.0145, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.9173442942774885e-05, "epoch": 0.93, "percentage": 30.87, "elapsed_time": "1:30:12", "remaining_time": "3:22:00"}
+{"current_steps": 1100, "total_steps": 3531, "loss": 0.0371, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.898965580283681e-05, "epoch": 0.93, "percentage": 31.15, "elapsed_time": "1:30:55", "remaining_time": "3:20:55"}
+{"current_steps": 1110, "total_steps": 3531, "loss": 0.0076, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.880476125281244e-05, "epoch": 0.94, "percentage": 31.44, "elapsed_time": "1:31:47", "remaining_time": "3:20:13"}
+{"current_steps": 1120, "total_steps": 3531, "loss": 0.0035, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.861877392880808e-05, "epoch": 0.95, "percentage": 31.72, "elapsed_time": "1:32:42", "remaining_time": "3:19:34"}
+{"current_steps": 1130, "total_steps": 3531, "loss": 0.008, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.843170855343317e-05, "epoch": 0.96, "percentage": 32.0, "elapsed_time": "1:33:28", "remaining_time": "3:18:36"}
+{"current_steps": 1140, "total_steps": 3531, "loss": 0.0089, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.8243579934634846e-05, "epoch": 0.97, "percentage": 32.29, "elapsed_time": "1:34:16", "remaining_time": "3:17:44"}
+{"current_steps": 1150, "total_steps": 3531, "loss": 0.0034, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.805440296452574e-05, "epoch": 0.98, "percentage": 32.57, "elapsed_time": "1:35:03", "remaining_time": "3:16:48"}
+{"current_steps": 1160, "total_steps": 3531, "loss": 0.0019, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.786419261820514e-05, "epoch": 0.99, "percentage": 32.85, "elapsed_time": "1:35:53", "remaining_time": "3:15:59"}
+{"current_steps": 1170, "total_steps": 3531, "loss": 0.0164, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.7672963952573614e-05, "epoch": 0.99, "percentage": 33.14, "elapsed_time": "1:36:42", "remaining_time": "3:15:09"}
+{"current_steps": 1180, "total_steps": 3531, "loss": 0.0012, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.748073210514102e-05, "epoch": 1.0, "percentage": 33.42, "elapsed_time": "1:37:25", "remaining_time": "3:14:07"}
+{"current_steps": 1190, "total_steps": 3531, "loss": 0.0072, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.728751229282836e-05, "epoch": 1.01, "percentage": 33.7, "elapsed_time": "1:38:14", "remaining_time": "3:13:16"}
+{"current_steps": 1200, "total_steps": 3531, "loss": 0.0041, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.70933198107631e-05, "epoch": 1.02, "percentage": 33.98, "elapsed_time": "1:39:02", "remaining_time": "3:12:22"}
+{"current_steps": 1210, "total_steps": 3531, "loss": 0.0021, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.689817003106852e-05, "epoch": 1.03, "percentage": 34.27, "elapsed_time": "1:39:45", "remaining_time": "3:11:20"}
+{"current_steps": 1220, "total_steps": 3531, "loss": 0.0024, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.670207840164678e-05, "epoch": 1.04, "percentage": 34.55, "elapsed_time": "1:40:32", "remaining_time": "3:10:27"}
+{"current_steps": 1230, "total_steps": 3531, "loss": 0.0026, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.650506044495615e-05, "epoch": 1.05, "percentage": 34.83, "elapsed_time": "1:41:23", "remaining_time": "3:09:40"}
+{"current_steps": 1240, "total_steps": 3531, "loss": 0.005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.630713175678222e-05, "epoch": 1.05, "percentage": 35.12, "elapsed_time": "1:42:11", "remaining_time": "3:08:49"}
+{"current_steps": 1250, "total_steps": 3531, "loss": 0.0069, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.610830800500335e-05, "epoch": 1.06, "percentage": 35.4, "elapsed_time": "1:43:06", "remaining_time": "3:08:09"}
+{"current_steps": 1260, "total_steps": 3531, "loss": 0.0081, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.590860492835046e-05, "epoch": 1.07, "percentage": 35.68, "elapsed_time": "1:43:59", "remaining_time": "3:07:26"}
+{"current_steps": 1270, "total_steps": 3531, "loss": 0.0107, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.5708038335161134e-05, "epoch": 1.08, "percentage": 35.97, "elapsed_time": "1:44:51", "remaining_time": "3:06:40"}
+{"current_steps": 1280, "total_steps": 3531, "loss": 0.0074, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.550662410212819e-05, "epoch": 1.09, "percentage": 36.25, "elapsed_time": "1:45:35", "remaining_time": "3:05:41"}
+{"current_steps": 1290, "total_steps": 3531, "loss": 0.0048, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.5304378173043e-05, "epoch": 1.1, "percentage": 36.53, "elapsed_time": "1:46:22", "remaining_time": "3:04:46"}
+{"current_steps": 1300, "total_steps": 3531, "loss": 0.0006, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.5101316557533294e-05, "epoch": 1.1, "percentage": 36.82, "elapsed_time": "1:47:13", "remaining_time": "3:04:00"}
+{"current_steps": 1310, "total_steps": 3531, "loss": 0.0076, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.489745532979593e-05, "epoch": 1.11, "percentage": 37.1, "elapsed_time": "1:47:59", "remaining_time": "3:03:05"}
+{"current_steps": 1320, "total_steps": 3531, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.469281062732442e-05, "epoch": 1.12, "percentage": 37.38, "elapsed_time": "1:48:52", "remaining_time": "3:02:21"}
+{"current_steps": 1330, "total_steps": 3531, "loss": 0.0073, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.448739864963154e-05, "epoch": 1.13, "percentage": 37.67, "elapsed_time": "1:49:37", "remaining_time": "3:01:25"}
+{"current_steps": 1340, "total_steps": 3531, "loss": 0.0008, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.4281235656966915e-05, "epoch": 1.14, "percentage": 37.95, "elapsed_time": "1:50:24", "remaining_time": "3:00:30"}
+{"current_steps": 1350, "total_steps": 3531, "loss": 0.001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.4074337969029965e-05, "epoch": 1.15, "percentage": 38.23, "elapsed_time": "1:51:09", "remaining_time": "2:59:35"}
+{"current_steps": 1360, "total_steps": 3531, "loss": 0.0047, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.386672196367799e-05, "epoch": 1.16, "percentage": 38.52, "elapsed_time": "1:52:06", "remaining_time": "2:58:58"}
+{"current_steps": 1370, "total_steps": 3531, "loss": 0.0131, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.365840407562974e-05, "epoch": 1.16, "percentage": 38.8, "elapsed_time": "1:52:52", "remaining_time": "2:58:02"}
+{"current_steps": 1380, "total_steps": 3531, "loss": 0.001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.3449400795164416e-05, "epoch": 1.17, "percentage": 39.08, "elapsed_time": "1:53:36", "remaining_time": "2:57:04"}
+{"current_steps": 1390, "total_steps": 3531, "loss": 0.0058, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.323972866681637e-05, "epoch": 1.18, "percentage": 39.37, "elapsed_time": "1:54:22", "remaining_time": "2:56:09"}
+{"current_steps": 1400, "total_steps": 3531, "loss": 0.0047, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.3029404288065426e-05, "epoch": 1.19, "percentage": 39.65, "elapsed_time": "1:55:10", "remaining_time": "2:55:18"}
+{"current_steps": 1410, "total_steps": 3531, "loss": 0.0029, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.2818444308023e-05, "epoch": 1.2, "percentage": 39.93, "elapsed_time": "1:55:56", "remaining_time": "2:54:24"}
+{"current_steps": 1420, "total_steps": 3531, "loss": 0.0073, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.2606865426114234e-05, "epoch": 1.21, "percentage": 40.22, "elapsed_time": "1:56:46", "remaining_time": "2:53:36"}
+{"current_steps": 1430, "total_steps": 3531, "loss": 0.0006, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.239468439075604e-05, "epoch": 1.21, "percentage": 40.5, "elapsed_time": "1:57:36", "remaining_time": "2:52:47"}
+{"current_steps": 1440, "total_steps": 3531, "loss": 0.0028, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.2181917998031326e-05, "epoch": 1.22, "percentage": 40.78, "elapsed_time": "1:58:17", "remaining_time": "2:51:46"}
+{"current_steps": 1450, "total_steps": 3531, "loss": 0.0003, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.196858309035941e-05, "epoch": 1.23, "percentage": 41.06, "elapsed_time": "1:59:09", "remaining_time": "2:51:00"}
+{"current_steps": 1460, "total_steps": 3531, "loss": 0.0007, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.175469655516284e-05, "epoch": 1.24, "percentage": 41.35, "elapsed_time": "1:59:58", "remaining_time": "2:50:10"}
+{"current_steps": 1470, "total_steps": 3531, "loss": 0.0037, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.154027532353052e-05, "epoch": 1.25, "percentage": 41.63, "elapsed_time": "2:00:51", "remaining_time": "2:49:27"}
+{"current_steps": 1480, "total_steps": 3531, "loss": 0.0065, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.132533636887753e-05, "epoch": 1.26, "percentage": 41.91, "elapsed_time": "2:01:40", "remaining_time": "2:48:37"}
+{"current_steps": 1490, "total_steps": 3531, "loss": 0.0092, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.1109896705601485e-05, "epoch": 1.27, "percentage": 42.2, "elapsed_time": "2:02:28", "remaining_time": "2:47:46"}
+{"current_steps": 1500, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.0893973387735687e-05, "epoch": 1.27, "percentage": 42.48, "elapsed_time": "2:03:13", "remaining_time": "2:46:50"}
+{"current_steps": 1500, "total_steps": 3531, "loss": null, "eval_loss": 0.010954583063721657, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.27, "percentage": 42.48, "elapsed_time": "2:03:13", "remaining_time": "2:46:50"}
+{"current_steps": 1510, "total_steps": 3531, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.067758350759917e-05, "epoch": 1.28, "percentage": 42.76, "elapsed_time": "2:05:24", "remaining_time": "2:47:51"}
+{"current_steps": 1520, "total_steps": 3531, "loss": 0.0004, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.046074419444366e-05, "epoch": 1.29, "percentage": 43.05, "elapsed_time": "2:06:10", "remaining_time": "2:46:55"}
+{"current_steps": 1530, "total_steps": 3531, "loss": 0.001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.0243472613097656e-05, "epoch": 1.3, "percentage": 43.33, "elapsed_time": "2:07:01", "remaining_time": "2:46:07"}
+{"current_steps": 1540, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.002578596260765e-05, "epoch": 1.31, "percentage": 43.61, "elapsed_time": "2:07:47", "remaining_time": "2:45:13"}
+{"current_steps": 1550, "total_steps": 3531, "loss": 0.0086, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.980770147487668e-05, "epoch": 1.32, "percentage": 43.9, "elapsed_time": "2:08:29", "remaining_time": "2:44:13"}
+{"current_steps": 1560, "total_steps": 3531, "loss": 0.0021, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.958923641330028e-05, "epoch": 1.33, "percentage": 44.18, "elapsed_time": "2:09:24", "remaining_time": "2:43:30"}
+{"current_steps": 1570, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9370408071399898e-05, "epoch": 1.33, "percentage": 44.46, "elapsed_time": "2:10:11", "remaining_time": "2:42:37"}
+{"current_steps": 1580, "total_steps": 3531, "loss": 0.0076, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9151233771453956e-05, "epoch": 1.34, "percentage": 44.75, "elapsed_time": "2:11:11", "remaining_time": "2:41:59"}
+{"current_steps": 1590, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8931730863126666e-05, "epoch": 1.35, "percentage": 45.03, "elapsed_time": "2:12:04", "remaining_time": "2:41:14"}
+{"current_steps": 1600, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.871191672209459e-05, "epoch": 1.36, "percentage": 45.31, "elapsed_time": "2:12:56", "remaining_time": "2:40:26"}
+{"current_steps": 1610, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8491808748671255e-05, "epoch": 1.37, "percentage": 45.6, "elapsed_time": "2:13:44", "remaining_time": "2:39:34"}
+{"current_steps": 1620, "total_steps": 3531, "loss": 0.0115, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8271424366429706e-05, "epoch": 1.38, "percentage": 45.88, "elapsed_time": "2:14:35", "remaining_time": "2:38:45"}
+{"current_steps": 1630, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8050781020823296e-05, "epoch": 1.38, "percentage": 46.16, "elapsed_time": "2:15:21", "remaining_time": "2:37:51"}
+{"current_steps": 1640, "total_steps": 3531, "loss": 0.0003, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7829896177804716e-05, "epoch": 1.39, "percentage": 46.45, "elapsed_time": "2:16:13", "remaining_time": "2:37:04"}
+{"current_steps": 1650, "total_steps": 3531, "loss": 0.0003, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.760878732244339e-05, "epoch": 1.4, "percentage": 46.73, "elapsed_time": "2:17:04", "remaining_time": "2:36:15"}
+{"current_steps": 1660, "total_steps": 3531, "loss": 0.0024, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7387471957541405e-05, "epoch": 1.41, "percentage": 47.01, "elapsed_time": "2:17:52", "remaining_time": "2:35:24"}
+{"current_steps": 1670, "total_steps": 3531, "loss": 0.0005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7165967602247964e-05, "epoch": 1.42, "percentage": 47.3, "elapsed_time": "2:18:40", "remaining_time": "2:34:31"}
+{"current_steps": 1680, "total_steps": 3531, "loss": 0.0018, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.694429179067261e-05, "epoch": 1.43, "percentage": 47.58, "elapsed_time": "2:19:25", "remaining_time": "2:33:37"}
+{"current_steps": 1690, "total_steps": 3531, "loss": 0.002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6744651468034758e-05, "epoch": 1.44, "percentage": 47.86, "elapsed_time": "2:20:14", "remaining_time": "2:32:46"}
+{"current_steps": 1700, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6522698243485527e-05, "epoch": 1.44, "percentage": 48.15, "elapsed_time": "2:20:59", "remaining_time": "2:31:51"}
+{"current_steps": 1710, "total_steps": 3531, "loss": 0.0058, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6300624483347926e-05, "epoch": 1.45, "percentage": 48.43, "elapsed_time": "2:21:44", "remaining_time": "2:30:56"}
+{"current_steps": 1720, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.607844776680513e-05, "epoch": 1.46, "percentage": 48.71, "elapsed_time": "2:22:34", "remaining_time": "2:30:07"}
+{"current_steps": 1730, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.585618568119027e-05, "epoch": 1.47, "percentage": 48.99, "elapsed_time": "2:23:21", "remaining_time": "2:29:14"}
+{"current_steps": 1740, "total_steps": 3531, "loss": 0.0008, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.56338558205942e-05, "epoch": 1.48, "percentage": 49.28, "elapsed_time": "2:24:08", "remaining_time": "2:28:21"}
+{"current_steps": 1750, "total_steps": 3531, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.5411475784472805e-05, "epoch": 1.49, "percentage": 49.56, "elapsed_time": "2:24:58", "remaining_time": "2:27:32"}
+{"current_steps": 1760, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.5189063176253825e-05, "epoch": 1.5, "percentage": 49.84, "elapsed_time": "2:25:41", "remaining_time": "2:26:36"}
+{"current_steps": 1770, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.496663560194338e-05, "epoch": 1.5, "percentage": 50.13, "elapsed_time": "2:26:28", "remaining_time": "2:25:43"}
+{"current_steps": 1780, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.4744210668732295e-05, "epoch": 1.51, "percentage": 50.41, "elapsed_time": "2:27:14", "remaining_time": "2:24:50"}
+{"current_steps": 1790, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.452180598360232e-05, "epoch": 1.52, "percentage": 50.69, "elapsed_time": "2:28:01", "remaining_time": "2:23:58"}
+{"current_steps": 1800, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.429943915193239e-05, "epoch": 1.53, "percentage": 50.98, "elapsed_time": "2:28:52", "remaining_time": "2:23:10"}
+{"current_steps": 1810, "total_steps": 3531, "loss": 0.0146, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.4077127776104984e-05, "epoch": 1.54, "percentage": 51.26, "elapsed_time": "2:29:40", "remaining_time": "2:22:18"}
+{"current_steps": 1820, "total_steps": 3531, "loss": 0.0017, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.3854889454112748e-05, "epoch": 1.55, "percentage": 51.54, "elapsed_time": "2:30:26", "remaining_time": "2:21:25"}
+{"current_steps": 1830, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.3632741778165442e-05, "epoch": 1.55, "percentage": 51.83, "elapsed_time": "2:31:09", "remaining_time": "2:20:30"}
+{"current_steps": 1840, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.3410702333297356e-05, "epoch": 1.56, "percentage": 52.11, "elapsed_time": "2:31:58", "remaining_time": "2:19:40"}
+{"current_steps": 1850, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.318878869597528e-05, "epoch": 1.57, "percentage": 52.39, "elapsed_time": "2:32:47", "remaining_time": "2:18:49"}
+{"current_steps": 1860, "total_steps": 3531, "loss": 0.0073, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2967018432707213e-05, "epoch": 1.58, "percentage": 52.68, "elapsed_time": "2:33:35", "remaining_time": "2:17:59"}
+{"current_steps": 1870, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2745409098651744e-05, "epoch": 1.59, "percentage": 52.96, "elapsed_time": "2:34:20", "remaining_time": "2:17:05"}
+{"current_steps": 1880, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2523978236228442e-05, "epoch": 1.6, "percentage": 53.24, "elapsed_time": "2:35:06", "remaining_time": "2:16:12"}
+{"current_steps": 1890, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2302743373729205e-05, "epoch": 1.61, "percentage": 53.53, "elapsed_time": "2:35:55", "remaining_time": "2:15:22"}
+{"current_steps": 1900, "total_steps": 3531, "loss": 0.0136, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2081722023930743e-05, "epoch": 1.61, "percentage": 53.81, "elapsed_time": "2:36:42", "remaining_time": "2:14:31"}
+{"current_steps": 1910, "total_steps": 3531, "loss": 0.0051, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.1860931682708248e-05, "epoch": 1.62, "percentage": 54.09, "elapsed_time": "2:37:31", "remaining_time": "2:13:41"}
+{"current_steps": 1920, "total_steps": 3531, "loss": 0.0004, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.164038982765047e-05, "epoch": 1.63, "percentage": 54.38, "elapsed_time": "2:38:20", "remaining_time": "2:12:51"}
+{"current_steps": 1930, "total_steps": 3531, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.1420113916676183e-05, "epoch": 1.64, "percentage": 54.66, "elapsed_time": "2:39:08", "remaining_time": "2:12:00"}
+{"current_steps": 1940, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.1200121386652246e-05, "epoch": 1.65, "percentage": 54.94, "elapsed_time": "2:39:54", "remaining_time": "2:11:08"}
+{"current_steps": 1950, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0980429652013297e-05, "epoch": 1.66, "percentage": 55.23, "elapsed_time": "2:40:39", "remaining_time": "2:10:15"}
+{"current_steps": 1960, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0761056103383258e-05, "epoch": 1.67, "percentage": 55.51, "elapsed_time": "2:41:20", "remaining_time": "2:09:19"}
+{"current_steps": 1970, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0542018106198697e-05, "epoch": 1.67, "percentage": 55.79, "elapsed_time": "2:42:03", "remaining_time": "2:08:24"}
+{"current_steps": 1980, "total_steps": 3531, "loss": 0.005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0323332999334198e-05, "epoch": 1.68, "percentage": 56.07, "elapsed_time": "2:42:44", "remaining_time": "2:07:29"}
+{"current_steps": 1990, "total_steps": 3531, "loss": 0.0149, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.010501809372981e-05, "epoch": 1.69, "percentage": 56.36, "elapsed_time": "2:43:46", "remaining_time": "2:06:49"}
+{"current_steps": 2000, "total_steps": 3531, "loss": 0.0143, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.988709067102076e-05, "epoch": 1.7, "percentage": 56.64, "elapsed_time": "2:44:34", "remaining_time": "2:05:58"}
+{"current_steps": 2000, "total_steps": 3531, "loss": null, "eval_loss": 0.013543435372412205, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.7, "percentage": 56.64, "elapsed_time": "2:44:34", "remaining_time": "2:05:58"}
+{"current_steps": 2010, "total_steps": 3531, "loss": 0.0017, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.966956798216943e-05, "epoch": 1.71, "percentage": 56.92, "elapsed_time": "2:46:50", "remaining_time": "2:06:14"}
+{"current_steps": 2020, "total_steps": 3531, "loss": 0.0038, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.945246724609978e-05, "epoch": 1.72, "percentage": 57.21, "elapsed_time": "2:47:34", "remaining_time": "2:05:20"}
+{"current_steps": 2030, "total_steps": 3531, "loss": 0.0003, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.9235805648334342e-05, "epoch": 1.72, "percentage": 57.49, "elapsed_time": "2:48:23", "remaining_time": "2:04:30"}
+{"current_steps": 2040, "total_steps": 3531, "loss": 0.0005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.9019600339633798e-05, "epoch": 1.73, "percentage": 57.77, "elapsed_time": "2:49:08", "remaining_time": "2:03:37"}
+{"current_steps": 2050, "total_steps": 3531, "loss": 0.0005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.8803868434639345e-05, "epoch": 1.74, "percentage": 58.06, "elapsed_time": "2:49:50", "remaining_time": "2:02:42"}
+{"current_steps": 2060, "total_steps": 3531, "loss": 0.0009, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.858862701051791e-05, "epoch": 1.75, "percentage": 58.34, "elapsed_time": "2:50:42", "remaining_time": "2:01:54"}
+{"current_steps": 2070, "total_steps": 3531, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.8373893105610356e-05, "epoch": 1.76, "percentage": 58.62, "elapsed_time": "2:51:24", "remaining_time": "2:00:58"}
+{"current_steps": 2080, "total_steps": 3531, "loss": 0.0036, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.815968371808273e-05, "epoch": 1.77, "percentage": 58.91, "elapsed_time": "2:52:07", "remaining_time": "2:00:04"}
+{"current_steps": 2090, "total_steps": 3531, "loss": 0.0138, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7946015804580688e-05, "epoch": 1.78, "percentage": 59.19, "elapsed_time": "2:52:59", "remaining_time": "1:59:16"}
+{"current_steps": 2100, "total_steps": 3531, "loss": 0.0005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7732906278887225e-05, "epoch": 1.78, "percentage": 59.47, "elapsed_time": "2:53:55", "remaining_time": "1:58:30"}
+{"current_steps": 2110, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7520372010583815e-05, "epoch": 1.79, "percentage": 59.76, "elapsed_time": "2:54:47", "remaining_time": "1:57:43"}
+{"current_steps": 2120, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7308429823714995e-05, "epoch": 1.8, "percentage": 60.04, "elapsed_time": "2:55:30", "remaining_time": "1:56:48"}
+{"current_steps": 2130, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.709709649545662e-05, "epoch": 1.81, "percentage": 60.32, "elapsed_time": "2:56:15", "remaining_time": "1:55:55"}
+{"current_steps": 2140, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.688638875478777e-05, "epoch": 1.82, "percentage": 60.61, "elapsed_time": "2:57:03", "remaining_time": "1:55:05"}
+{"current_steps": 2150, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.66763232811665e-05, "epoch": 1.83, "percentage": 60.89, "elapsed_time": "2:57:49", "remaining_time": "1:54:13"}
+{"current_steps": 2160, "total_steps": 3531, "loss": 0.012, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6466916703209535e-05, "epoch": 1.84, "percentage": 61.17, "elapsed_time": "2:58:32", "remaining_time": "1:53:19"}
+{"current_steps": 2170, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.625818559737592e-05, "epoch": 1.84, "percentage": 61.46, "elapsed_time": "2:59:20", "remaining_time": "1:52:29"}
+{"current_steps": 2180, "total_steps": 3531, "loss": 0.0005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.605014648665486e-05, "epoch": 1.85, "percentage": 61.74, "elapsed_time": "3:00:11", "remaining_time": "1:51:40"}
+{"current_steps": 2190, "total_steps": 3531, "loss": 0.0047, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.584281583925779e-05, "epoch": 1.86, "percentage": 62.02, "elapsed_time": "3:00:56", "remaining_time": "1:50:47"}
+{"current_steps": 2200, "total_steps": 3531, "loss": 0.0126, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5636210067314744e-05, "epoch": 1.87, "percentage": 62.31, "elapsed_time": "3:01:41", "remaining_time": "1:49:55"}
+{"current_steps": 2210, "total_steps": 3531, "loss": 0.0015, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5430345525575186e-05, "epoch": 1.88, "percentage": 62.59, "elapsed_time": "3:02:31", "remaining_time": "1:49:06"}
+{"current_steps": 2220, "total_steps": 3531, "loss": 0.0018, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5225238510113377e-05, "epoch": 1.89, "percentage": 62.87, "elapsed_time": "3:03:17", "remaining_time": "1:48:14"}
+{"current_steps": 2230, "total_steps": 3531, "loss": 0.0057, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5020905257038403e-05, "epoch": 1.89, "percentage": 63.15, "elapsed_time": "3:04:14", "remaining_time": "1:47:29"}
+{"current_steps": 2240, "total_steps": 3531, "loss": 0.0036, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.481736194120894e-05, "epoch": 1.9, "percentage": 63.44, "elapsed_time": "3:04:58", "remaining_time": "1:46:36"}
+{"current_steps": 2250, "total_steps": 3531, "loss": 0.0006, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4614624674952842e-05, "epoch": 1.91, "percentage": 63.72, "elapsed_time": "3:05:56", "remaining_time": "1:45:52"}
+{"current_steps": 2260, "total_steps": 3531, "loss": 0.0054, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4412709506791725e-05, "epoch": 1.92, "percentage": 64.0, "elapsed_time": "3:06:46", "remaining_time": "1:45:02"}
+{"current_steps": 2270, "total_steps": 3531, "loss": 0.0039, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4211632420170558e-05, "epoch": 1.93, "percentage": 64.29, "elapsed_time": "3:07:38", "remaining_time": "1:44:14"}
+{"current_steps": 2280, "total_steps": 3531, "loss": 0.0017, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4011409332192472e-05, "epoch": 1.94, "percentage": 64.57, "elapsed_time": "3:08:26", "remaining_time": "1:43:23"}
+{"current_steps": 2290, "total_steps": 3531, "loss": 0.0181, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3812056092358686e-05, "epoch": 1.95, "percentage": 64.85, "elapsed_time": "3:09:13", "remaining_time": "1:42:32"}
+{"current_steps": 2300, "total_steps": 3531, "loss": 0.0035, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3613588481313977e-05, "epoch": 1.95, "percentage": 65.14, "elapsed_time": "3:10:01", "remaining_time": "1:41:42"}
+{"current_steps": 2310, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3416022209597429e-05, "epoch": 1.96, "percentage": 65.42, "elapsed_time": "3:10:49", "remaining_time": "1:40:51"}
+{"current_steps": 2320, "total_steps": 3531, "loss": 0.0005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3219372916398826e-05, "epoch": 1.97, "percentage": 65.7, "elapsed_time": "3:11:36", "remaining_time": "1:40:00"}
+{"current_steps": 2330, "total_steps": 3531, "loss": 0.0012, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.302365616832063e-05, "epoch": 1.98, "percentage": 65.99, "elapsed_time": "3:12:23", "remaining_time": "1:39:10"}
+{"current_steps": 2340, "total_steps": 3531, "loss": 0.0052, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2828887458145806e-05, "epoch": 1.99, "percentage": 66.27, "elapsed_time": "3:13:16", "remaining_time": "1:38:22"}
+{"current_steps": 2350, "total_steps": 3531, "loss": 0.0008, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2635082203611375e-05, "epoch": 2.0, "percentage": 66.55, "elapsed_time": "3:14:02", "remaining_time": "1:37:31"}
+{"current_steps": 2360, "total_steps": 3531, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2442255746187954e-05, "epoch": 2.01, "percentage": 66.84, "elapsed_time": "3:14:42", "remaining_time": "1:36:36"}
+{"current_steps": 2370, "total_steps": 3531, "loss": 0.0009, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2250423349865387e-05, "epoch": 2.01, "percentage": 67.12, "elapsed_time": "3:15:30", "remaining_time": "1:35:46"}
+{"current_steps": 2380, "total_steps": 3531, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2059600199944388e-05, "epoch": 2.02, "percentage": 67.4, "elapsed_time": "3:16:18", "remaining_time": "1:34:56"}
+{"current_steps": 2390, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1869801401834564e-05, "epoch": 2.03, "percentage": 67.69, "elapsed_time": "3:17:04", "remaining_time": "1:34:04"}
+{"current_steps": 2400, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1681041979858626e-05, "epoch": 2.04, "percentage": 67.97, "elapsed_time": "3:17:46", "remaining_time": "1:33:12"}
+{"current_steps": 2410, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1493336876063071e-05, "epoch": 2.05, "percentage": 68.25, "elapsed_time": "3:18:28", "remaining_time": "1:32:19"}
+{"current_steps": 2420, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1306700949035462e-05, "epoch": 2.06, "percentage": 68.54, "elapsed_time": "3:19:11", "remaining_time": "1:31:26"}
+{"current_steps": 2430, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1121148972728104e-05, "epoch": 2.06, "percentage": 68.82, "elapsed_time": "3:20:00", "remaining_time": "1:30:37"}
+{"current_steps": 2440, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0936695635288674e-05, "epoch": 2.07, "percentage": 69.1, "elapsed_time": "3:20:45", "remaining_time": "1:29:46"}
+{"current_steps": 2450, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0753355537897427e-05, "epoch": 2.08, "percentage": 69.39, "elapsed_time": "3:21:37", "remaining_time": "1:28:57"}
+{"current_steps": 2460, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0571143193611444e-05, "epoch": 2.09, "percentage": 69.67, "elapsed_time": "3:22:27", "remaining_time": "1:28:08"}
+{"current_steps": 2470, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.039007302621576e-05, "epoch": 2.1, "percentage": 69.95, "elapsed_time": "3:23:12", "remaining_time": "1:27:17"}
+{"current_steps": 2480, "total_steps": 3531, "loss": 0.0003, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0210159369081568e-05, "epoch": 2.11, "percentage": 70.24, "elapsed_time": "3:24:04", "remaining_time": "1:26:29"}
+{"current_steps": 2490, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0031416464031654e-05, "epoch": 2.12, "percentage": 70.52, "elapsed_time": "3:24:52", "remaining_time": "1:25:38"}
+{"current_steps": 2500, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.853858460212962e-06, "epoch": 2.12, "percentage": 70.8, "elapsed_time": "3:25:44", "remaining_time": "1:24:50"}
+{"current_steps": 2500, "total_steps": 3531, "loss": null, "eval_loss": 0.012909023091197014, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 2.12, "percentage": 70.8, "elapsed_time": "3:25:44", "remaining_time": "1:24:50"}
+{"current_steps": 2510, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.677499412976632e-06, "epoch": 2.13, "percentage": 71.08, "elapsed_time": "3:27:55", "remaining_time": "1:24:34"}
+{"current_steps": 2520, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.502353282765306e-06, "epoch": 2.14, "percentage": 71.37, "elapsed_time": "3:28:45", "remaining_time": "1:23:45"}
+{"current_steps": 2530, "total_steps": 3531, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.328433934008107e-06, "epoch": 2.15, "percentage": 71.65, "elapsed_time": "3:29:37", "remaining_time": "1:22:56"}
+{"current_steps": 2540, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.155755134023097e-06, "epoch": 2.16, "percentage": 71.93, "elapsed_time": "3:30:22", "remaining_time": "1:22:04"}
+{"current_steps": 2550, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.984330551927475e-06, "epoch": 2.17, "percentage": 72.22, "elapsed_time": "3:31:15", "remaining_time": "1:21:16"}
+{"current_steps": 2560, "total_steps": 3531, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.81417375755556e-06, "epoch": 2.18, "percentage": 72.5, "elapsed_time": "3:32:03", "remaining_time": "1:20:26"}
+{"current_steps": 2570, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.645298220384567e-06, "epoch": 2.18, "percentage": 72.78, "elapsed_time": "3:32:53", "remaining_time": "1:19:36"}
+{"current_steps": 2580, "total_steps": 3531, "loss": 0.0008, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.477717308468442e-06, "epoch": 2.19, "percentage": 73.07, "elapsed_time": "3:33:38", "remaining_time": "1:18:45"}
+{"current_steps": 2590, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.31144428737958e-06, "epoch": 2.2, "percentage": 73.35, "elapsed_time": "3:34:33", "remaining_time": "1:17:57"}
+{"current_steps": 2600, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.146492319158805e-06, "epoch": 2.21, "percentage": 73.63, "elapsed_time": "3:35:24", "remaining_time": "1:17:08"}
+{"current_steps": 2610, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.982874461273438e-06, "epoch": 2.22, "percentage": 73.92, "elapsed_time": "3:36:10", "remaining_time": "1:16:16"}
+{"current_steps": 2620, "total_steps": 3531, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.820603665583654e-06, "epoch": 2.23, "percentage": 74.2, "elapsed_time": "3:36:52", "remaining_time": "1:15:24"}
+{"current_steps": 2630, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.659692777317288e-06, "epoch": 2.23, "percentage": 74.48, "elapsed_time": "3:37:41", "remaining_time": "1:14:34"}
+{"current_steps": 2640, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.500154534052933e-06, "epoch": 2.24, "percentage": 74.77, "elapsed_time": "3:38:26", "remaining_time": "1:13:43"}
+{"current_steps": 2650, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.342001564711756e-06, "epoch": 2.25, "percentage": 75.05, "elapsed_time": "3:39:20", "remaining_time": "1:12:55"}
+{"current_steps": 2660, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.185246388557665e-06, "epoch": 2.26, "percentage": 75.33, "elapsed_time": "3:40:10", "remaining_time": "1:12:05"}
+{"current_steps": 2670, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.0299014142064106e-06, "epoch": 2.27, "percentage": 75.62, "elapsed_time": "3:40:57", "remaining_time": "1:11:15"}
+{"current_steps": 2680, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.875978938643277e-06, "epoch": 2.28, "percentage": 75.9, "elapsed_time": "3:41:52", "remaining_time": "1:10:27"}
+{"current_steps": 2690, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.723491146249647e-06, "epoch": 2.29, "percentage": 76.18, "elapsed_time": "3:42:34", "remaining_time": "1:09:35"}
+{"current_steps": 2700, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.572450107838551e-06, "epoch": 2.29, "percentage": 76.47, "elapsed_time": "3:43:23", "remaining_time": "1:08:45"}
+{"current_steps": 2710, "total_steps": 3531, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.422867779699088e-06, "epoch": 2.3, "percentage": 76.75, "elapsed_time": "3:44:05", "remaining_time": "1:07:53"}
+{"current_steps": 2720, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.274756002650034e-06, "epoch": 2.31, "percentage": 77.03, "elapsed_time": "3:44:50", "remaining_time": "1:07:02"}
+{"current_steps": 2730, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.128126501102479e-06, "epoch": 2.32, "percentage": 77.32, "elapsed_time": "3:45:42", "remaining_time": "1:06:13"}
+{"current_steps": 2740, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.982990882131775e-06, "epoch": 2.33, "percentage": 77.6, "elapsed_time": "3:46:27", "remaining_time": "1:05:22"}
+{"current_steps": 2750, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.83936063455871e-06, "epoch": 2.34, "percentage": 77.88, "elapsed_time": "3:47:12", "remaining_time": "1:04:31"}
+{"current_steps": 2760, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.697247128040037e-06, "epoch": 2.34, "percentage": 78.16, "elapsed_time": "3:47:54", "remaining_time": "1:03:39"}
+{"current_steps": 2770, "total_steps": 3531, "loss": 0.0003, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.556661612168537e-06, "epoch": 2.35, "percentage": 78.45, "elapsed_time": "3:48:41", "remaining_time": "1:02:49"}
+{"current_steps": 2780, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.417615215582408e-06, "epoch": 2.36, "percentage": 78.73, "elapsed_time": "3:49:41", "remaining_time": "1:02:02"}
+{"current_steps": 2790, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.280118945084422e-06, "epoch": 2.37, "percentage": 79.01, "elapsed_time": "3:50:25", "remaining_time": "1:01:12"}
+{"current_steps": 2800, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.144183684770565e-06, "epoch": 2.38, "percentage": 79.3, "elapsed_time": "3:51:18", "remaining_time": "1:00:23"}
+{"current_steps": 2810, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.00982019516851e-06, "epoch": 2.39, "percentage": 79.58, "elapsed_time": "3:52:07", "remaining_time": "0:59:33"}
+{"current_steps": 2820, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.877039112385815e-06, "epoch": 2.4, "percentage": 79.86, "elapsed_time": "3:52:53", "remaining_time": "0:58:43"}
+{"current_steps": 2830, "total_steps": 3531, "loss": 0.0003, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.74585094726793e-06, "epoch": 2.4, "percentage": 80.15, "elapsed_time": "3:53:40", "remaining_time": "0:57:53"}
+{"current_steps": 2840, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.616266084566243e-06, "epoch": 2.41, "percentage": 80.43, "elapsed_time": "3:54:29", "remaining_time": "0:57:03"}
+{"current_steps": 2850, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.488294782115957e-06, "epoch": 2.42, "percentage": 80.71, "elapsed_time": "3:55:14", "remaining_time": "0:56:12"}
+{"current_steps": 2860, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.361947170024144e-06, "epoch": 2.43, "percentage": 81.0, "elapsed_time": "3:55:58", "remaining_time": "0:55:21"}
+{"current_steps": 2870, "total_steps": 3531, "loss": 0.0007, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.2372332498678256e-06, "epoch": 2.44, "percentage": 81.28, "elapsed_time": "3:56:48", "remaining_time": "0:54:32"}
+{"current_steps": 2880, "total_steps": 3531, "loss": 0.0061, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.11416289390226e-06, "epoch": 2.45, "percentage": 81.56, "elapsed_time": "3:57:34", "remaining_time": "0:53:42"}
+{"current_steps": 2890, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.992745844279475e-06, "epoch": 2.46, "percentage": 81.85, "elapsed_time": "3:58:24", "remaining_time": "0:52:52"}
+{"current_steps": 2900, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.872991712277052e-06, "epoch": 2.46, "percentage": 82.13, "elapsed_time": "3:59:17", "remaining_time": "0:52:04"}
+{"current_steps": 2910, "total_steps": 3531, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.7549099775373576e-06, "epoch": 2.47, "percentage": 82.41, "elapsed_time": "4:00:03", "remaining_time": "0:51:13"}
+{"current_steps": 2920, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.6385099873170875e-06, "epoch": 2.48, "percentage": 82.7, "elapsed_time": "4:00:53", "remaining_time": "0:50:24"}
+{"current_steps": 2930, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.5238009557473946e-06, "epoch": 2.49, "percentage": 82.98, "elapsed_time": "4:01:40", "remaining_time": "0:49:34"}
+{"current_steps": 2940, "total_steps": 3531, "loss": 0.0028, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.4107919631044732e-06, "epoch": 2.5, "percentage": 83.26, "elapsed_time": "4:02:24", "remaining_time": "0:48:43"}
+{"current_steps": 2950, "total_steps": 3531, "loss": 0.0003, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.299491955090775e-06, "epoch": 2.51, "percentage": 83.55, "elapsed_time": "4:03:11", "remaining_time": "0:47:53"}
+{"current_steps": 2960, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.1899097421268924e-06, "epoch": 2.51, "percentage": 83.83, "elapsed_time": "4:03:55", "remaining_time": "0:47:03"}
+{"current_steps": 2970, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.0820539986541054e-06, "epoch": 2.52, "percentage": 84.11, "elapsed_time": "4:04:54", "remaining_time": "0:46:15"}
+{"current_steps": 2980, "total_steps": 3531, "loss": 0.0013, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.97593326244775e-06, "epoch": 2.53, "percentage": 84.4, "elapsed_time": "4:05:45", "remaining_time": "0:45:26"}
+{"current_steps": 2990, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.871555933941353e-06, "epoch": 2.54, "percentage": 84.68, "elapsed_time": "4:06:33", "remaining_time": "0:44:36"}
+{"current_steps": 3000, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7689302755616736e-06, "epoch": 2.55, "percentage": 84.96, "elapsed_time": "4:07:27", "remaining_time": "0:43:47"}
+{"current_steps": 3000, "total_steps": 3531, "loss": null, "eval_loss": 0.014460938051342964, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 2.55, "percentage": 84.96, "elapsed_time": "4:07:27", "remaining_time": "0:43:47"}
+{"current_steps": 3010, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6680644110746305e-06, "epoch": 2.56, "percentage": 85.24, "elapsed_time": "4:09:41", "remaining_time": "0:43:13"}
+{"current_steps": 3020, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.568966324942268e-06, "epoch": 2.57, "percentage": 85.53, "elapsed_time": "4:10:32", "remaining_time": "0:42:23"}
+{"current_steps": 3030, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.4716438616906977e-06, "epoch": 2.57, "percentage": 85.81, "elapsed_time": "4:11:22", "remaining_time": "0:41:33"}
+{"current_steps": 3040, "total_steps": 3531, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.376104725289105e-06, "epoch": 2.58, "percentage": 86.09, "elapsed_time": "4:12:05", "remaining_time": "0:40:43"}
+{"current_steps": 3050, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2823564785399596e-06, "epoch": 2.59, "percentage": 86.38, "elapsed_time": "4:13:01", "remaining_time": "0:39:54"}
+{"current_steps": 3060, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.1904065424803e-06, "epoch": 2.6, "percentage": 86.66, "elapsed_time": "4:13:47", "remaining_time": "0:39:03"}
+{"current_steps": 3070, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.1002621957943308e-06, "epoch": 2.61, "percentage": 86.94, "elapsed_time": "4:14:42", "remaining_time": "0:38:14"}
+{"current_steps": 3080, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.011930574237228e-06, "epoch": 2.62, "percentage": 87.23, "elapsed_time": "4:15:32", "remaining_time": "0:37:25"}
+{"current_steps": 3090, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.925418670070267e-06, "epoch": 2.63, "percentage": 87.51, "elapsed_time": "4:16:16", "remaining_time": "0:36:34"}
+{"current_steps": 3100, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.8407333315073466e-06, "epoch": 2.63, "percentage": 87.79, "elapsed_time": "4:17:03", "remaining_time": "0:35:44"}
+{"current_steps": 3110, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7578812621728751e-06, "epoch": 2.64, "percentage": 88.08, "elapsed_time": "4:17:50", "remaining_time": "0:34:54"}
+{"current_steps": 3120, "total_steps": 3531, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6768690205711173e-06, "epoch": 2.65, "percentage": 88.36, "elapsed_time": "4:18:35", "remaining_time": "0:34:03"}
+{"current_steps": 3130, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5977030195670289e-06, "epoch": 2.66, "percentage": 88.64, "elapsed_time": "4:19:22", "remaining_time": "0:33:13"}
+{"current_steps": 3140, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5203895258786238e-06, "epoch": 2.67, "percentage": 88.93, "elapsed_time": "4:20:10", "remaining_time": "0:32:23"}
+{"current_steps": 3150, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4449346595809015e-06, "epoch": 2.68, "percentage": 89.21, "elapsed_time": "4:20:53", "remaining_time": "0:31:33"}
+{"current_steps": 3160, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3713443936213822e-06, "epoch": 2.68, "percentage": 89.49, "elapsed_time": "4:21:42", "remaining_time": "0:30:43"}
+{"current_steps": 3170, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.299624553347309e-06, "epoch": 2.69, "percentage": 89.78, "elapsed_time": "4:22:25", "remaining_time": "0:29:53"}
+{"current_steps": 3180, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2297808160444929e-06, "epoch": 2.7, "percentage": 90.06, "elapsed_time": "4:23:19", "remaining_time": "0:29:03"}
+{"current_steps": 3190, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.161818710487933e-06, "epoch": 2.71, "percentage": 90.34, "elapsed_time": "4:24:05", "remaining_time": "0:28:13"}
+{"current_steps": 3200, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.095743616504144e-06, "epoch": 2.72, "percentage": 90.63, "elapsed_time": "4:24:49", "remaining_time": "0:27:23"}
+{"current_steps": 3210, "total_steps": 3531, "loss": 0.0006, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0315607645452836e-06, "epoch": 2.73, "percentage": 90.91, "elapsed_time": "4:25:36", "remaining_time": "0:26:33"}
+{"current_steps": 3220, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.692752352751466e-07, "epoch": 2.74, "percentage": 91.19, "elapsed_time": "4:26:24", "remaining_time": "0:25:43"}
+{"current_steps": 3230, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.088919591669548e-07, "epoch": 2.74, "percentage": 91.48, "elapsed_time": "4:27:12", "remaining_time": "0:24:54"}
+{"current_steps": 3240, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.504157161130788e-07, "epoch": 2.75, "percentage": 91.76, "elapsed_time": "4:27:58", "remaining_time": "0:24:04"}
+{"current_steps": 3250, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.938511350466527e-07, "epoch": 2.76, "percentage": 92.04, "elapsed_time": "4:28:44", "remaining_time": "0:23:14"}
+{"current_steps": 3260, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.39202693575175e-07, "epoch": 2.77, "percentage": 92.33, "elapsed_time": "4:29:27", "remaining_time": "0:22:23"}
+{"current_steps": 3270, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.864747176260289e-07, "epoch": 2.78, "percentage": 92.61, "elapsed_time": "4:30:13", "remaining_time": "0:21:34"}
+{"current_steps": 3280, "total_steps": 3531, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.356713811040888e-07, "epoch": 2.79, "percentage": 92.89, "elapsed_time": "4:30:57", "remaining_time": "0:20:44"}
+{"current_steps": 3290, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.867967055612794e-07, "epoch": 2.8, "percentage": 93.17, "elapsed_time": "4:31:45", "remaining_time": "0:19:54"}
+{"current_steps": 3300, "total_steps": 3531, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.398545598782528e-07, "epoch": 2.8, "percentage": 93.46, "elapsed_time": "4:32:27", "remaining_time": "0:19:04"}
+{"current_steps": 3310, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.948486599581276e-07, "epoch": 2.81, "percentage": 93.74, "elapsed_time": "4:33:19", "remaining_time": "0:18:14"}
+{"current_steps": 3320, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.517825684323324e-07, "epoch": 2.82, "percentage": 94.02, "elapsed_time": "4:34:17", "remaining_time": "0:17:25"}
+{"current_steps": 3330, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.1065969437860954e-07, "epoch": 2.83, "percentage": 94.31, "elapsed_time": "4:35:05", "remaining_time": "0:16:36"}
+{"current_steps": 3340, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.714832930511336e-07, "epoch": 2.84, "percentage": 94.59, "elapsed_time": "4:35:51", "remaining_time": "0:15:46"}
+{"current_steps": 3350, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.342564656228453e-07, "epoch": 2.85, "percentage": 94.87, "elapsed_time": "4:36:36", "remaining_time": "0:14:56"}
+{"current_steps": 3360, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9898215893995054e-07, "epoch": 2.85, "percentage": 95.16, "elapsed_time": "4:37:31", "remaining_time": "0:14:07"}
+{"current_steps": 3370, "total_steps": 3531, "loss": 0.0052, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6566316528866264e-07, "epoch": 2.86, "percentage": 95.44, "elapsed_time": "4:38:14", "remaining_time": "0:13:17"}
+{"current_steps": 3380, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.3430212217415982e-07, "epoch": 2.87, "percentage": 95.72, "elapsed_time": "4:38:59", "remaining_time": "0:12:27"}
+{"current_steps": 3390, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0490151211180752e-07, "epoch": 2.88, "percentage": 96.01, "elapsed_time": "4:39:52", "remaining_time": "0:11:38"}
+{"current_steps": 3400, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7746366243063806e-07, "epoch": 2.89, "percentage": 96.29, "elapsed_time": "4:40:40", "remaining_time": "0:10:48"}
+{"current_steps": 3410, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5199074508912836e-07, "epoch": 2.9, "percentage": 96.57, "elapsed_time": "4:41:31", "remaining_time": "0:09:59"}
+{"current_steps": 3420, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2848477650325984e-07, "epoch": 2.91, "percentage": 96.86, "elapsed_time": "4:42:20", "remaining_time": "0:09:09"}
+{"current_steps": 3430, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.069476173869155e-07, "epoch": 2.91, "percentage": 97.14, "elapsed_time": "4:43:11", "remaining_time": "0:08:20"}
+{"current_steps": 3440, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.738097260456713e-08, "epoch": 2.92, "percentage": 97.42, "elapsed_time": "4:43:55", "remaining_time": "0:07:30"}
+{"current_steps": 3450, "total_steps": 3531, "loss": 0.0042, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.978639103634444e-08, "epoch": 2.93, "percentage": 97.71, "elapsed_time": "4:44:49", "remaining_time": "0:06:41"}
+{"current_steps": 3460, "total_steps": 3531, "loss": 0.0004, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.41652654553998e-08, "epoch": 2.94, "percentage": 97.99, "elapsed_time": "4:45:32", "remaining_time": "0:05:51"}
+{"current_steps": 3470, "total_steps": 3531, "loss": 0.0042, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.051883241767973e-08, "epoch": 2.95, "percentage": 98.27, "elapsed_time": "4:46:21", "remaining_time": "0:05:02"}
+{"current_steps": 3480, "total_steps": 3531, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8848172164025465e-08, "epoch": 2.96, "percentage": 98.56, "elapsed_time": "4:47:10", "remaining_time": "0:04:12"}
+{"current_steps": 3490, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.915420853467187e-08, "epoch": 2.97, "percentage": 98.84, "elapsed_time": "4:47:58", "remaining_time": "0:03:22"}
+{"current_steps": 3500, "total_steps": 3531, "loss": 0.002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1437708896108733e-08, "epoch": 2.97, "percentage": 99.12, "elapsed_time": "4:48:43", "remaining_time": "0:02:33"}
+{"current_steps": 3500, "total_steps": 3531, "loss": null, "eval_loss": 0.014973307959735394, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 2.97, "percentage": 99.12, "elapsed_time": "4:48:43", "remaining_time": "0:02:33"}
+{"current_steps": 3510, "total_steps": 3531, "loss": 0.0064, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.1840990213946074e-09, "epoch": 2.98, "percentage": 99.41, "elapsed_time": "4:50:58", "remaining_time": "0:01:44"}
+{"current_steps": 3520, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2263343925993386e-09, "epoch": 2.99, "percentage": 99.69, "elapsed_time": "4:51:45", "remaining_time": "0:00:54"}
+{"current_steps": 3530, "total_steps": 3531, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.4737375172445563e-10, "epoch": 3.0, "percentage": 99.97, "elapsed_time": "4:52:34", "remaining_time": "0:00:04"}
+{"current_steps": 3531, "total_steps": 3531, "loss": null, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 3.0, "percentage": 100.0, "elapsed_time": "4:52:40", "remaining_time": "0:00:00"}
+{"current_steps": 248, "total_steps": 248, "loss": null, "eval_loss": 0.014986271038651466, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 3.0, "percentage": 100.0, "elapsed_time": "4:54:08", "remaining_time": "0:00:00"}
diff --git a/LLM-Detector-V1-4w/trainer_state.json b/LLM-Detector-V1-4w/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..5af6a1ba02e27afcf9645c9e2800385a9e04f88e
--- /dev/null
+++ b/LLM-Detector-V1-4w/trainer_state.json
@@ -0,0 +1,2202 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.0,
+ "eval_steps": 500,
+ "global_step": 3531,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.999919851200522e-05,
+ "loss": 9.9461,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 4.9996428002198536e-05,
+ "loss": 6.4908,
+ "step": 20
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4.9992242747551964e-05,
+ "loss": 3.708,
+ "step": 30
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4.99857130295276e-05,
+ "loss": 0.8908,
+ "step": 40
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 4.997720546222574e-05,
+ "loss": 0.2454,
+ "step": 50
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 4.996672071909866e-05,
+ "loss": 0.1348,
+ "step": 60
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 4.995425963011034e-05,
+ "loss": 0.0487,
+ "step": 70
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.993982318167074e-05,
+ "loss": 0.0282,
+ "step": 80
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.992341251655768e-05,
+ "loss": 0.0455,
+ "step": 90
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.9905028933826435e-05,
+ "loss": 0.0472,
+ "step": 100
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 4.988467388870688e-05,
+ "loss": 0.0526,
+ "step": 110
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 4.986234899248826e-05,
+ "loss": 0.0679,
+ "step": 120
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 4.983805601239172e-05,
+ "loss": 0.0314,
+ "step": 130
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 4.981179687143034e-05,
+ "loss": 0.0136,
+ "step": 140
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 4.978357364825695e-05,
+ "loss": 0.0409,
+ "step": 150
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 4.975338857699956e-05,
+ "loss": 0.0284,
+ "step": 160
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 4.972124404708454e-05,
+ "loss": 0.0364,
+ "step": 170
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 4.968714260304743e-05,
+ "loss": 0.0147,
+ "step": 180
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 4.965108694433159e-05,
+ "loss": 0.0174,
+ "step": 190
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 4.961307992507443e-05,
+ "loss": 0.0244,
+ "step": 200
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 4.957312455388152e-05,
+ "loss": 0.0387,
+ "step": 210
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 4.953122399358845e-05,
+ "loss": 0.0264,
+ "step": 220
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.948738156101042e-05,
+ "loss": 0.0291,
+ "step": 230
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.9441600726679694e-05,
+ "loss": 0.0214,
+ "step": 240
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 4.939388511457092e-05,
+ "loss": 0.0116,
+ "step": 250
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 4.934423850181419e-05,
+ "loss": 0.0191,
+ "step": 260
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 4.9292664818396117e-05,
+ "loss": 0.0064,
+ "step": 270
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 4.9239168146848666e-05,
+ "loss": 0.0184,
+ "step": 280
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 4.9183752721926036e-05,
+ "loss": 0.0026,
+ "step": 290
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 4.912642293026942e-05,
+ "loss": 0.0223,
+ "step": 300
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 4.906718331005979e-05,
+ "loss": 0.0405,
+ "step": 310
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 4.900603855065861e-05,
+ "loss": 0.0461,
+ "step": 320
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 4.894299349223665e-05,
+ "loss": 0.0199,
+ "step": 330
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 4.8878053125390875e-05,
+ "loss": 0.0193,
+ "step": 340
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 4.881122259074935e-05,
+ "loss": 0.004,
+ "step": 350
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.874250717856433e-05,
+ "loss": 0.0018,
+ "step": 360
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.867191232829348e-05,
+ "loss": 0.0021,
+ "step": 370
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 4.8599443628169295e-05,
+ "loss": 0.018,
+ "step": 380
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 4.8525106814756754e-05,
+ "loss": 0.0261,
+ "step": 390
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 4.84489077724992e-05,
+ "loss": 0.016,
+ "step": 400
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 4.8370852533252536e-05,
+ "loss": 0.0402,
+ "step": 410
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 4.8290947275807755e-05,
+ "loss": 0.0038,
+ "step": 420
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 4.8209198325401815e-05,
+ "loss": 0.008,
+ "step": 430
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 4.8125612153216976e-05,
+ "loss": 0.0296,
+ "step": 440
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 4.804019537586849e-05,
+ "loss": 0.0012,
+ "step": 450
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 4.7952954754880886e-05,
+ "loss": 0.0142,
+ "step": 460
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 4.7863897196152704e-05,
+ "loss": 0.0163,
+ "step": 470
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 4.7773029749409836e-05,
+ "loss": 0.0021,
+ "step": 480
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.76803596076475e-05,
+ "loss": 0.0355,
+ "step": 490
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.758589410656078e-05,
+ "loss": 0.0199,
+ "step": 500
+ },
+ {
+ "epoch": 0.42,
+ "eval_loss": 0.010466881096363068,
+ "eval_runtime": 88.037,
+ "eval_samples_per_second": 22.525,
+ "eval_steps_per_second": 2.817,
+ "step": 500
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 4.748964072396403e-05,
+ "loss": 0.0341,
+ "step": 510
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 4.7391607079198876e-05,
+ "loss": 0.0137,
+ "step": 520
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 4.7291800932531064e-05,
+ "loss": 0.0138,
+ "step": 530
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 4.719023018453623e-05,
+ "loss": 0.0063,
+ "step": 540
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 4.708690287547441e-05,
+ "loss": 0.0376,
+ "step": 550
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 4.698182718465368e-05,
+ "loss": 0.006,
+ "step": 560
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 4.687501142978258e-05,
+ "loss": 0.0371,
+ "step": 570
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 4.6766464066311765e-05,
+ "loss": 0.0322,
+ "step": 580
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 4.665619368676466e-05,
+ "loss": 0.0086,
+ "step": 590
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 4.6544209020057285e-05,
+ "loss": 0.002,
+ "step": 600
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 4.643051893080725e-05,
+ "loss": 0.0147,
+ "step": 610
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 4.631513241863209e-05,
+ "loss": 0.0038,
+ "step": 620
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 4.619805861743683e-05,
+ "loss": 0.0187,
+ "step": 630
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 4.607930679469096e-05,
+ "loss": 0.0063,
+ "step": 640
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 4.595888635069481e-05,
+ "loss": 0.0109,
+ "step": 650
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 4.5836806817835475e-05,
+ "loss": 0.005,
+ "step": 660
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 4.57130778598322e-05,
+ "loss": 0.0167,
+ "step": 670
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 4.5587709270971425e-05,
+ "loss": 0.0143,
+ "step": 680
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 4.546071097533145e-05,
+ "loss": 0.0015,
+ "step": 690
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 4.533209302599691e-05,
+ "loss": 0.0003,
+ "step": 700
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 4.520186560426292e-05,
+ "loss": 0.006,
+ "step": 710
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 4.507003901882915e-05,
+ "loss": 0.0093,
+ "step": 720
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 4.493662370498383e-05,
+ "loss": 0.0046,
+ "step": 730
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 4.4801630223777665e-05,
+ "loss": 0.0147,
+ "step": 740
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 4.466506926118782e-05,
+ "loss": 0.0102,
+ "step": 750
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 4.4526951627272074e-05,
+ "loss": 0.017,
+ "step": 760
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 4.438728825531305e-05,
+ "loss": 0.0033,
+ "step": 770
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 4.4246090200952816e-05,
+ "loss": 0.0061,
+ "step": 780
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 4.410336864131762e-05,
+ "loss": 0.0032,
+ "step": 790
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 4.395913487413324e-05,
+ "loss": 0.0043,
+ "step": 800
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 4.3813400316830576e-05,
+ "loss": 0.0063,
+ "step": 810
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 4.36661765056419e-05,
+ "loss": 0.0273,
+ "step": 820
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.351747509468763e-05,
+ "loss": 0.0125,
+ "step": 830
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.336730785505382e-05,
+ "loss": 0.0076,
+ "step": 840
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 4.3215686673860384e-05,
+ "loss": 0.0127,
+ "step": 850
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.306262355332006e-05,
+ "loss": 0.0161,
+ "step": 860
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.290813060978839e-05,
+ "loss": 0.0169,
+ "step": 870
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.2752220072804564e-05,
+ "loss": 0.0081,
+ "step": 880
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 4.259490428412335e-05,
+ "loss": 0.0131,
+ "step": 890
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 4.243619569673814e-05,
+ "loss": 0.0205,
+ "step": 900
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.2276106873895143e-05,
+ "loss": 0.0026,
+ "step": 910
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.2114650488098936e-05,
+ "loss": 0.018,
+ "step": 920
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.19518393201093e-05,
+ "loss": 0.0083,
+ "step": 930
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 4.178768625792949e-05,
+ "loss": 0.0291,
+ "step": 940
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.162220429578605e-05,
+ "loss": 0.0226,
+ "step": 950
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.145540653310018e-05,
+ "loss": 0.0042,
+ "step": 960
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.128730617345084e-05,
+ "loss": 0.0078,
+ "step": 970
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 4.111791652352952e-05,
+ "loss": 0.0084,
+ "step": 980
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 4.094725099208688e-05,
+ "loss": 0.0044,
+ "step": 990
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 4.077532308887141e-05,
+ "loss": 0.0011,
+ "step": 1000
+ },
+ {
+ "epoch": 0.85,
+ "eval_loss": 0.01175768580287695,
+ "eval_runtime": 88.0904,
+ "eval_samples_per_second": 22.511,
+ "eval_steps_per_second": 2.815,
+ "step": 1000
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.060214642355989e-05,
+ "loss": 0.0011,
+ "step": 1010
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 4.042773470468016e-05,
+ "loss": 0.021,
+ "step": 1020
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 4.0252101738525916e-05,
+ "loss": 0.0424,
+ "step": 1030
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 4.0075261428063806e-05,
+ "loss": 0.0194,
+ "step": 1040
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 3.9897227771832924e-05,
+ "loss": 0.0025,
+ "step": 1050
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 3.971801486283665e-05,
+ "loss": 0.0044,
+ "step": 1060
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 3.953763688742708e-05,
+ "loss": 0.0051,
+ "step": 1070
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 3.9356108124182067e-05,
+ "loss": 0.0071,
+ "step": 1080
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.9173442942774885e-05,
+ "loss": 0.0145,
+ "step": 1090
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.898965580283681e-05,
+ "loss": 0.0371,
+ "step": 1100
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 3.880476125281244e-05,
+ "loss": 0.0076,
+ "step": 1110
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 3.861877392880808e-05,
+ "loss": 0.0035,
+ "step": 1120
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.843170855343317e-05,
+ "loss": 0.008,
+ "step": 1130
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 3.8243579934634846e-05,
+ "loss": 0.0089,
+ "step": 1140
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 3.805440296452574e-05,
+ "loss": 0.0034,
+ "step": 1150
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 3.786419261820514e-05,
+ "loss": 0.0019,
+ "step": 1160
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 3.7672963952573614e-05,
+ "loss": 0.0164,
+ "step": 1170
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 3.748073210514102e-05,
+ "loss": 0.0012,
+ "step": 1180
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 3.728751229282836e-05,
+ "loss": 0.0072,
+ "step": 1190
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 3.70933198107631e-05,
+ "loss": 0.0041,
+ "step": 1200
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 3.689817003106852e-05,
+ "loss": 0.0021,
+ "step": 1210
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 3.670207840164678e-05,
+ "loss": 0.0024,
+ "step": 1220
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.650506044495615e-05,
+ "loss": 0.0026,
+ "step": 1230
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.630713175678222e-05,
+ "loss": 0.005,
+ "step": 1240
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 3.610830800500335e-05,
+ "loss": 0.0069,
+ "step": 1250
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 3.590860492835046e-05,
+ "loss": 0.0081,
+ "step": 1260
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 3.5708038335161134e-05,
+ "loss": 0.0107,
+ "step": 1270
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 3.550662410212819e-05,
+ "loss": 0.0074,
+ "step": 1280
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 3.5304378173043e-05,
+ "loss": 0.0048,
+ "step": 1290
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 3.5101316557533294e-05,
+ "loss": 0.0006,
+ "step": 1300
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 3.489745532979593e-05,
+ "loss": 0.0076,
+ "step": 1310
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 3.469281062732442e-05,
+ "loss": 0.0002,
+ "step": 1320
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 3.448739864963154e-05,
+ "loss": 0.0073,
+ "step": 1330
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 3.4281235656966915e-05,
+ "loss": 0.0008,
+ "step": 1340
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 3.4074337969029965e-05,
+ "loss": 0.001,
+ "step": 1350
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 3.386672196367799e-05,
+ "loss": 0.0047,
+ "step": 1360
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 3.365840407562974e-05,
+ "loss": 0.0131,
+ "step": 1370
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 3.3449400795164416e-05,
+ "loss": 0.001,
+ "step": 1380
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 3.323972866681637e-05,
+ "loss": 0.0058,
+ "step": 1390
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 3.3029404288065426e-05,
+ "loss": 0.0047,
+ "step": 1400
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 3.2818444308023e-05,
+ "loss": 0.0029,
+ "step": 1410
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 3.2606865426114234e-05,
+ "loss": 0.0073,
+ "step": 1420
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 3.239468439075604e-05,
+ "loss": 0.0006,
+ "step": 1430
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 3.2181917998031326e-05,
+ "loss": 0.0028,
+ "step": 1440
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 3.196858309035941e-05,
+ "loss": 0.0003,
+ "step": 1450
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 3.175469655516284e-05,
+ "loss": 0.0007,
+ "step": 1460
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 3.154027532353052e-05,
+ "loss": 0.0037,
+ "step": 1470
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 3.132533636887753e-05,
+ "loss": 0.0065,
+ "step": 1480
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 3.1109896705601485e-05,
+ "loss": 0.0092,
+ "step": 1490
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 3.0893973387735687e-05,
+ "loss": 0.0001,
+ "step": 1500
+ },
+ {
+ "epoch": 1.27,
+ "eval_loss": 0.010954583063721657,
+ "eval_runtime": 88.0029,
+ "eval_samples_per_second": 22.533,
+ "eval_steps_per_second": 2.818,
+ "step": 1500
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 3.067758350759917e-05,
+ "loss": 0.0002,
+ "step": 1510
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 3.046074419444366e-05,
+ "loss": 0.0004,
+ "step": 1520
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 3.0243472613097656e-05,
+ "loss": 0.001,
+ "step": 1530
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 3.002578596260765e-05,
+ "loss": 0.0001,
+ "step": 1540
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 2.980770147487668e-05,
+ "loss": 0.0086,
+ "step": 1550
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 2.958923641330028e-05,
+ "loss": 0.0021,
+ "step": 1560
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 2.9370408071399898e-05,
+ "loss": 0.0001,
+ "step": 1570
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 2.9151233771453956e-05,
+ "loss": 0.0076,
+ "step": 1580
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 2.8931730863126666e-05,
+ "loss": 0.0001,
+ "step": 1590
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 2.871191672209459e-05,
+ "loss": 0.0001,
+ "step": 1600
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 2.8491808748671255e-05,
+ "loss": 0.0001,
+ "step": 1610
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 2.8271424366429706e-05,
+ "loss": 0.0115,
+ "step": 1620
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 2.8050781020823296e-05,
+ "loss": 0.0001,
+ "step": 1630
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 2.7829896177804716e-05,
+ "loss": 0.0003,
+ "step": 1640
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 2.760878732244339e-05,
+ "loss": 0.0003,
+ "step": 1650
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 2.7387471957541405e-05,
+ "loss": 0.0024,
+ "step": 1660
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 2.7165967602247964e-05,
+ "loss": 0.0005,
+ "step": 1670
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 2.694429179067261e-05,
+ "loss": 0.0018,
+ "step": 1680
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 2.6744651468034758e-05,
+ "loss": 0.002,
+ "step": 1690
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 2.6522698243485527e-05,
+ "loss": 0.0001,
+ "step": 1700
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 2.6300624483347926e-05,
+ "loss": 0.0058,
+ "step": 1710
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 2.607844776680513e-05,
+ "loss": 0.0001,
+ "step": 1720
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 2.585618568119027e-05,
+ "loss": 0.0001,
+ "step": 1730
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 2.56338558205942e-05,
+ "loss": 0.0008,
+ "step": 1740
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 2.5411475784472805e-05,
+ "loss": 0.0002,
+ "step": 1750
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 2.5189063176253825e-05,
+ "loss": 0.0001,
+ "step": 1760
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 2.496663560194338e-05,
+ "loss": 0.0001,
+ "step": 1770
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.4744210668732295e-05,
+ "loss": 0.0001,
+ "step": 1780
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.452180598360232e-05,
+ "loss": 0.0001,
+ "step": 1790
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.429943915193239e-05,
+ "loss": 0.0,
+ "step": 1800
+ },
+ {
+ "epoch": 1.54,
+ "learning_rate": 2.4077127776104984e-05,
+ "loss": 0.0146,
+ "step": 1810
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.3854889454112748e-05,
+ "loss": 0.0017,
+ "step": 1820
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.3632741778165442e-05,
+ "loss": 0.0001,
+ "step": 1830
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 2.3410702333297356e-05,
+ "loss": 0.0001,
+ "step": 1840
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 2.318878869597528e-05,
+ "loss": 0.0001,
+ "step": 1850
+ },
+ {
+ "epoch": 1.58,
+ "learning_rate": 2.2967018432707213e-05,
+ "loss": 0.0073,
+ "step": 1860
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 2.2745409098651744e-05,
+ "loss": 0.0001,
+ "step": 1870
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 2.2523978236228442e-05,
+ "loss": 0.0001,
+ "step": 1880
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 2.2302743373729205e-05,
+ "loss": 0.0,
+ "step": 1890
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 2.2081722023930743e-05,
+ "loss": 0.0136,
+ "step": 1900
+ },
+ {
+ "epoch": 1.62,
+ "learning_rate": 2.1860931682708248e-05,
+ "loss": 0.0051,
+ "step": 1910
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 2.164038982765047e-05,
+ "loss": 0.0004,
+ "step": 1920
+ },
+ {
+ "epoch": 1.64,
+ "learning_rate": 2.1420113916676183e-05,
+ "loss": 0.0002,
+ "step": 1930
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 2.1200121386652246e-05,
+ "loss": 0.0001,
+ "step": 1940
+ },
+ {
+ "epoch": 1.66,
+ "learning_rate": 2.0980429652013297e-05,
+ "loss": 0.0001,
+ "step": 1950
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 2.0761056103383258e-05,
+ "loss": 0.0001,
+ "step": 1960
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 2.0542018106198697e-05,
+ "loss": 0.0,
+ "step": 1970
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 2.0323332999334198e-05,
+ "loss": 0.005,
+ "step": 1980
+ },
+ {
+ "epoch": 1.69,
+ "learning_rate": 2.010501809372981e-05,
+ "loss": 0.0149,
+ "step": 1990
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 1.988709067102076e-05,
+ "loss": 0.0143,
+ "step": 2000
+ },
+ {
+ "epoch": 1.7,
+ "eval_loss": 0.013543435372412205,
+ "eval_runtime": 87.9904,
+ "eval_samples_per_second": 22.537,
+ "eval_steps_per_second": 2.818,
+ "step": 2000
+ },
+ {
+ "epoch": 1.71,
+ "learning_rate": 1.966956798216943e-05,
+ "loss": 0.0017,
+ "step": 2010
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 1.945246724609978e-05,
+ "loss": 0.0038,
+ "step": 2020
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 1.9235805648334342e-05,
+ "loss": 0.0003,
+ "step": 2030
+ },
+ {
+ "epoch": 1.73,
+ "learning_rate": 1.9019600339633798e-05,
+ "loss": 0.0005,
+ "step": 2040
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 1.8803868434639345e-05,
+ "loss": 0.0005,
+ "step": 2050
+ },
+ {
+ "epoch": 1.75,
+ "learning_rate": 1.858862701051791e-05,
+ "loss": 0.0009,
+ "step": 2060
+ },
+ {
+ "epoch": 1.76,
+ "learning_rate": 1.8373893105610356e-05,
+ "loss": 0.0002,
+ "step": 2070
+ },
+ {
+ "epoch": 1.77,
+ "learning_rate": 1.815968371808273e-05,
+ "loss": 0.0036,
+ "step": 2080
+ },
+ {
+ "epoch": 1.78,
+ "learning_rate": 1.7946015804580688e-05,
+ "loss": 0.0138,
+ "step": 2090
+ },
+ {
+ "epoch": 1.78,
+ "learning_rate": 1.7732906278887225e-05,
+ "loss": 0.0005,
+ "step": 2100
+ },
+ {
+ "epoch": 1.79,
+ "learning_rate": 1.7520372010583815e-05,
+ "loss": 0.0001,
+ "step": 2110
+ },
+ {
+ "epoch": 1.8,
+ "learning_rate": 1.7308429823714995e-05,
+ "loss": 0.0001,
+ "step": 2120
+ },
+ {
+ "epoch": 1.81,
+ "learning_rate": 1.709709649545662e-05,
+ "loss": 0.0001,
+ "step": 2130
+ },
+ {
+ "epoch": 1.82,
+ "learning_rate": 1.688638875478777e-05,
+ "loss": 0.0001,
+ "step": 2140
+ },
+ {
+ "epoch": 1.83,
+ "learning_rate": 1.66763232811665e-05,
+ "loss": 0.0,
+ "step": 2150
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 1.6466916703209535e-05,
+ "loss": 0.012,
+ "step": 2160
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 1.625818559737592e-05,
+ "loss": 0.0,
+ "step": 2170
+ },
+ {
+ "epoch": 1.85,
+ "learning_rate": 1.605014648665486e-05,
+ "loss": 0.0005,
+ "step": 2180
+ },
+ {
+ "epoch": 1.86,
+ "learning_rate": 1.584281583925779e-05,
+ "loss": 0.0047,
+ "step": 2190
+ },
+ {
+ "epoch": 1.87,
+ "learning_rate": 1.5636210067314744e-05,
+ "loss": 0.0126,
+ "step": 2200
+ },
+ {
+ "epoch": 1.88,
+ "learning_rate": 1.5430345525575186e-05,
+ "loss": 0.0015,
+ "step": 2210
+ },
+ {
+ "epoch": 1.89,
+ "learning_rate": 1.5225238510113377e-05,
+ "loss": 0.0018,
+ "step": 2220
+ },
+ {
+ "epoch": 1.89,
+ "learning_rate": 1.5020905257038403e-05,
+ "loss": 0.0057,
+ "step": 2230
+ },
+ {
+ "epoch": 1.9,
+ "learning_rate": 1.481736194120894e-05,
+ "loss": 0.0036,
+ "step": 2240
+ },
+ {
+ "epoch": 1.91,
+ "learning_rate": 1.4614624674952842e-05,
+ "loss": 0.0006,
+ "step": 2250
+ },
+ {
+ "epoch": 1.92,
+ "learning_rate": 1.4412709506791725e-05,
+ "loss": 0.0054,
+ "step": 2260
+ },
+ {
+ "epoch": 1.93,
+ "learning_rate": 1.4211632420170558e-05,
+ "loss": 0.0039,
+ "step": 2270
+ },
+ {
+ "epoch": 1.94,
+ "learning_rate": 1.4011409332192472e-05,
+ "loss": 0.0017,
+ "step": 2280
+ },
+ {
+ "epoch": 1.95,
+ "learning_rate": 1.3812056092358686e-05,
+ "loss": 0.0181,
+ "step": 2290
+ },
+ {
+ "epoch": 1.95,
+ "learning_rate": 1.3613588481313977e-05,
+ "loss": 0.0035,
+ "step": 2300
+ },
+ {
+ "epoch": 1.96,
+ "learning_rate": 1.3416022209597429e-05,
+ "loss": 0.0001,
+ "step": 2310
+ },
+ {
+ "epoch": 1.97,
+ "learning_rate": 1.3219372916398826e-05,
+ "loss": 0.0005,
+ "step": 2320
+ },
+ {
+ "epoch": 1.98,
+ "learning_rate": 1.302365616832063e-05,
+ "loss": 0.0012,
+ "step": 2330
+ },
+ {
+ "epoch": 1.99,
+ "learning_rate": 1.2828887458145806e-05,
+ "loss": 0.0052,
+ "step": 2340
+ },
+ {
+ "epoch": 2.0,
+ "learning_rate": 1.2635082203611375e-05,
+ "loss": 0.0008,
+ "step": 2350
+ },
+ {
+ "epoch": 2.01,
+ "learning_rate": 1.2442255746187954e-05,
+ "loss": 0.0002,
+ "step": 2360
+ },
+ {
+ "epoch": 2.01,
+ "learning_rate": 1.2250423349865387e-05,
+ "loss": 0.0009,
+ "step": 2370
+ },
+ {
+ "epoch": 2.02,
+ "learning_rate": 1.2059600199944388e-05,
+ "loss": 0.0002,
+ "step": 2380
+ },
+ {
+ "epoch": 2.03,
+ "learning_rate": 1.1869801401834564e-05,
+ "loss": 0.0001,
+ "step": 2390
+ },
+ {
+ "epoch": 2.04,
+ "learning_rate": 1.1681041979858626e-05,
+ "loss": 0.0001,
+ "step": 2400
+ },
+ {
+ "epoch": 2.05,
+ "learning_rate": 1.1493336876063071e-05,
+ "loss": 0.0001,
+ "step": 2410
+ },
+ {
+ "epoch": 2.06,
+ "learning_rate": 1.1306700949035462e-05,
+ "loss": 0.0,
+ "step": 2420
+ },
+ {
+ "epoch": 2.06,
+ "learning_rate": 1.1121148972728104e-05,
+ "loss": 0.0001,
+ "step": 2430
+ },
+ {
+ "epoch": 2.07,
+ "learning_rate": 1.0936695635288674e-05,
+ "loss": 0.0001,
+ "step": 2440
+ },
+ {
+ "epoch": 2.08,
+ "learning_rate": 1.0753355537897427e-05,
+ "loss": 0.0001,
+ "step": 2450
+ },
+ {
+ "epoch": 2.09,
+ "learning_rate": 1.0571143193611444e-05,
+ "loss": 0.0,
+ "step": 2460
+ },
+ {
+ "epoch": 2.1,
+ "learning_rate": 1.039007302621576e-05,
+ "loss": 0.0001,
+ "step": 2470
+ },
+ {
+ "epoch": 2.11,
+ "learning_rate": 1.0210159369081568e-05,
+ "loss": 0.0003,
+ "step": 2480
+ },
+ {
+ "epoch": 2.12,
+ "learning_rate": 1.0031416464031654e-05,
+ "loss": 0.0,
+ "step": 2490
+ },
+ {
+ "epoch": 2.12,
+ "learning_rate": 9.853858460212962e-06,
+ "loss": 0.0001,
+ "step": 2500
+ },
+ {
+ "epoch": 2.12,
+ "eval_loss": 0.012909023091197014,
+ "eval_runtime": 87.9835,
+ "eval_samples_per_second": 22.538,
+ "eval_steps_per_second": 2.819,
+ "step": 2500
+ },
+ {
+ "epoch": 2.13,
+ "learning_rate": 9.677499412976632e-06,
+ "loss": 0.0,
+ "step": 2510
+ },
+ {
+ "epoch": 2.14,
+ "learning_rate": 9.502353282765306e-06,
+ "loss": 0.0001,
+ "step": 2520
+ },
+ {
+ "epoch": 2.15,
+ "learning_rate": 9.328433934008107e-06,
+ "loss": 0.0002,
+ "step": 2530
+ },
+ {
+ "epoch": 2.16,
+ "learning_rate": 9.155755134023097e-06,
+ "loss": 0.0,
+ "step": 2540
+ },
+ {
+ "epoch": 2.17,
+ "learning_rate": 8.984330551927475e-06,
+ "loss": 0.0,
+ "step": 2550
+ },
+ {
+ "epoch": 2.18,
+ "learning_rate": 8.81417375755556e-06,
+ "loss": 0.0002,
+ "step": 2560
+ },
+ {
+ "epoch": 2.18,
+ "learning_rate": 8.645298220384567e-06,
+ "loss": 0.0001,
+ "step": 2570
+ },
+ {
+ "epoch": 2.19,
+ "learning_rate": 8.477717308468442e-06,
+ "loss": 0.0008,
+ "step": 2580
+ },
+ {
+ "epoch": 2.2,
+ "learning_rate": 8.31144428737958e-06,
+ "loss": 0.0,
+ "step": 2590
+ },
+ {
+ "epoch": 2.21,
+ "learning_rate": 8.146492319158805e-06,
+ "loss": 0.0,
+ "step": 2600
+ },
+ {
+ "epoch": 2.22,
+ "learning_rate": 7.982874461273438e-06,
+ "loss": 0.0001,
+ "step": 2610
+ },
+ {
+ "epoch": 2.23,
+ "learning_rate": 7.820603665583654e-06,
+ "loss": 0.0002,
+ "step": 2620
+ },
+ {
+ "epoch": 2.23,
+ "learning_rate": 7.659692777317288e-06,
+ "loss": 0.0001,
+ "step": 2630
+ },
+ {
+ "epoch": 2.24,
+ "learning_rate": 7.500154534052933e-06,
+ "loss": 0.0001,
+ "step": 2640
+ },
+ {
+ "epoch": 2.25,
+ "learning_rate": 7.342001564711756e-06,
+ "loss": 0.0001,
+ "step": 2650
+ },
+ {
+ "epoch": 2.26,
+ "learning_rate": 7.185246388557665e-06,
+ "loss": 0.0,
+ "step": 2660
+ },
+ {
+ "epoch": 2.27,
+ "learning_rate": 7.0299014142064106e-06,
+ "loss": 0.0,
+ "step": 2670
+ },
+ {
+ "epoch": 2.28,
+ "learning_rate": 6.875978938643277e-06,
+ "loss": 0.0001,
+ "step": 2680
+ },
+ {
+ "epoch": 2.29,
+ "learning_rate": 6.723491146249647e-06,
+ "loss": 0.0,
+ "step": 2690
+ },
+ {
+ "epoch": 2.29,
+ "learning_rate": 6.572450107838551e-06,
+ "loss": 0.0001,
+ "step": 2700
+ },
+ {
+ "epoch": 2.3,
+ "learning_rate": 6.422867779699088e-06,
+ "loss": 0.0002,
+ "step": 2710
+ },
+ {
+ "epoch": 2.31,
+ "learning_rate": 6.274756002650034e-06,
+ "loss": 0.0001,
+ "step": 2720
+ },
+ {
+ "epoch": 2.32,
+ "learning_rate": 6.128126501102479e-06,
+ "loss": 0.0001,
+ "step": 2730
+ },
+ {
+ "epoch": 2.33,
+ "learning_rate": 5.982990882131775e-06,
+ "loss": 0.0001,
+ "step": 2740
+ },
+ {
+ "epoch": 2.34,
+ "learning_rate": 5.83936063455871e-06,
+ "loss": 0.0001,
+ "step": 2750
+ },
+ {
+ "epoch": 2.34,
+ "learning_rate": 5.697247128040037e-06,
+ "loss": 0.0,
+ "step": 2760
+ },
+ {
+ "epoch": 2.35,
+ "learning_rate": 5.556661612168537e-06,
+ "loss": 0.0003,
+ "step": 2770
+ },
+ {
+ "epoch": 2.36,
+ "learning_rate": 5.417615215582408e-06,
+ "loss": 0.0001,
+ "step": 2780
+ },
+ {
+ "epoch": 2.37,
+ "learning_rate": 5.280118945084422e-06,
+ "loss": 0.0001,
+ "step": 2790
+ },
+ {
+ "epoch": 2.38,
+ "learning_rate": 5.144183684770565e-06,
+ "loss": 0.0,
+ "step": 2800
+ },
+ {
+ "epoch": 2.39,
+ "learning_rate": 5.00982019516851e-06,
+ "loss": 0.0,
+ "step": 2810
+ },
+ {
+ "epoch": 2.4,
+ "learning_rate": 4.877039112385815e-06,
+ "loss": 0.0001,
+ "step": 2820
+ },
+ {
+ "epoch": 2.4,
+ "learning_rate": 4.74585094726793e-06,
+ "loss": 0.0003,
+ "step": 2830
+ },
+ {
+ "epoch": 2.41,
+ "learning_rate": 4.616266084566243e-06,
+ "loss": 0.0,
+ "step": 2840
+ },
+ {
+ "epoch": 2.42,
+ "learning_rate": 4.488294782115957e-06,
+ "loss": 0.0001,
+ "step": 2850
+ },
+ {
+ "epoch": 2.43,
+ "learning_rate": 4.361947170024144e-06,
+ "loss": 0.0001,
+ "step": 2860
+ },
+ {
+ "epoch": 2.44,
+ "learning_rate": 4.2372332498678256e-06,
+ "loss": 0.0007,
+ "step": 2870
+ },
+ {
+ "epoch": 2.45,
+ "learning_rate": 4.11416289390226e-06,
+ "loss": 0.0061,
+ "step": 2880
+ },
+ {
+ "epoch": 2.46,
+ "learning_rate": 3.992745844279475e-06,
+ "loss": 0.0,
+ "step": 2890
+ },
+ {
+ "epoch": 2.46,
+ "learning_rate": 3.872991712277052e-06,
+ "loss": 0.0001,
+ "step": 2900
+ },
+ {
+ "epoch": 2.47,
+ "learning_rate": 3.7549099775373576e-06,
+ "loss": 0.0002,
+ "step": 2910
+ },
+ {
+ "epoch": 2.48,
+ "learning_rate": 3.6385099873170875e-06,
+ "loss": 0.0,
+ "step": 2920
+ },
+ {
+ "epoch": 2.49,
+ "learning_rate": 3.5238009557473946e-06,
+ "loss": 0.0,
+ "step": 2930
+ },
+ {
+ "epoch": 2.5,
+ "learning_rate": 3.4107919631044732e-06,
+ "loss": 0.0028,
+ "step": 2940
+ },
+ {
+ "epoch": 2.51,
+ "learning_rate": 3.299491955090775e-06,
+ "loss": 0.0003,
+ "step": 2950
+ },
+ {
+ "epoch": 2.51,
+ "learning_rate": 3.1899097421268924e-06,
+ "loss": 0.0001,
+ "step": 2960
+ },
+ {
+ "epoch": 2.52,
+ "learning_rate": 3.0820539986541054e-06,
+ "loss": 0.0,
+ "step": 2970
+ },
+ {
+ "epoch": 2.53,
+ "learning_rate": 2.97593326244775e-06,
+ "loss": 0.0013,
+ "step": 2980
+ },
+ {
+ "epoch": 2.54,
+ "learning_rate": 2.871555933941353e-06,
+ "loss": 0.0001,
+ "step": 2990
+ },
+ {
+ "epoch": 2.55,
+ "learning_rate": 2.7689302755616736e-06,
+ "loss": 0.0001,
+ "step": 3000
+ },
+ {
+ "epoch": 2.55,
+ "eval_loss": 0.014460938051342964,
+ "eval_runtime": 87.9924,
+ "eval_samples_per_second": 22.536,
+ "eval_steps_per_second": 2.818,
+ "step": 3000
+ },
+ {
+ "epoch": 2.56,
+ "learning_rate": 2.6680644110746305e-06,
+ "loss": 0.0,
+ "step": 3010
+ },
+ {
+ "epoch": 2.57,
+ "learning_rate": 2.568966324942268e-06,
+ "loss": 0.0001,
+ "step": 3020
+ },
+ {
+ "epoch": 2.57,
+ "learning_rate": 2.4716438616906977e-06,
+ "loss": 0.0,
+ "step": 3030
+ },
+ {
+ "epoch": 2.58,
+ "learning_rate": 2.376104725289105e-06,
+ "loss": 0.0002,
+ "step": 3040
+ },
+ {
+ "epoch": 2.59,
+ "learning_rate": 2.2823564785399596e-06,
+ "loss": 0.0,
+ "step": 3050
+ },
+ {
+ "epoch": 2.6,
+ "learning_rate": 2.1904065424803e-06,
+ "loss": 0.0,
+ "step": 3060
+ },
+ {
+ "epoch": 2.61,
+ "learning_rate": 2.1002621957943308e-06,
+ "loss": 0.0001,
+ "step": 3070
+ },
+ {
+ "epoch": 2.62,
+ "learning_rate": 2.011930574237228e-06,
+ "loss": 0.0,
+ "step": 3080
+ },
+ {
+ "epoch": 2.63,
+ "learning_rate": 1.925418670070267e-06,
+ "loss": 0.0,
+ "step": 3090
+ },
+ {
+ "epoch": 2.63,
+ "learning_rate": 1.8407333315073466e-06,
+ "loss": 0.0,
+ "step": 3100
+ },
+ {
+ "epoch": 2.64,
+ "learning_rate": 1.7578812621728751e-06,
+ "loss": 0.0001,
+ "step": 3110
+ },
+ {
+ "epoch": 2.65,
+ "learning_rate": 1.6768690205711173e-06,
+ "loss": 0.0002,
+ "step": 3120
+ },
+ {
+ "epoch": 2.66,
+ "learning_rate": 1.5977030195670289e-06,
+ "loss": 0.0,
+ "step": 3130
+ },
+ {
+ "epoch": 2.67,
+ "learning_rate": 1.5203895258786238e-06,
+ "loss": 0.0001,
+ "step": 3140
+ },
+ {
+ "epoch": 2.68,
+ "learning_rate": 1.4449346595809015e-06,
+ "loss": 0.0001,
+ "step": 3150
+ },
+ {
+ "epoch": 2.68,
+ "learning_rate": 1.3713443936213822e-06,
+ "loss": 0.0,
+ "step": 3160
+ },
+ {
+ "epoch": 2.69,
+ "learning_rate": 1.299624553347309e-06,
+ "loss": 0.0,
+ "step": 3170
+ },
+ {
+ "epoch": 2.7,
+ "learning_rate": 1.2297808160444929e-06,
+ "loss": 0.0,
+ "step": 3180
+ },
+ {
+ "epoch": 2.71,
+ "learning_rate": 1.161818710487933e-06,
+ "loss": 0.0,
+ "step": 3190
+ },
+ {
+ "epoch": 2.72,
+ "learning_rate": 1.095743616504144e-06,
+ "loss": 0.0001,
+ "step": 3200
+ },
+ {
+ "epoch": 2.73,
+ "learning_rate": 1.0315607645452836e-06,
+ "loss": 0.0006,
+ "step": 3210
+ },
+ {
+ "epoch": 2.74,
+ "learning_rate": 9.692752352751466e-07,
+ "loss": 0.0001,
+ "step": 3220
+ },
+ {
+ "epoch": 2.74,
+ "learning_rate": 9.088919591669548e-07,
+ "loss": 0.0001,
+ "step": 3230
+ },
+ {
+ "epoch": 2.75,
+ "learning_rate": 8.504157161130788e-07,
+ "loss": 0.0,
+ "step": 3240
+ },
+ {
+ "epoch": 2.76,
+ "learning_rate": 7.938511350466527e-07,
+ "loss": 0.0001,
+ "step": 3250
+ },
+ {
+ "epoch": 2.77,
+ "learning_rate": 7.39202693575175e-07,
+ "loss": 0.0001,
+ "step": 3260
+ },
+ {
+ "epoch": 2.78,
+ "learning_rate": 6.864747176260289e-07,
+ "loss": 0.0001,
+ "step": 3270
+ },
+ {
+ "epoch": 2.79,
+ "learning_rate": 6.356713811040888e-07,
+ "loss": 0.0002,
+ "step": 3280
+ },
+ {
+ "epoch": 2.8,
+ "learning_rate": 5.867967055612794e-07,
+ "loss": 0.0,
+ "step": 3290
+ },
+ {
+ "epoch": 2.8,
+ "learning_rate": 5.398545598782528e-07,
+ "loss": 0.0002,
+ "step": 3300
+ },
+ {
+ "epoch": 2.81,
+ "learning_rate": 4.948486599581276e-07,
+ "loss": 0.0,
+ "step": 3310
+ },
+ {
+ "epoch": 2.82,
+ "learning_rate": 4.517825684323324e-07,
+ "loss": 0.0,
+ "step": 3320
+ },
+ {
+ "epoch": 2.83,
+ "learning_rate": 4.1065969437860954e-07,
+ "loss": 0.0001,
+ "step": 3330
+ },
+ {
+ "epoch": 2.84,
+ "learning_rate": 3.714832930511336e-07,
+ "loss": 0.0,
+ "step": 3340
+ },
+ {
+ "epoch": 2.85,
+ "learning_rate": 3.342564656228453e-07,
+ "loss": 0.0,
+ "step": 3350
+ },
+ {
+ "epoch": 2.85,
+ "learning_rate": 2.9898215893995054e-07,
+ "loss": 0.0,
+ "step": 3360
+ },
+ {
+ "epoch": 2.86,
+ "learning_rate": 2.6566316528866264e-07,
+ "loss": 0.0052,
+ "step": 3370
+ },
+ {
+ "epoch": 2.87,
+ "learning_rate": 2.3430212217415982e-07,
+ "loss": 0.0,
+ "step": 3380
+ },
+ {
+ "epoch": 2.88,
+ "learning_rate": 2.0490151211180752e-07,
+ "loss": 0.0,
+ "step": 3390
+ },
+ {
+ "epoch": 2.89,
+ "learning_rate": 1.7746366243063806e-07,
+ "loss": 0.0,
+ "step": 3400
+ },
+ {
+ "epoch": 2.9,
+ "learning_rate": 1.5199074508912836e-07,
+ "loss": 0.0001,
+ "step": 3410
+ },
+ {
+ "epoch": 2.91,
+ "learning_rate": 1.2848477650325984e-07,
+ "loss": 0.0001,
+ "step": 3420
+ },
+ {
+ "epoch": 2.91,
+ "learning_rate": 1.069476173869155e-07,
+ "loss": 0.0,
+ "step": 3430
+ },
+ {
+ "epoch": 2.92,
+ "learning_rate": 8.738097260456713e-08,
+ "loss": 0.0,
+ "step": 3440
+ },
+ {
+ "epoch": 2.93,
+ "learning_rate": 6.978639103634444e-08,
+ "loss": 0.0042,
+ "step": 3450
+ },
+ {
+ "epoch": 2.94,
+ "learning_rate": 5.41652654553998e-08,
+ "loss": 0.0004,
+ "step": 3460
+ },
+ {
+ "epoch": 2.95,
+ "learning_rate": 4.051883241767973e-08,
+ "loss": 0.0042,
+ "step": 3470
+ },
+ {
+ "epoch": 2.96,
+ "learning_rate": 2.8848172164025465e-08,
+ "loss": 0.0001,
+ "step": 3480
+ },
+ {
+ "epoch": 2.97,
+ "learning_rate": 1.915420853467187e-08,
+ "loss": 0.0,
+ "step": 3490
+ },
+ {
+ "epoch": 2.97,
+ "learning_rate": 1.1437708896108733e-08,
+ "loss": 0.002,
+ "step": 3500
+ },
+ {
+ "epoch": 2.97,
+ "eval_loss": 0.014973307959735394,
+ "eval_runtime": 87.9852,
+ "eval_samples_per_second": 22.538,
+ "eval_steps_per_second": 2.819,
+ "step": 3500
+ },
+ {
+ "epoch": 2.98,
+ "learning_rate": 6.1840990213946074e-09,
+ "loss": 0.0064,
+ "step": 3510
+ },
+ {
+ "epoch": 2.99,
+ "learning_rate": 2.2263343925993386e-09,
+ "loss": 0.0,
+ "step": 3520
+ },
+ {
+ "epoch": 3.0,
+ "learning_rate": 2.4737375172445563e-10,
+ "loss": 0.0,
+ "step": 3530
+ },
+ {
+ "epoch": 3.0,
+ "step": 3531,
+ "total_flos": 5.305750058460119e+17,
+ "train_loss": 0.06714861565509712,
+ "train_runtime": 17560.0547,
+ "train_samples_per_second": 6.434,
+ "train_steps_per_second": 0.201
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 3531,
+ "num_train_epochs": 3,
+ "save_steps": 1000,
+ "total_flos": 5.305750058460119e+17,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/LLM-Detector-V1-4w/training_args.bin b/LLM-Detector-V1-4w/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e37f40cb61d45b6c2efd87b62a5ee72c12d2b4c2
--- /dev/null
+++ b/LLM-Detector-V1-4w/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c31eb820fabf5021fa0eda935da3d201c65c7331d3ce4ce4ad4631151a6068e9
+size 4664
diff --git a/LLM-Detector-V1-4w/training_eval_loss.png b/LLM-Detector-V1-4w/training_eval_loss.png
new file mode 100644
index 0000000000000000000000000000000000000000..5438d86dfc1f7798d809b1da81eacd253bc350de
Binary files /dev/null and b/LLM-Detector-V1-4w/training_eval_loss.png differ
diff --git a/LLM-Detector-V1-4w/training_loss.png b/LLM-Detector-V1-4w/training_loss.png
new file mode 100644
index 0000000000000000000000000000000000000000..9205d065a26789bd9c4899a68ed68bc9a8672034
Binary files /dev/null and b/LLM-Detector-V1-4w/training_loss.png differ