hienbm committed
Commit 02bb674
1 Parent(s): a014727

Training in progress, step 3
adapter_config.json CHANGED
@@ -20,8 +20,8 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
-    "v_proj"
+    "v_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:27873295bec5e73c6c51c65e38a593e00d021cdbabe964896511138e18f07232
+oid sha256:930c2c8531a25aee78949e34103db4f55f9c31db7ddc3a11246e072d3bc24a55
 size 27280152
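
The weight files in this repository are tracked with Git LFS, so the diff only touches the three-line pointer file (spec version, sha256 oid, byte size); the 27280152-byte adapter binary itself lives in LFS storage. A small sketch of reading such a pointer, using a hypothetical parse_lfs_pointer helper:

# Sketch: parsing a Git LFS pointer file like the one diffed above.
# parse_lfs_pointer is a hypothetical helper, not part of any library.
def parse_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("adapter_model.safetensors")
print(ptr["oid"])   # "sha256:930c2c85..." after this commit
print(ptr["size"])  # "27280152", roughly 27 MB of adapter weights
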
config.json CHANGED
@@ -1,24 +1,35 @@
 {
-  "dataset_name": "data/packaged_pretrain_dataset.parquet",
-  "num_proc": 1,
-  "max_seq_length": 32,
-  "seed": 0,
-  "optim": "adamw_torch",
-  "max_steps": 10000,
-  "per_device_train_batch_size": 2,
-  "learning_rate": 5e-05,
-  "weight_decay": 0,
-  "warmup_steps": 10,
-  "lr_scheduler_type": "linear",
-  "gradient_checkpointing": true,
-  "dataloader_num_workers": 2,
-  "bf16": true,
-  "gradient_accumulation_steps": 1,
-  "logging_steps": 3,
-  "report_to": [],
-  "save_strategy": "steps",
-  "save_steps": 3,
-  "save_total_limit": 1,
-  "push_to_hub": true,
-  "hub_model_id": "hienbm/psychology-llama3.1-8B"
-}
+  "_name_or_path": "meta-llama/Meta-Llama-3.1-8B",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 128000,
+  "eos_token_id": 128001,
+  "hidden_act": "silu",
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "max_position_embeddings": 131072,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 16,
+  "num_key_value_heads": 8,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": {
+    "factor": 8.0,
+    "high_freq_factor": 4.0,
+    "low_freq_factor": 1.0,
+    "original_max_position_embeddings": 8192,
+    "rope_type": "llama3"
+  },
+  "rope_theta": 500000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.44.0.dev0",
+  "use_cache": false,
+  "vocab_size": 128256
+}
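
The old config.json held the training script's hyperparameters; this commit replaces it with the standard transformers model config that push_to_hub writes for the checkpoint. As a sketch, the removed keys map almost one-to-one onto transformers.TrainingArguments (output_dir is an assumption, and dataset_name/num_proc/max_seq_length belong to data preprocessing rather than the trainer, so they are omitted):

# Sketch: the removed config keys expressed as transformers.TrainingArguments.
# Values mirror the old config.json; output_dir is an assumed placeholder.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="outputs",  # assumption: not recorded in the old file
    seed=0,
    optim="adamw_torch",
    max_steps=10000,
    per_device_train_batch_size=2,
    learning_rate=5e-05,
    weight_decay=0,
    warmup_steps=10,
    lr_scheduler_type="linear",
    gradient_checkpointing=True,
    dataloader_num_workers=2,
    bf16=True,
    gradient_accumulation_steps=1,
    logging_steps=3,
    report_to=[],
    save_strategy="steps",
    save_steps=3,
    save_total_limit=1,
    push_to_hub=True,
    hub_model_id="hienbm/psychology-llama3.1-8B",
)
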
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97007644242db3fec914aff8d0d85ee4892d7ac8d80b9ee951bbee421e156cf0
+size 2515639472
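
This adds a full ~2.5 GB model checkpoint alongside the ~27 MB adapter. A sketch of lazily inspecting its tensors without loading them into memory (assumes a local clone with LFS content pulled, so model.safetensors resolves to the real file rather than the pointer):

# Sketch: list tensor names and shapes in the new checkpoint lazily.
from safetensors import safe_open

with safe_open("model.safetensors", framework="pt") as f:
    for name in f.keys():
        # get_slice reads only the header metadata, not the tensor data
        print(name, f.get_slice(name).get_shape())
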
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:20b23c5062bdd3955b8a36281a8c22557a3103269212b4bfae070533996d521b
+oid sha256:0bb5ecac405a641762d526405359377cb588451eb0138aa06cb6661dd108a933
 size 5304