WangzaiRucer committed on
Commit
d55055c
1 Parent(s): 90c50ae

remove unnecessary file and update README

Files changed (5)
  1. README.md +6 -4
  2. finetuning_args.json +0 -16
  3. rng_state.pth +0 -3
  4. trainer_state.json +0 -67
  5. training_args.bin +0 -3
README.md CHANGED
@@ -3,13 +3,15 @@ library_name: peft
  tags:
  - text-to-sql
  - spider
- - ' text2sql'
+ - 'text2sql'
+
  ---
  ## Introduce
- this is a text-to-sql peft module ,which is trained on CodeLlama-13b-Instruct-hf with LoRA SFT by spider training dataset.
- ## Training procedure
+ This folder contains text-to-sql LoRA adapter weights fine-tuned from the CodeLlama-13b-Instruct-hf model through the [DB-GPT-Hub](https://github.com/eosphoros-ai/DB-GPT-Hub/tree/main/dbgpt_hub) project. The training data comes from the Spider training set, and these weights achieve an execution accuracy of approximately 0.788 on the Spider evaluation set.
+ To merge these adapter weights with [CodeLlama-13b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf/tree/main), refer to `dbgpt_hub/scripts/export_merge.sh` in the [DB-GPT-Hub](https://github.com/eosphoros-ai/DB-GPT-Hub/tree/main/dbgpt_hub) project.
+ If you find these weight files or the DB-GPT-Hub project helpful for your work, starring our GitHub project [DB-GPT-Hub](https://github.com/eosphoros-ai/DB-GPT-Hub/tree/main/dbgpt_hub) would be a great encouragement for us to release more weight files.

  ### Framework versions


- - PEFT 0.4.0
+ - PEFT 0.4.0
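
For readers who want a quick idea of what the merge step in `dbgpt_hub/scripts/export_merge.sh` amounts to, here is a minimal sketch using the `peft` API. This is not the project's actual script; the adapter path and output directory below are placeholders.

```python
# Minimal sketch (not DB-GPT-Hub's export_merge.sh): merge a LoRA adapter
# into CodeLlama-13b-Instruct-hf with peft. Paths are placeholders.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "codellama/CodeLlama-13b-Instruct-hf"
adapter_path = "./text2sql-lora-adapter"        # placeholder: this folder's weights

base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="auto", device_map="auto")
model = PeftModel.from_pretrained(base, adapter_path)   # attach the LoRA adapter
merged = model.merge_and_unload()                       # fold LoRA deltas into the base weights

tokenizer = AutoTokenizer.from_pretrained(base_id)
merged.save_pretrained("./codellama-13b-text2sql-merged")
tokenizer.save_pretrained("./codellama-13b-text2sql-merged")
```
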
finetuning_args.json DELETED
@@ -1,16 +0,0 @@
- {
-   "dpo_beta": 0.1,
-   "finetuning_type": "lora",
-   "lora_alpha": 32.0,
-   "lora_dropout": 0.1,
-   "lora_rank": 64,
-   "lora_target": [
-     "q_proj",
-     "v_proj"
-   ],
-   "name_module_trainable": "mlp",
-   "num_hidden_layers": 32,
-   "num_layer_trainable": 3,
-   "ppo_score_norm": false,
-   "resume_lora_training": true
- }
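
The deleted `finetuning_args.json` above records the LoRA hyperparameters used for fine-tuning. As a rough illustration (the field mapping is an assumption, not DB-GPT-Hub's actual configuration code), the same settings expressed as a `peft` `LoraConfig` would look like:

```python
# Rough peft equivalent of the deleted finetuning_args.json
# (an assumed mapping, not DB-GPT-Hub's exact code).
from peft import LoraConfig

lora_config = LoraConfig(
    r=64,                                 # "lora_rank"
    lora_alpha=32,                        # "lora_alpha"
    lora_dropout=0.1,                     # "lora_dropout"
    target_modules=["q_proj", "v_proj"],  # "lora_target"
    task_type="CAUSAL_LM",                # causal LM fine-tuning
)
```
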
rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:295fd0124fd4af5b98b14acffeb4407253dd0e74441344e646e94f1841b842a2
- size 14575
trainer_state.json DELETED
@@ -1,67 +0,0 @@
- {
-   "best_metric": null,
-   "best_model_checkpoint": null,
-   "epoch": 7.3911537128998726,
-   "eval_steps": 500,
-   "global_step": 4000,
-   "is_hyper_param_search": false,
-   "is_local_process_zero": true,
-   "is_world_process_zero": true,
-   "log_history": [
-     {
-       "epoch": 0.92,
-       "learning_rate": 0.0001934857749846208,
-       "loss": 0.1563,
-       "step": 500
-     },
-     {
-       "epoch": 1.85,
-       "learning_rate": 0.00017479180248950295,
-       "loss": 0.0667,
-       "step": 1000
-     },
-     {
-       "epoch": 2.77,
-       "learning_rate": 0.00014635361737993667,
-       "loss": 0.0389,
-       "step": 1500
-     },
-     {
-       "epoch": 3.7,
-       "learning_rate": 0.00011187627439257638,
-       "loss": 0.0217,
-       "step": 2000
-     },
-     {
-       "epoch": 4.62,
-       "learning_rate": 7.58516369304635e-05,
-       "loss": 0.012,
-       "step": 2500
-     },
-     {
-       "epoch": 5.54,
-       "learning_rate": 4.2973156884111344e-05,
-       "loss": 0.0058,
-       "step": 3000
-     },
-     {
-       "epoch": 6.47,
-       "learning_rate": 1.752439059723171e-05,
-       "loss": 0.0025,
-       "step": 3500
-     },
-     {
-       "epoch": 7.39,
-       "learning_rate": 2.8209178689553083e-06,
-       "loss": 0.0014,
-       "step": 4000
-     }
-   ],
-   "logging_steps": 500,
-   "max_steps": 4328,
-   "num_train_epochs": 8,
-   "save_steps": 2000,
-   "total_flos": 3.256255138554317e+18,
-   "trial_name": null,
-   "trial_params": null
- }
training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:26ab201b1691dcc90c25344dfa536d42c15fbb4ac7ce976f99dfd505fa7fb1bc
- size 3496