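# Axolotl config for Minotaur 16B at 8k context: a QLoRA fine-tune of
# bigcode/starcoderplus on a mixture of instruction and evaluation-style
# datasets (listed under `datasets` below).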
base_model: bigcode/starcoderplus
base_model_config: bigcode/starcoderplus
load_in_8bit: false
load_in_4bit: true
gptq: false
strict: false
push_dataset_to_hub: winglian
hf_use_auth_token: true
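# Each entry below names a Hugging Face dataset repo (optionally narrowed to
# specific data_files) plus a `type` selecting the axolotl prompt strategy
# used to format its rows into training samples.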
datasets:
  - path: winglian/evals
    data_files:
      - hf/ARC-Challenge.jsonl
      - hf/ARC-Easy.jsonl
      - hf/riddle_sense.jsonl
      - hf/piqa.jsonl
    type: explainchoice:chat
  - path: winglian/evals
    data_files:
      - hf/gsm8k.jsonl
      - hf/winogrande.jsonl
    type: alpaca_chat.load_qa
  - path: winglian/evals
    data_files:
      - custom/n_task.jsonl
      - custom/misconceptions.jsonl
      - custom/context_insensitivity.jsonl
    type: alpaca_chat
  - path: camel-ai/math
    type: alpaca_chat.load_camel_ai
  - path: camel-ai/biology
    type: alpaca_chat.load_camel_ai
  - path: camel-ai/physics
    type: alpaca_chat.load_camel_ai
  - path: camel-ai/chemistry
    type: alpaca_chat.load_camel_ai
  - path: winglian/evals
    data_files:
      - custom/in_context_qa.jsonl
    type: context_qa
  - path: winglian/evals
    data_files:
      - custom/in_context_qa.jsonl
    type: context_qa.load_404
  - path: winglian/evals
    data_files:
      - custom/jokes_explained_500up.jsonl
    type: sharegpt_jokes
  - path: winglian/evals
    data_files:
      - custom/classify-self-chat.sharegpt.jsonl
      - custom/coding-self-chat.sharegpt.jsonl
      - custom/prose-gpt4.sharegpt.jsonl
      - custom/prose-rewrite-gpt4.sharegpt.jsonl
    type: sharegpt_simple.load_role
  - path: winglian/evals
    data_files:
      - openai/tldr.jsonl
    type: summarizetldr:chat
  - path: winglian/evals
    data_files:
      - hellaswag/hellaswag.jsonl
    type: explainchoice:chat
  - path: metaeval/ScienceQA_text_only
    type: concisechoice:chat
  - path: teknium/GPT4-LLM-Cleaned
    type: alpaca_chat
  - path: teknium/GPTeacher-General-Instruct
    data_files: gpt4-instruct-similarity-0.6-dataset.json
    type: gpteacher:chat
  - path: QingyiSi/Alpaca-CoT
    data_files:
      - Chain-of-Thought/formatted_cot_data/aqua_train.json
      - Chain-of-Thought/formatted_cot_data/creak_train.json
      - Chain-of-Thought/formatted_cot_data/ecqa_train.json
      - Chain-of-Thought/formatted_cot_data/esnli_train.json
      - Chain-of-Thought/formatted_cot_data/qasc_train.json
      - Chain-of-Thought/formatted_cot_data/qed_train.json
      - Chain-of-Thought/formatted_cot_data/sensemaking_train.json
      - Chain-of-Thought/formatted_cot_data/strategyqa_train.json
      - GPTeacher/Roleplay/formatted_roleplay-similarity_0.6-instruct-dataset.json
    type: alpaca_chat
  - path: ehartford/WizardLM_alpaca_evol_instruct_70k_unfiltered
    type: alpaca_chat
  - path: ehartford/wizard_vicuna_70k_unfiltered
    type: sharegpt:chat
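# Preprocessed/tokenized data is cached under dataset_prepared_path so repeat
# runs can skip tokenization; val_set_size: 0.01 holds out 1% of samples for
# evaluation.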
dataset_prepared_path: last_run_prepared
val_set_size: 0.01
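# QLoRA: the base model is loaded in 4-bit (load_in_4bit above) and kept
# frozen while low-rank adapters are trained. c_attn (fused QKV), c_proj,
# and c_fc are the attention/MLP projection layers in the GPT-BigCode
# (StarCoder) architecture; lora_target_linear: true extends the adapters
# to all linear layers.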
adapter: qlora
lora_model_dir:
sequence_len: 8192
max_packed_sequence_len: 8192
lora_r: 40
lora_alpha: 32
lora_dropout: 0.1
lora_target_modules:
  - c_attn
  - c_proj
  - c_fc
lora_target_linear: true
lora_fan_in_fan_out:
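# Weights & Biases logging; keys left blank fall back to axolotl defaults.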
wandb_project: minotaur-16b-8k
wandb_watch:
wandb_run_id:
wandb_log_model:
output_dir: ./minotaur-16b-8k
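# Per-device batch of 1 with no gradient accumulation; the effective global
# batch size is micro_batch_size * gradient_accumulation_steps * GPU count.
# adamw_bnb_8bit is the bitsandbytes 8-bit AdamW optimizer, which sharply
# reduces optimizer-state memory versus fp32 AdamW.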
gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 3
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.00013
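# train_on_inputs: false masks prompt tokens from the loss so only responses
# are learned; group_by_length buckets similar-length samples to cut padding.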
train_on_inputs: false
group_by_length: true
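# Mixed precision: bf16 compute with TF32 matmuls (Ampere-or-newer GPUs);
# gradient checkpointing trades recompute for memory, which matters at an
# 8k sequence length.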
bf16: true
fp16: false
tf32: true
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention: false
flash_attention:
gptq_groupsize:
gptq_model_v1:
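# Schedule: 100 warmup steps into the cosine decay, evaluation every 20
# steps, a checkpoint every 51 steps.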
warmup_steps: 100
eval_steps: 20
save_steps: 51
load_best_model_at_end: false
debug:
deepspeed:
weight_decay: 0.01
fsdp:
fsdp_config:
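# The StarCoder tokenizer ships a single <|endoftext|> special token, so it
# is mapped to all four special-token roles here.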
special_tokens:
  pad_token: "<|endoftext|>"
  bos_token: "<|endoftext|>"
  eos_token: "<|endoftext|>"
  unk_token: "<|endoftext|>"