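# Training and dataset configuration for LoRA fine-tuning of CodeLlama, Phi-2, and
# DeepSeek-Coder on arithmetic / code-generation curricula.
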
DEVICE = "cuda:0"
DEBUG = False
config = {
"model": {
"codellama": {
"base_model_id": "codellama/CodeLlama-7b-hf",
"quantitize": "int8",
"dataset": "Arithmetic_Simple",
"data_collator": "DataCollatorForSeq2Seq",
"peft_config": {
"lora_config": {
"r": 16,
"lora_alpha": 16,
"target_modules": [
"q_proj",
"k_proj",
"v_proj",
"o_proj",
"gate_proj",
"up_proj",
"down_proj",
],
"lora_dropout": 0.05,
"bias": "none",
"task_type": "CAUSAL_LM",
},
},
"training_args": {
"output_dir": "codellama-output",
"warmup_steps": 100,
"per_device_train_batch_size": 1,
"per_device_eval_batch_size": 1,
"gradient_accumulation_steps": 4,
"max_steps": 10000,
"learning_rate": 3e-4,
"optim": "adamw_torch",
"logging_dir": "codellama-output-logs",
"logging_steps": 10,
"save_strategy": "steps",
"save_steps": 500,
"load_best_model_at_end": False,
"group_by_length": True,
"fp16": True,
"evaluation_strategy": "steps",
"eval_steps": 1000,
# Uncomment this line to set a custom integration to report the results and logs to
# With transformers v4, the default value is "all"
# With transformers v5, the default value will be "none"
# "report_to": "wandb",
                # Uncomment this line to set a custom run name (otherwise auto-generated
                # names like "eternal-brook-20" will be used)
                # "run_name": "codellama-code-finetune",
                # Hugging Face built-in evaluation is already enabled above via
                # "evaluation_strategy" / "eval_steps"; the commented values below are an
                # alternative cadence
                # "evaluation_strategy": "steps",
                # "eval_steps": 200,
                # "do_eval": True,
},
"tokenizer": {
"tokenize_config": {
"truncation": True,
"max_length": 192,
"padding": "max_length",
},
"prompt_template": "config/qa_template.txt",
},
},
"phi-2": {
"base_model_id": "microsoft/phi-2",
"quantitize": "fp16",
"dataset": "Arithmetic_Simple",
"data_collator": "DataCollatorForLanguageModeling",
"peft_config":{
"lora_config": {
"r": 32,
"lora_alpha": 64,
"target_modules": [
"q_proj",
"k_proj",
"v_proj",
"dense",
"fc1",
"fc2",
],
"bias": "none",
"lora_dropout": 0.05,
"task_type": "CAUSAL_LM",
},
},
"training_args": {
"output_dir": "phi2-output",
"warmup_steps": 500,
# fp16: ~21.5GiB VRAM; ~40h to finish
"per_device_train_batch_size": 1,
"per_device_eval_batch_size": 1,
"gradient_accumulation_steps": 4,
"max_steps": 100000,
"learning_rate": 5e-5,
"optim": "adamw_torch",
"logging_dir": "phi2-output-logs",
"logging_steps": 100,
"save_strategy": "steps",
"save_steps": 500,
"evaluation_strategy": "steps",
"eval_steps": 500,
"fp16": True,
},
"tokenizer": {
"tokenize_config": {
"truncation": True,
"max_length": 512,
"padding": "max_length",
},
"prompt_template": "config/qa_template.txt",
},
},
"deepseek": {
"base_model_id": "deepseek-ai/deepseek-coder-1.3b-instruct",
# Quantization config guidelines for DeepSeek
# If running on T4, RTX 20xx, or anything older like V100: quantitize="fp16"
# If running on L4, A100, RTX 30xx/40xx, or anything Ampere or later: quantitize="bf16"
"quantitize": "fp16",
"dataset": "Arithmetic_Hard_Third",
"data_collator": "DataCollatorForLanguageModeling",
"peft_config":{
"lora": { # trainable params = 30.0 M
"r": 32,
"lora_alpha": 64,
"target_modules": [
"q_proj",
"k_proj",
"v_proj",
"o_proj",
"gate_proj",
"up_proj",
"down_proj",
],
"bias": "none",
"lora_dropout": 0.05,
"task_type": "CAUSAL_LM",
},
"lora_large": { # trainable params = not checked yet
"r": 128,
"lora_alpha": 256,
"target_modules": [
"q_proj",
"k_proj",
"v_proj",
"o_proj",
"gate_proj",
"up_proj",
"down_proj",
],
"bias": "none",
"lora_dropout": 0.05,
"task_type": "CAUSAL_LM",
},
},
"p_tuning_config": { # Doesn't work, PEFT interface issues
"num_virtual_tokens": 16,
"num_transformer_submodules": 1,
"token_dim": 2048, # NOTE(Shih-Lun): should change w/ base LLM
"encoder_hidden_size": 2048,
"task_type": "CAUSAL_LM",
},
"training_args": {
"warmup_steps": 500,
# bf16: ~21.0GiB VRAM; ~21h to finish
"per_device_train_batch_size": 4,
"per_device_eval_batch_size": 4,
"gradient_accumulation_steps": 1,
"max_steps": 35000,
"learning_rate": 2e-5,
"optim": "adamw_torch",
"logging_steps": 100//10,
"save_strategy": "steps",
"save_total_limit": 10,
"save_steps": 500,
"evaluation_strategy": "steps",
"eval_steps": 500,
"weight_decay": 0.01,
"report_to": "wandb",
"dataloader_num_workers": 4,
"load_best_model_at_end": True,
# fp16/bf16 config guidelines for DeepSeek
# If running full tuning: Don't set fp16/bf16
# If running LoRA: Match the quantization setting (either fp16=True or bf16=True)
"fp16": True,
},
"tokenizer": {
"tokenize_config": {
"truncation": True,
"max_length": 512,
"padding": "max_length",
},
"prompt_template": "config/qa_template.txt",
},
},
},
"dataset": {
"simple_dataset": {
"type": "huggingface", # Public datasets on the Hugging Face Hub (only for testing)
"dataset_purpose": "downstream",
"name": "b-mc2/sql-create-context",
"train_split": 0.9,
"max_train_size": 100,
"filling_field": ["question", "context", "answer"],
},
"testdset": {
"type": "local", # Local files
"dataset_purpose": "downstream",
"train_file": "data/Test/TestDataset.json",
"val_file": "data/Test/TestDataset.json",
"test_file": "data/Test/TestDataset.json",
"filling_field": ["prompted_question", "answer"],
},
"deepmind_mathematics": {
"type": "local",
"dataset_purpose": "downstream",
"filling_field": ["question", "answer"],
"epoch_length":{
"train": 54*4*50, # Module * BatchSize * lesson_num in each module
"val": 54*4*1,
},
},
"mixture_codegen": {
"filling_field": ["Question", "Answer"],
"dataset_purpose": "downstream",
},
"Arithmetic_Hard_prompt_C12_with_gh": {
"filling_field": ["Question", "Answer"],
"dataset_purpose": "downstream",
},
"MathQA_Python_loader": {
"type": "list-like", # List-like objects (we're going to use this for ablations)
"dataset_purpose": "downstream",
"train": "data/MathQA_Python_processed/mathqa_python_train_clean_final.json",
"val": "data/MathQA_Python_processed/mathqa_python_dev_clean_final.json",
"test": "data/MathQA_Python_processed/mathqa_python_test_clean_final.json",
"filling_field": ["Question", "Answer"],
},
"APPS_loader": {
"type": "list-like", # List-like objects (we're going to use this for ablations)
"dataset_purpose": "downstream",
"train": "data/APPS/apps_train.json",
"val": "data/APPS/apps_dev.json",
"test": "data/APPS/test/apps_test_75.json",
"filling_field": ["Question", "Answer"],
},
"MBPP_loader": {
"type": "list-like",
"dataset_purpose": "downstream",
"train": "data/MBPP/mbpp_train.json",
"val": "data/MBPP/mbpp_dev.json",
"test": "data/MBPP/mbpp_test.json",
"filling_field": ["Question", "Answer"],
},
"Arithmetic_Simple": {
"type": "list-like",
"dataset_purpose": "downstream",
"attributes": {
"subjects": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"lessons": [
"Max_Ops1_Bounds0_100",
"Max_Ops1_Bounds0_1000",
"Max_Ops2_Bounds0_100",
"Max_Ops2_Bounds0_1000",
"Max_Ops3_Bounds0_100",
# "Max_Ops3_Bounds0_1000",
# "Max_Ops4_Bounds0_100",
# "Max_Ops4_Bounds0_1000",
# "Max_Ops5_Bounds0_100",
# "Max_Ops5_Bounds0_1000",
]
},
"train": "data/Arithmetic/Curriculum_Simple",
"val": "data/Arithmetic/Curriculum_Simple",
"test": "data/Arithmetic/Curriculum_Simple",
"filling_field": ["Question", "Answer"],
},
"Arithmetic_Simple_First_Half": {
"type": "list-like",
"dataset_purpose": "downstream",
"attributes": {
"subjects": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"lessons": [
"Max_Ops1_Bounds0_100",
"Max_Ops1_Bounds0_1000",
"Max_Ops2_Bounds0_100",
"Max_Ops2_Bounds0_1000",
"Max_Ops3_Bounds0_100",
]
},
"train": "data/Arithmetic/Curriculum_Simple",
"val": "data/Arithmetic/Curriculum_Simple",
"test": "data/Arithmetic/Curriculum_Simple",
"filling_field": ["Question", "Answer"],
},
"Arithmetic_Simple_Second_Half": {
"type": "list-like",
"dataset_purpose": "downstream",
"attributes": {
"subjects": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"lessons": [
"Max_Ops3_Bounds0_1000",
"Max_Ops4_Bounds0_100",
"Max_Ops4_Bounds0_1000",
"Max_Ops5_Bounds0_100",
"Max_Ops5_Bounds0_1000",
]
},
"train": "data/Arithmetic/Curriculum_Simple",
"val": "data/Arithmetic/Curriculum_Simple",
"test": "data/Arithmetic/Curriculum_Simple",
"filling_field": ["Question", "Answer"],
},
"Arithmetic_Hard": {
"type": "list-like",
"dataset_purpose": "downstream",
"attributes": {
"subjects": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"lessons": [
"Max_Ops1_Bounds-1000_1000",
"Max_Ops1_Bounds-100_100",
"Max_Ops1_Bounds0_100",
"Max_Ops1_Bounds0_1000",
"Max_Ops2_Bounds-1000_1000",
"Max_Ops2_Bounds-100_100",
"Max_Ops2_Bounds0_100",
"Max_Ops2_Bounds0_1000",
"Max_Ops3_Bounds-1000_1000",
"Max_Ops3_Bounds-100_100",
"Max_Ops3_Bounds0_100",
"Max_Ops3_Bounds0_1000",
"Max_Ops4_Bounds-1000_1000",
"Max_Ops4_Bounds-100_100",
"Max_Ops4_Bounds0_100",
"Max_Ops4_Bounds0_1000",
"Max_Ops5_Bounds-1000_1000",
"Max_Ops5_Bounds-100_100",
"Max_Ops5_Bounds0_100",
"Max_Ops5_Bounds0_1000",
"Max_Ops6_Bounds-1000_1000",
"Max_Ops6_Bounds-100_100",
"Max_Ops6_Bounds0_100",
"Max_Ops6_Bounds0_1000",
"Max_Ops7_Bounds-1000_1000",
"Max_Ops7_Bounds-100_100",
"Max_Ops7_Bounds0_100",
"Max_Ops7_Bounds0_1000",
"Max_Ops8_Bounds-1000_1000",
"Max_Ops8_Bounds-100_100",
"Max_Ops8_Bounds0_100",
"Max_Ops8_Bounds0_1000",
"Max_Ops9_Bounds-1000_1000",
"Max_Ops9_Bounds-100_100",
"Max_Ops9_Bounds0_100",
"Max_Ops9_Bounds0_1000",
"Max_Ops10_Bounds-1000_1000",
"Max_Ops10_Bounds-100_100",
"Max_Ops10_Bounds0_100",
"Max_Ops10_Bounds0_1000",
]
},
"train": "data/Arithmetic/Curriculum_Hard",
"val": "data/Arithmetic/Curriculum_Hard",
"test": "data/Arithmetic/Curriculum_Hard",
"filling_field": ["Question", "Answer"],
},
"Arithmetic_Hard_First": {
"type": "list-like",
"dataset_purpose": "downstream",
"attributes": {
"subjects": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"lessons": [
"Max_Ops1_Bounds-1000_1000",
"Max_Ops1_Bounds-100_100",
"Max_Ops1_Bounds0_100",
"Max_Ops1_Bounds0_1000",
"Max_Ops2_Bounds-1000_1000",
"Max_Ops2_Bounds-100_100",
"Max_Ops2_Bounds0_100",
"Max_Ops2_Bounds0_1000",
"Max_Ops3_Bounds-1000_1000",
"Max_Ops3_Bounds-100_100",
"Max_Ops3_Bounds0_100",
"Max_Ops3_Bounds0_1000",
"Max_Ops4_Bounds-1000_1000",
"Max_Ops4_Bounds-100_100",
"Max_Ops4_Bounds0_100",
"Max_Ops4_Bounds0_1000",
]
},
"train": "data/Arithmetic/Curriculum_Hard",
"val": "data/Arithmetic/Curriculum_Hard",
"test": "data/Arithmetic/Curriculum_Hard",
"filling_field": ["Question", "Answer"],
},
"Arithmetic_Hard_Second": {
"type": "list-like",
"dataset_purpose": "downstream",
"attributes": {
"subjects": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"lessons": [
"Max_Ops5_Bounds-1000_1000",
"Max_Ops5_Bounds-100_100",
"Max_Ops5_Bounds0_100",
"Max_Ops5_Bounds0_1000",
"Max_Ops6_Bounds-1000_1000",
"Max_Ops6_Bounds-100_100",
"Max_Ops6_Bounds0_100",
"Max_Ops6_Bounds0_1000",
"Max_Ops7_Bounds-1000_1000",
"Max_Ops7_Bounds-100_100",
"Max_Ops7_Bounds0_100",
"Max_Ops7_Bounds0_1000",
]
},
"train": "data/Arithmetic/Curriculum_Hard",
"val": "data/Arithmetic/Curriculum_Hard",
"test": "data/Arithmetic/Curriculum_Hard",
"filling_field": ["Question", "Answer"],
},
"Arithmetic_Hard_Third": {
"type": "list-like",
"dataset_purpose": "downstream",
"attributes": {
"subjects": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"lessons": [
"Max_Ops8_Bounds-1000_1000",
"Max_Ops8_Bounds-100_100",
"Max_Ops8_Bounds0_100",
"Max_Ops8_Bounds0_1000",
"Max_Ops9_Bounds-1000_1000",
"Max_Ops9_Bounds-100_100",
"Max_Ops9_Bounds0_100",
"Max_Ops9_Bounds0_1000",
"Max_Ops10_Bounds-1000_1000",
"Max_Ops10_Bounds-100_100",
"Max_Ops10_Bounds0_100",
"Max_Ops10_Bounds0_1000",
]
},
"train": "data/Arithmetic/Curriculum_Hard",
"val": "data/Arithmetic/Curriculum_Hard",
"test": "data/Arithmetic/Curriculum_Hard",
"filling_field": ["Question", "Answer"],
},
"Arithmetic_Hard_prompt_C11": {
"type": "list-like",
"dataset_purpose": "downstream",
"attributes": {
"subjects": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"lessons": [
"Max_Ops1_Bounds-1000_1000",
"Max_Ops1_Bounds-100_100",
"Max_Ops1_Bounds0_100",
"Max_Ops1_Bounds0_1000",
"Max_Ops2_Bounds-1000_1000",
"Max_Ops2_Bounds-100_100",
"Max_Ops2_Bounds0_100",
"Max_Ops2_Bounds0_1000",
"Max_Ops3_Bounds-1000_1000",
"Max_Ops3_Bounds-100_100",
"Max_Ops3_Bounds0_100",
"Max_Ops3_Bounds0_1000",
"Max_Ops4_Bounds-1000_1000",
"Max_Ops4_Bounds-100_100",
"Max_Ops4_Bounds0_100",
"Max_Ops4_Bounds0_1000",
"Max_Ops5_Bounds-1000_1000",
"Max_Ops5_Bounds-100_100",
"Max_Ops5_Bounds0_100",
"Max_Ops5_Bounds0_1000",
"Max_Ops6_Bounds-1000_1000",
"Max_Ops6_Bounds-100_100",
"Max_Ops6_Bounds0_100",
"Max_Ops6_Bounds0_1000",
"Max_Ops7_Bounds-1000_1000",
"Max_Ops7_Bounds-100_100",
"Max_Ops7_Bounds0_100",
"Max_Ops7_Bounds0_1000",
"Max_Ops8_Bounds-1000_1000",
"Max_Ops8_Bounds-100_100",
"Max_Ops8_Bounds0_100",
"Max_Ops8_Bounds0_1000",
"Max_Ops9_Bounds-1000_1000",
"Max_Ops9_Bounds-100_100",
"Max_Ops9_Bounds0_100",
"Max_Ops9_Bounds0_1000",
"Max_Ops10_Bounds-1000_1000",
"Max_Ops10_Bounds-100_100",
"Max_Ops10_Bounds0_100",
"Max_Ops10_Bounds0_1000",
]
},
"train": "data/Arithmetic/Curriculum_Hard",
"val": "data/Arithmetic/Curriculum_Hard",
"test": "data/Arithmetic/Curriculum_Hard",
"filling_field": ["Question", "Answer"],
},
"Arithmetic_Hard_prompt_C12": {
"type": "list-like",
"dataset_purpose": "downstream",
"attributes": {
"subjects": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"lessons": [
"Max_Ops1_Bounds-1000_1000",
"Max_Ops1_Bounds-100_100",
"Max_Ops1_Bounds0_100",
"Max_Ops1_Bounds0_1000",
"Max_Ops2_Bounds-1000_1000",
"Max_Ops2_Bounds-100_100",
"Max_Ops2_Bounds0_100",
"Max_Ops2_Bounds0_1000",
"Max_Ops3_Bounds-1000_1000",
"Max_Ops3_Bounds-100_100",
"Max_Ops3_Bounds0_100",
"Max_Ops3_Bounds0_1000",
"Max_Ops4_Bounds-1000_1000",
"Max_Ops4_Bounds-100_100",
"Max_Ops4_Bounds0_100",
"Max_Ops4_Bounds0_1000",
"Max_Ops5_Bounds-1000_1000",
"Max_Ops5_Bounds-100_100",
"Max_Ops5_Bounds0_100",
"Max_Ops5_Bounds0_1000",
"Max_Ops6_Bounds-1000_1000",
"Max_Ops6_Bounds-100_100",
"Max_Ops6_Bounds0_100",
"Max_Ops6_Bounds0_1000",
"Max_Ops7_Bounds-1000_1000",
"Max_Ops7_Bounds-100_100",
"Max_Ops7_Bounds0_100",
"Max_Ops7_Bounds0_1000",
"Max_Ops8_Bounds-1000_1000",
"Max_Ops8_Bounds-100_100",
"Max_Ops8_Bounds0_100",
"Max_Ops8_Bounds0_1000",
"Max_Ops9_Bounds-1000_1000",
"Max_Ops9_Bounds-100_100",
"Max_Ops9_Bounds0_100",
"Max_Ops9_Bounds0_1000",
"Max_Ops10_Bounds-1000_1000",
"Max_Ops10_Bounds-100_100",
"Max_Ops10_Bounds0_100",
"Max_Ops10_Bounds0_1000",
]
},
"train": "data/Arithmetic/Curriculum_Hard",
"val": "data/Arithmetic/Curriculum_Hard",
"test": "data/Arithmetic/Curriculum_Hard",
"filling_field": ["Question", "Answer"],
},
"Arithmetic_Hard_prompt_C12_intermediate": {
"type": "list-like",
"dataset_purpose": "downstream",
"attributes": {
"subjects": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"lessons": [
"Max_Ops1_Bounds-1000_1000",
"Max_Ops1_Bounds-100_100",
"Max_Ops1_Bounds0_100",
"Max_Ops1_Bounds0_1000",
"Max_Ops2_Bounds-1000_1000",
"Max_Ops2_Bounds-100_100",
"Max_Ops2_Bounds0_100",
"Max_Ops2_Bounds0_1000",
"Max_Ops3_Bounds-1000_1000",
"Max_Ops3_Bounds-100_100",
"Max_Ops3_Bounds0_100",
"Max_Ops3_Bounds0_1000",
"Max_Ops4_Bounds-1000_1000",
"Max_Ops4_Bounds-100_100",
"Max_Ops4_Bounds0_100",
"Max_Ops4_Bounds0_1000",
"Max_Ops5_Bounds-1000_1000",
"Max_Ops5_Bounds-100_100",
"Max_Ops5_Bounds0_100",
"Max_Ops5_Bounds0_1000",
"Max_Ops6_Bounds-1000_1000",
"Max_Ops6_Bounds-100_100",
"Max_Ops6_Bounds0_100",
"Max_Ops6_Bounds0_1000",
"Max_Ops7_Bounds-1000_1000",
"Max_Ops7_Bounds-100_100",
"Max_Ops7_Bounds0_100",
"Max_Ops7_Bounds0_1000",
"Max_Ops8_Bounds-1000_1000",
"Max_Ops8_Bounds-100_100",
"Max_Ops8_Bounds0_100",
"Max_Ops8_Bounds0_1000",
"Max_Ops9_Bounds-1000_1000",
"Max_Ops9_Bounds-100_100",
"Max_Ops9_Bounds0_100",
"Max_Ops9_Bounds0_1000",
"Max_Ops10_Bounds-1000_1000",
"Max_Ops10_Bounds-100_100",
"Max_Ops10_Bounds0_100",
"Max_Ops10_Bounds0_1000",
]
},
"train": "data/Arithmetic/Curriculum_Hard",
"val": "data/Arithmetic/Curriculum_Hard",
"test": "data/Arithmetic/Curriculum_Hard",
"filling_field": ["Question", "Answer"],
},
"Arithmetic_XHard": {
"type": "list-like",
"dataset_purpose": "downstream",
"attributes": {
"subjects": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"lessons": [
"Max_Ops10_Bounds0_10000.json",
"Max_Ops10_Bounds0_1000.json",
"Max_Ops10_Bounds-10000_10000.json",
"Max_Ops10_Bounds-1000_1000.json",
"Max_Ops11_Bounds0_10000.json",
"Max_Ops11_Bounds0_1000.json",
"Max_Ops11_Bounds-10000_10000.json",
"Max_Ops11_Bounds-1000_1000.json",
"Max_Ops12_Bounds0_10000.json",
"Max_Ops12_Bounds0_1000.json",
"Max_Ops12_Bounds-10000_10000.json",
"Max_Ops12_Bounds-1000_1000.json",
"Max_Ops13_Bounds0_10000.json",
"Max_Ops13_Bounds0_1000.json",
"Max_Ops13_Bounds-10000_10000.json",
"Max_Ops13_Bounds-1000_1000.json",
"Max_Ops14_Bounds0_10000.json",
"Max_Ops14_Bounds0_1000.json",
"Max_Ops14_Bounds-10000_10000.json",
"Max_Ops14_Bounds-1000_1000.json",
"Max_Ops15_Bounds0_10000.json",
"Max_Ops15_Bounds0_1000.json",
"Max_Ops15_Bounds-10000_10000.json",
"Max_Ops15_Bounds-1000_1000.json",
"Max_Ops16_Bounds0_10000.json",
"Max_Ops16_Bounds0_1000.json",
"Max_Ops16_Bounds-10000_10000.json",
"Max_Ops16_Bounds-1000_1000.json",
"Max_Ops17_Bounds0_10000.json",
"Max_Ops17_Bounds0_1000.json",
"Max_Ops17_Bounds-10000_10000.json",
"Max_Ops17_Bounds-1000_1000.json",
"Max_Ops18_Bounds0_10000.json",
"Max_Ops18_Bounds0_1000.json",
"Max_Ops18_Bounds-10000_10000.json",
"Max_Ops18_Bounds-1000_1000.json",
"Max_Ops19_Bounds0_10000.json",
"Max_Ops19_Bounds0_1000.json",
"Max_Ops19_Bounds-10000_10000.json",
"Max_Ops19_Bounds-1000_1000.json",
"Max_Ops1_Bounds0_10000.json",
"Max_Ops1_Bounds0_1000.json",
"Max_Ops1_Bounds-10000_10000.json",
"Max_Ops1_Bounds-1000_1000.json",
"Max_Ops20_Bounds0_10000.json",
"Max_Ops20_Bounds0_1000.json",
"Max_Ops20_Bounds-10000_10000.json",
"Max_Ops20_Bounds-1000_1000.json",
"Max_Ops2_Bounds0_10000.json",
"Max_Ops2_Bounds0_1000.json",
"Max_Ops2_Bounds-10000_10000.json",
"Max_Ops2_Bounds-1000_1000.json",
"Max_Ops3_Bounds0_10000.json",
"Max_Ops3_Bounds0_1000.json",
"Max_Ops3_Bounds-10000_10000.json",
"Max_Ops3_Bounds-1000_1000.json",
"Max_Ops4_Bounds0_10000.json",
"Max_Ops4_Bounds0_1000.json",
"Max_Ops4_Bounds-10000_10000.json",
"Max_Ops4_Bounds-1000_1000.json",
"Max_Ops5_Bounds0_10000.json",
"Max_Ops5_Bounds0_1000.json",
"Max_Ops5_Bounds-10000_10000.json",
"Max_Ops5_Bounds-1000_1000.json",
"Max_Ops6_Bounds0_10000.json",
"Max_Ops6_Bounds0_1000.json",
"Max_Ops6_Bounds-10000_10000.json",
"Max_Ops6_Bounds-1000_1000.json",
"Max_Ops7_Bounds0_10000.json",
"Max_Ops7_Bounds0_1000.json",
"Max_Ops7_Bounds-10000_10000.json",
"Max_Ops7_Bounds-1000_1000.json",
"Max_Ops8_Bounds0_10000.json",
"Max_Ops8_Bounds0_1000.json",
"Max_Ops8_Bounds-10000_10000.json",
"Max_Ops8_Bounds-1000_1000.json",
"Max_Ops9_Bounds0_10000.json",
"Max_Ops9_Bounds0_1000.json",
"Max_Ops9_Bounds-10000_10000.json",
"Max_Ops9_Bounds-1000_1000.json",
]
},
"train": "data/Arithmetic/Curriculum_XHard",
"val": "data/Arithmetic/Curriculum_XHard",
"test": "data/Arithmetic/Curriculum_XHard",
"filling_field": ["Question", "Answer"],
},
"GSM8K": {
"type": "local",
"dataset_purpose": "downstream",
"train_file": "data/GSM8K/GSM8K_train.json",
"val_file": "data/GSM8K/GSM8K_test.json",
"test_file": "data/GSM8K/GSM8K_dev.json",
"filling_field": ["Body", "Question", "Answer"],
},
"APPS": {
"type": "local",
"dataset_purpose": "downstream",
"train_file": "data/APPS/apps_train.json",
"val_file": "data/APPS/apps_test.json",
"test_file": "data/APPS/apps_dev.json",
"filling_field": ["Body", "Question", "Answer"],
},
"ghcode_python": {
"type": "huggingface",
"dataset_purpose": "pretrain",
"name": "slseanwu/ghcode_python_split_700k",
"max_eval_size": 1000,
"max_train_size": 160000,
"filling_field": ["code"],
},
},
}
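
# Illustrative sketch (hypothetical helper, not part of the training pipeline itself):
# the per-model blocks above are plain dicts, so they can be splatted directly into the
# corresponding peft / transformers constructors.
def build_peft_and_training_args(model_key, lora_key="lora_config"):
    """Build a LoraConfig and TrainingArguments from one entry of config["model"]."""
    from peft import LoraConfig
    from transformers import TrainingArguments

    model_cfg = config["model"][model_key]
    lora_config = LoraConfig(**model_cfg["peft_config"][lora_key])
    training_args = TrainingArguments(**model_cfg["training_args"])
    return lora_config, training_args

# Example: lora_cfg, train_args = build_peft_and_training_args("codellama")
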
if DEBUG:
    # Quick overrides for local debugging runs
    config["epochs"] = 100
    config["save_steps"] = 10
    config["train_dataset"] = "local-test-train"
    config["val_dataset"] = "local-test-dev"
    config["test_dataset"] = "test-clean"
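
# Illustrative sketch of how the "quantitize" field could be mapped onto model-loading
# keyword arguments, following the guideline comments in the DeepSeek block (int8 via
# bitsandbytes, fp16 on pre-Ampere GPUs, bf16 on Ampere or later). The helper name is
# hypothetical; the actual loading code may differ.
def quantization_kwargs(quantitize):
    """Translate a "quantitize" value ("int8", "fp16", or "bf16") into
    AutoModelForCausalLM.from_pretrained() keyword arguments."""
    import torch

    if quantitize == "int8":
        from transformers import BitsAndBytesConfig
        return {"quantization_config": BitsAndBytesConfig(load_in_8bit=True)}
    if quantitize == "fp16":
        return {"torch_dtype": torch.float16}
    if quantitize == "bf16":
        return {"torch_dtype": torch.bfloat16}
    raise ValueError(f"Unsupported quantitize setting: {quantitize}")

# Example:
#   kwargs = quantization_kwargs(config["model"]["deepseek"]["quantitize"])
#   model = AutoModelForCausalLM.from_pretrained(
#       config["model"]["deepseek"]["base_model_id"], **kwargs)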