Nekochu's picture
Update README.md
926fc3b verified
|
raw
history blame
5.75 kB
metadata
model_creator: Nekochu
quantized_by: Nekochu
model_name: Llama-3.1 8B German ORPO
pretty_name: Llama-3.1 8B German ORPO
model_type: llama3.1
prompt_template: >-
  Below is an instruction that describes a task. Write a response that
  appropriately completes the request. ### Instruction: {Instruction} {summary}
  ### input: {category} ### Response: {prompt}
library_name: peft
license: llama3.1
base_model: meta-llama/Meta-Llama-3.1-8B-Instruct
tags:
  - llama-factory
  - lora
datasets:
  - mayflowergmbh/intel_orca_dpo_pairs_de
  - LeoLM/OpenSchnabeltier
  - LeoLM/German_Songs
  - LeoLM/German_Poems
  - bjoernp/ultrachat_de
  - mayflowergmbh/ultra-chat_de
  - mayflowergmbh/airoboros-3.0_de
  - mayflowergmbh/booksum_de
  - mayflowergmbh/dolphin_de
  - mayflowergmbh/evol-instruct_de
  - mayflowergmbh/openschnabeltier_de
  - mayflowergmbh/alpaca-gpt4_de
  - mayflowergmbh/dolly-15k_de
  - mayflowergmbh/oasst_de
language:
  - de
  - en
pipeline_tag: text-generation
task_categories:
  - question-answering
  - text2text-generation
  - conversational
inference: true
model-index:
  - name: Llama-3.1-8B-German-ORPO
    results: []
  • Fine-tuning of Llama-3.1-8B on German datasets. Same datasets used in Nekochu/Llama-2-13B-German-ORPO.
  • I've always kept the LoRA adapter QLoRA_German-ORPO separate so it can be applied to any LLaMA-3.1-8B fine-tuned model, though doing so may affect performance.
This training can be replicated using LLaMA-Factory.

Stage A: SFT

set CUDA_VISIBLE_DEVICES=0 && llamafactory-cli train --stage sft --do_train True --model_name_or_path meta-llama/Meta-Llama-3.1-8B-Instruct --preprocessing_num_workers 1 --finetuning_type lora --template alpaca --rope_scaling linear --flash_attn fa2 --dataset_dir data --dataset ultrachat_de,airoboros_de,booksum_de,dolphin_de,evol_instruct_de,openschnabeltier_de,alpaca-gpt4_de,dolly_15k_de,oasst_de,bjoernp_ultrachat_de,German_Poems,German_Songs,OpenSchnabeltier --cutoff_len 8192 --learning_rate 5e-05 --num_train_epochs 3.0 --max_samples 100000 --per_device_train_batch_size 1 --gradient_accumulation_steps 1 --lr_scheduler_type cosine --max_grad_norm 1.0 --logging_steps 100 --save_steps 1000 --warmup_steps 1000 --neftune_noise_alpha 5 --optim adamw_8bit --packing True --neat_packing True --report_to none --output_dir saves\LLaMA3.1-8B-Chat\lora\Llama-3.1-8B-German --bf16 True --plot_loss True --ddp_timeout 180000000 --include_num_input_tokens_seen True --quantization_bit 4 --quantization_method bitsandbytes --lora_rank 32 --lora_alpha 64 --lora_dropout 0.15 --lora_target all --use_adam_mini True --create_new_adapter True

Stage B: Continued training with ORPO

set CUDA_VISIBLE_DEVICES=0 && llamafactory-cli train --stage dpo --do_train True --model_name_or_path meta-llama/Meta-Llama-3.1-8B-Instruct --preprocessing_num_workers 1 --finetuning_type lora --template alpaca --rope_scaling linear --flash_attn fa2 --dataset_dir data --dataset fix_orca_dpo_de --cutoff_len 4000 --learning_rate 5e-05 --num_train_epochs 1.0 --max_samples 100000 --per_device_train_batch_size 1 --gradient_accumulation_steps 1 --lr_scheduler_type cosine --max_grad_norm 1.0 --logging_steps 10 --save_steps 1000 --warmup_steps 0 --neftune_noise_alpha 5 --optim adamw_8bit --packing True --report_to none --output_dir saves\LLaMA3.1-8B-Chat\lora\Llama-3.1-8B-German-ORPO --bf16 True --plot_loss True --ddp_timeout 180000000 --include_num_input_tokens_seen True --quantization_bit 4 --quantization_method bitsandbytes --lora_rank 32 --lora_alpha 64 --lora_dropout 0.35 --lora_target all --pref_beta 0.1 --pref_ftx 0 --pref_loss orpo --adapter_name_or_path saves\LLaMA3.1-8B-Chat\lora\Llama-3.1-8B-German
dataset_info.json

dataset_info.json:

{
  "oasst_de": {
    "hf_hub_url": "mayflowergmbh/oasst_de"
  },
  "dolly_15k_de": {
    "hf_hub_url": "mayflowergmbh/dolly-15k_de"
  },
  "alpaca-gpt4_de": {
    "hf_hub_url": "mayflowergmbh/alpaca-gpt4_de"
  },
  "openschnabeltier_de": {
    "hf_hub_url": "mayflowergmbh/openschnabeltier_de"
  },
  "evol_instruct_de": {
    "hf_hub_url": "mayflowergmbh/evol-instruct_de"
  },
  "dolphin_de": {
    "hf_hub_url": "mayflowergmbh/dolphin_de"
  },
  "booksum_de": {
    "hf_hub_url": "mayflowergmbh/booksum_de"
  },
  "airoboros_de": {
    "hf_hub_url": "mayflowergmbh/airoboros-3.0_de"
  },
  "ultrachat_de": {
    "hf_hub_url": "mayflowergmbh/ultra-chat_de"
  },
  "German_Songs": {
    "file_name": "German_Songs.json",
    "file_sha1": "3ec36066a19debd1b138020b293e05f21264c352",
    "columns": {
      "prompt": "prompt",
      "query": "analysis_prompt",
      "response": "song",
      "history": "analysis",
      "system": "topic"
    }
  },
  "German_Poems": {
    "file_name": "German_Poems.json",
    "file_sha1": "f0f4bbea3b8cbc378afb640f4ff4dcd11132263c",
    "columns": {
      "prompt": "prompt",
      "query": "topic",
      "response": "poem"
    }
  },
  "bjoernp_ultrachat_de": {
    "file_name": "ultrachat_de.json",
    "file_sha1": "4e2b6dba1c387b3fa439c33ab35281403c39e973",
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations"
    },
    "tags": {
      "role_tag": "from",
      "content_tag": "value",
      "user_tag": "human",
      "assistant_tag": "gpt",
      "system_tag": "system"
    }
  },
  "OpenSchnabeltier": {
    "file_name": "OpenSchnabeltier.json",
    "columns": {
      "prompt": "instruction_de",
      "response": "output_de"
    }
  },
  "fix_orca_dpo_de": {
    "file_name": "fix_intel_orca_dpo_pairs_de.json",
    "ranking": true,
    "columns": {
      "prompt": "instruction",
      "query": "input",
      "chosen": "chosen",
      "rejected": "rejected"
    }
  }
}