description: Merging MISCHIEVOUS-12B-Mix models with sliced slerp

model_description: |
  This configuration merges two versions of the MISCHIEVOUS-12B-Mix model:
  0.4v and 0.5v. One of the two was further fine-tuned on a specific dataset
  (ADD DATASET NAME HERE if known). The sliced slerp approach allows for
  layer-specific control over the merging process.
base_model: bamec66557/MISCHIEVOUS-12B-Mix_0.4v
dtype: bfloat16
merge_method: slerp
tokenizer_source: union
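
# For reference, slerp (spherical linear interpolation) blends each pair of
# weight tensors along the arc between them rather than along a straight
# line. A sketch of the standard formula (not mergekit's exact internals):
#
#   slerp(a, b; t) = sin((1 - t) * omega) / sin(omega) * a
#                  + sin(t * omega) / sin(omega) * b
#   where omega = arccos((a . b) / (|a| * |b|))
#
# t = 0 keeps the base model's tensor (0.4v); t = 1 takes 0.5v's tensor.
# tokenizer_source: union builds a tokenizer containing the tokens of both
# models' vocabularies.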
slices:
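  # A note on how these blocks are read (assuming standard mergekit
  # behaviour): each five-element t list is a gradient that is interpolated
  # across the layers of the slice, and each filter restricts that gradient
  # to tensors whose names match it. For example, the first self_attn
  # gradient ramps the attention blend from 0.8 toward 1.0 (pure 0.5v)
  # over layers 0-9.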
  - sources:
      - model: bamec66557/MISCHIEVOUS-12B-Mix_0.4v
        layer_range: [0, 10]
      - model: bamec66557/MISCHIEVOUS-12B-Mix_0.5v
        layer_range: [0, 10]
    parameters:
      t:
        - filter: self_attn
          value: [0.8, 0.85, 0.9, 0.95, 1.0]
        - filter: mlp
          value: [0.9, 0.95, 1.0, 1.05, 1.1]
        - filter: layer_norm
          value: [0.6, 0.65, 0.7, 0.75, 0.8]
        - filter: embed_tokens
          value: [1.0]
  - sources:
      - model: bamec66557/MISCHIEVOUS-12B-Mix_0.4v
        layer_range: [10, 20]
      - model: bamec66557/MISCHIEVOUS-12B-Mix_0.5v
        layer_range: [10, 20]
    parameters:
      t:
        - filter: self_attn
          value: [0.7, 0.75, 0.8, 0.85, 0.9]
        - filter: mlp
          value: [1.0, 0.95, 0.9, 0.85, 0.8]
        - filter: layer_norm
          value: [0.5, 0.55, 0.6, 0.65, 0.7]
        - filter: embed_tokens
          value: [1.0]
  - sources:
      - model: bamec66557/MISCHIEVOUS-12B-Mix_0.4v
        layer_range: [20, 30]
      - model: bamec66557/MISCHIEVOUS-12B-Mix_0.5v
        layer_range: [20, 30]
    parameters:
      t:
        - filter: self_attn
          value: [0.6, 0.65, 0.7, 0.75, 0.8]
        - filter: mlp
          value: [0.8, 0.75, 0.7, 0.65, 0.6]
        - filter: layer_norm
          value: [0.4, 0.45, 0.5, 0.55, 0.6]
        - filter: embed_tokens
          value: [1.0]
  - sources:
      - model: bamec66557/MISCHIEVOUS-12B-Mix_0.4v
        layer_range: [30, 40]
      - model: bamec66557/MISCHIEVOUS-12B-Mix_0.5v
        layer_range: [30, 40]
    parameters:
      t:
        - filter: self_attn
          value: [0.9, 1.0, 1.1, 1.2, 1.3]
        - filter: mlp
          value: [0.7, 0.65, 0.6, 0.55, 0.5]
        - filter: layer_norm
          value: [0.7, 0.75, 0.8, 0.85, 0.9]
        - filter: embed_tokens
          value: [1.0]
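
# Note: slerp's t is usually kept in [0, 1]. Gradient entries above 1.0
# (the mlp gradient in the first slice and the self_attn gradient in the
# last) extrapolate past 0.5v rather than interpolate, which can amplify
# the differences between the two checkpoints.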
regularization:
  - method: weight_clipping
    clip_range: [-0.2, 0.2]
  - method: random_noise
    scale: 0.015
  - method: l2_norm
    scale: 0.01
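
# As conventionally understood, weight_clipping clamps merged weights to
# clip_range, random_noise adds small jitter at the given scale, and
# l2_norm shrinks weight magnitudes. These keys describe the intended
# treatment of the merged tensors; they are not among mergekit's core
# slerp options, so they may require custom tooling to take effect.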
postprocessing:
  - operation: random_noise
    scale: 0.0025
  - operation: non_linear_scaling
    parameters:
      function: tanh
  - operation: sharpening
    intensity: 0.3
  - operation: gaussian_smoothing
    sigma: 1.5
  - operation: smoothing
    parameters:
      adaptive: true
      range: [0.8, 1.2]
      kernel_size: 5
  - operation: normalize
  - operation: dynamic_scaling
    scale_range: [0.75, 1.25]
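
# The operations above are presumably applied in the order listed; note
# that gaussian_smoothing and the adaptive smoothing step overlap in
# intent, so tuning one may reduce the need for the other.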
evaluation:
  metrics:
    - perplexity
    - accuracy
    - bleu
    - rouge
  datasets:
    - wikitext
    - lambada
    - (ADD RELEVANT TASK-SPECIFIC DATASETS HERE)
  prompts:
    - "The quick brown fox jumps over the lazy dog."
    - "Translate 'Thank you' to Spanish:"
    - "Write a short summary of the French Revolution."
logging:
  output_dir: ./merged_models
  log_level: INFO
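
# To run this merge with mergekit (assuming it is installed, and assuming
# this config is saved as merge_config.yaml -- the filename is just an
# example):
#
#   mergekit-yaml merge_config.yaml ./merged_models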