base_model:
  model: huihui-ai/Llama-3.2-3B-Instruct-abliterated
  layer_range:
  - 0
  - 28
merge_method: ties
merge_method_sequence:
- dare_ties
- slerp
- ties
parameters:
  batch_size: 32
  density: 0.5
  int8_mask: true
  layer_range:
  - 0
  - 28
  model.embed_tokens.weight.t: 1.0
  normalize: false
  t:
  - filter: self_attn
    value:
    - 0
    - 0.5
    - 0.3
    - 0.7
    - 1
  - filter: mlp
    value:
    - 1
    - 0.5
    - 0.7
    - 0.3
    - 0
  - value: 0.5
  weight: 0.5
slices:
- sources:
  - density: 0.5
    layer_range:
    - 0
    - 28
    model: meta-llama/Llama-3.2-3B-Instruct
    weight: 0.5
  - density: 0.5
    layer_range:
    - 0
    - 28
    model: meta-llama/Llama-3.2-3B
    weight: 0.5
  - density: 0.5
    layer_range:
    - 0
    - 28
    model: chuanli11/Llama-3.2-3B-Instruct-uncensored
    weight: 0.5
  - density: 0.5
    layer_range:
    - 0
    - 28
    model: huihui-ai/Llama-3.2-3B-Instruct-abliterated
    weight: 0.5
  - density: 0.5
    layer_range:
    - 0
    - 28
    model: bunnycore/Llama-3.2-3B-ProdigyPlusPlus
    weight: 0.5
tokenizer_source: union