# mergekit merge configuration.
#
# Reconstructed from a garbled copy: every line carried a trailing " |"
# artifact and all YAML indentation had been stripped, which made the file
# invalid YAML and flattened the intended nesting. Structure restored;
# all keys and values preserved verbatim.
#
# NOTE(review): temperature, top_p, inference, max_tokens, stream and
# quantization look like inference/serving settings, not mergekit merge
# parameters — mergekit will likely ignore or reject them. They are kept
# exactly as in the original; confirm whether they belong here.
#
# NOTE(review): merge_method "linear" normally consumes only per-model
# "weight"; density/gamma are TIES/SLERP-style parameters — confirm the
# intended merge method.
models:
  # --- base merge components: full weight (1.0), high density (0.9) ---
  - layer_range: [0, 28]
    model: Hjgugugjhuhjggg/mergekit-ties-qgcitfu
    # Anchor &full_params: shared by all weight-1.0 entries below.
    parameters: &full_params
      weight: 1
      density: 0.9
      gamma: 0.01
      normalize: true
      int8_mask: true
      random_seed: 0
      temperature: 0.5
      top_p: 0.65
      inference: true
      max_tokens: 999999999
      stream: true
      quantization:
        - method: int8
          value: 100
        - method: int4
          value: 100
  - layer_range: [0, 28]
    model: Hjgugugjhuhjggg/mergekit-ties-esawwda
    parameters: *full_params
  - layer_range: [0, 28]
    model: Hjgugugjhuhjggg/mergekit-ties-dkhnzcn
    parameters: *full_params
  - layer_range: [0, 28]
    model: Hjgugugjhuhjggg/mergekit-ties-poovzrh
    parameters: *full_params
  - layer_range: [0, 28]
    model: Hjgugugjhuhjggg/mergekit-ties-pghuyfi
    parameters: *full_params
  - layer_range: [0, 28]
    model: Hjgugugjhuhjggg/mergekit-ties-kmlzhzo
    parameters: *full_params
  - layer_range: [0, 28]
    model: Hjgugugjhuhjggg/mergekit-ties-xflmond
    parameters: *full_params
  # --- flavor/skill components: half weight (0.5), lower density (0.5) ---
  - layer_range: [0, 28]
    model: bunnycore/Llama-3.2-3B-Long-Think
    # Anchor &half_params: shared by all weight-0.5 entries below.
    parameters: &half_params
      weight: 0.5
      density: 0.5
      gamma: 0.01
      normalize: true
      int8_mask: true
      random_seed: 0
      temperature: 0.5
      top_p: 0.65
      inference: true
      max_tokens: 999999999
      stream: true
      quantization:
        - method: int8
          value: 100
        - method: int4
          value: 100
  - layer_range: [0, 28]
    model: bunnycore/Llama-3.2-3B-Pure-RP
    parameters: *half_params
  - layer_range: [0, 28]
    model: bunnycore/Llama-3.2-3B-Apex
    parameters: *half_params
  - layer_range: [0, 28]
    model: bunnycore/Llama-3.2-3B-Mix-Skill
    parameters: *half_params
  - layer_range: [0, 28]
    model: bunnycore/Llama-3.2-3B-Booval
    parameters: *half_params
  - layer_range: [0, 28]
    model: bunnycore/Llama-3.2-3B-ProdigyPlusPlus
    parameters: *half_params
  - layer_range: [0, 28]
    model: bunnycore/Llama-3.2-3B-Prodigy
    parameters: *half_params
  - layer_range: [0, 28]
    model: bunnycore/Llama-3.2-3B-Sci-Think
    parameters: *half_params
  - layer_range: [0, 28]
    model: bunnycore/Llama-3.2-3B-Stock
    parameters: *half_params
  - layer_range: [0, 28]
    model: chuanli11/Llama-3.2-3B-Instruct-uncensored
    parameters: *half_params
  - layer_range: [0, 28]
    model: ValiantLabs/Llama3.2-3B-Enigma
    parameters: *half_params
  - layer_range: [0, 28]
    model: CarrotAI/Llama-3.2-Rabbit-Ko-3B-Instruct
    parameters: *half_params
  - layer_range: [0, 28]
    model: AELLM/Llama-3.2-Chibi-3B
    parameters: *half_params
  - layer_range: [0, 28]
    model: EmTpro01/llama-3.2-Code-Generator
    parameters: *half_params
  - layer_range: [0, 28]
    model: disi-unibo-nlp/llama3.2-3B-SFT-medmcqa-triples-cot
    parameters: *half_params
  - layer_range: [0, 28]
    model: Atharva26/llama-3.2-3b-mathdaily-chatbot
    parameters: *half_params
  - layer_range: [0, 28]
    model: Diluksha/Llama_3.2_3B_sql_finetuned_full
    parameters: *half_params
  - layer_range: [0, 28]
    model: bunnycore/Llama-3.2-3B-CodeReactor
    parameters: *half_params
  - layer_range: [0, 28]
    model: AcademieDuNumerique/Llama-3.2-3B-SQL-Instruct
    parameters: *half_params
  - layer_range: [0, 28]
    model: roger33303/Best_Model-llama3.2-3b-Instruct-Finetune-website-QnA
    parameters: *half_params
  - layer_range: [0, 28]
    model: noaebbot/llama3.2-3B-insights
    parameters: *half_params
  - layer_range: [0, 28]
    model: bunnycore/Llama-3.2-3B-TitanFusion-v2
    parameters: *half_params
  - layer_range: [0, 28]
    model: bunnycore/Llama-3.2-3B-TitanFusion
    parameters: *half_params
  - layer_range: [0, 28]
    model: bunnycore/Llama-3.2-3B-Mix
    parameters: *half_params
  - layer_range: [0, 28]
    model: ValiantLabs/Llama3.2-3B-ShiningValiant2
    parameters: *half_params
  - layer_range: [0, 28]
    model: TroyDoesAI/BlackSheep-Llama3.2-3B-Context_Obedient
    parameters: *half_params
  - layer_range: [0, 28]
    model: BrainWave-ML/llama3.2-3B-codemath-orpo
    parameters: *half_params
  - layer_range: [0, 28]
    model: CK0607/llama3.2-3B-CodeP
    parameters: *half_params
  - layer_range: [0, 28]
    model: disi-unibo-nlp/llama3.2-3B-SFT-medqa-triples-cot
    parameters: *half_params
  - layer_range: [0, 28]
    model: Isotonic/reasoning-llama3.2-3b
    parameters: *half_params
  - layer_range: [0, 28]
    model: meta-llama/Llama-3.2-3B-Instruct
    parameters: *half_params
  - layer_range: [0, 28]
    model: meta-llama/Llama-3.2-3B
    parameters: *half_params

merge_method: linear
base_model: huihui-ai/Llama-3.2-3B-Instruct-abliterated
# NOTE(review): the top-level keys below duplicate the "parameters:" mapping
# that follows and are not standard mergekit top-level keys; they are kept
# verbatim from the original source — confirm whether they can be dropped.
weight: 1
density: 0.9
gamma: 0.01
normalize: true
int8_mask: true
random_seed: 0
temperature: 0.5
top_p: 0.65
inference: true
max_tokens: 999999999
stream: true
quantization:
  - method: int8
    value: 100
  - method: int4
    value: 100
parameters:
  weight: 1
  density: 0.9
  gamma: 0.01
  normalize: true
  int8_mask: true
  random_seed: 0
  temperature: 0.5
  top_p: 0.65
  inference: true
  max_tokens: 999999999
  stream: true
  quantization:
    - method: int8
      value: 100
    - method: int4
      value: 100
dtype: float16