name: SuperMerge-LayeredTIES-v1
merge_method: della_linear
base_model: CultriX/Enhanced-TIES-Base-v1  # TIES-merged base model; see the commentary at the end of this file
tokenizer_source: base
dtype: float32
out_dtype: bfloat16
parameters:
  int8_mask: true
  normalize: true
  rescale: false
  t: [0.1, 0.3, 0.7, 0.7, 0.4, 0.2]  # NOTE: 't' is a SLERP-style interpolation schedule; della_linear blends models via the explicit per-slice weights below
slices:
  - sources:
      - model: CultriX/Enhanced-TIES-Base-v1  # TIES-merged base
        layer_range: [0, 8]
        parameters:
          weight: 0.7
      - model: arcee-ai/Virtuoso-Small-v2
        layer_range: [0, 8]
        parameters:
          weight: 0.3
      - model: sthenno/tempesthenno-ppo-ckpt40
        layer_range: [0, 8]
        parameters:
          weight: 0.0
      - model: sometimesanotion/Qwenvergence-14B-v3-Prose
        layer_range: [0, 8]
        parameters:
          weight: 0.0
  - sources:
      - model: CultriX/Enhanced-TIES-Base-v1  # TIES-merged base
        layer_range: [8, 16]
        parameters:
          weight: 0.4
      - model: arcee-ai/Virtuoso-Small-v2
        layer_range: [8, 16]
        parameters:
          weight: 0.3
      - model: sthenno/tempesthenno-ppo-ckpt40
        layer_range: [8, 16]
        parameters:
          weight: 0.3
      - model: sometimesanotion/Qwenvergence-14B-v3-Prose
        layer_range: [8, 16]
        parameters:
          weight: 0.0
  - sources:
      - model: CultriX/Enhanced-TIES-Base-v1  # TIES-merged base
        layer_range: [16, 24]
        parameters:
          weight: 0.2
      - model: arcee-ai/Virtuoso-Small-v2
        layer_range: [16, 24]
        parameters:
          weight: 0.2
      - model: sthenno/tempesthenno-ppo-ckpt40
        layer_range: [16, 24]
        parameters:
          weight: 0.5
      - model: sometimesanotion/Qwenvergence-14B-v3-Prose
        layer_range: [16, 24]
        parameters:
          weight: 0.1
  - sources:
      - model: CultriX/Enhanced-TIES-Base-v1  # TIES-merged base
        layer_range: [24, 32]
        parameters:
          weight: 0.25
      - model: arcee-ai/Virtuoso-Small-v2
        layer_range: [24, 32]
        parameters:
          weight: 0.1
      - model: sthenno/tempesthenno-ppo-ckpt40
        layer_range: [24, 32]
        parameters:
          weight: 0.4
      - model: sometimesanotion/Qwenvergence-14B-v3-Prose
        layer_range: [24, 32]
        parameters:
          weight: 0.25
  - sources:
      - model: CultriX/Enhanced-TIES-Base-v1  # TIES-merged base
        layer_range: [32, 40]
        parameters:
          weight: 0.4
      - model: arcee-ai/Virtuoso-Small-v2
        layer_range: [32, 40]
        parameters:
          weight: 0.0
      - model: sthenno/tempesthenno-ppo-ckpt40
        layer_range: [32, 40]
        parameters:
          weight: 0.2
      - model: sometimesanotion/Qwenvergence-14B-v3-Prose
        layer_range: [32, 40]
        parameters:
          weight: 0.4
  - sources:
      - model: CultriX/Enhanced-TIES-Base-v1  # TIES-merged base
        layer_range: [40, 48]
        parameters:
          weight: 0.6
      - model: arcee-ai/Virtuoso-Small-v2
        layer_range: [40, 48]
        parameters:
          weight: 0.0
      - model: sthenno/tempesthenno-ppo-ckpt40
        layer_range: [40, 48]
        parameters:
          weight: 0.1
      - model: sometimesanotion/Qwenvergence-14B-v3-Prose
        layer_range: [40, 48]
        parameters:
          weight: 0.3
# =============================================================================
# SuperMerge-LayeredTIES-v1 Commentary
#
# This configuration combines the strengths of Enhanced-LayeredSlerp-v1 and SuperMerge-Enhanced-v1.
# It builds on the robust foundation of a TIES-merged base model (Enhanced-TIES-Base-v1) and applies
# the layer-wise module approach and fine-grained weight control of SuperMerge-Enhanced-v1 in a
# layered della_linear merge.
#
# Key Features:
# - TIES-Merged Base Foundation: Uses 'Enhanced-TIES-Base-v1' as the base model for the layered merge.
#   This TIES base provides a selectively merged, potentially more efficient starting point that
#   incorporates strengths from multiple models (Virtuoso, Phi-4, Qwenvergence, DeepSeek) with
#   density control (a hypothetical sketch of such a TIES base follows below).
#
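#   A TIES base like 'Enhanced-TIES-Base-v1' would be produced by its own MergeKit run. The sketch
#   below is only an assumption about its general shape, not the actual recipe: the Phi-4- and
#   DeepSeek-derived model identifiers are placeholders, and the weight/density values are
#   illustrative.
#
#     merge_method: ties
#     base_model: arcee-ai/Virtuoso-Small-v2
#     dtype: float32
#     parameters:
#       normalize: true
#     models:
#       - model: arcee-ai/Virtuoso-Small-v2
#         parameters: {weight: 0.4, density: 0.7}
#       - model: <phi-4-derived-model>                      # placeholder
#         parameters: {weight: 0.2, density: 0.5}
#       - model: sometimesanotion/Qwenvergence-14B-v3-Prose
#         parameters: {weight: 0.2, density: 0.5}
#       - model: <deepseek-derived-model>                   # placeholder
#         parameters: {weight: 0.2, density: 0.5}
#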
# - Layer-wise Module Integration: Maintains the module-based slice structure from SuperMerge-Enhanced-v1.
#   The layered della_linear merge combines the TIES-merged base with specialized modules for Reasoning,
#   IFEval, and MATH/Knowledge at different layer ranges, using explicit per-slice weights for
#   fine-grained control.
#
# - Benchmark-Driven Iterative Weight Tuning: The configuration is designed to be optimized through a
#   benchmark-driven iterative weight tuning process (as described in the refined SuperMerge-Enhanced-v1
#   approach). The initial weights are starting points and need to be tuned systematically against
#   benchmark results.
#
# Tuning Process (same as the refined SuperMerge-Enhanced-v1 approach):
# 1. Initial Benchmarking: Run a full benchmark suite.
# 2. Performance Analysis: Examine per-benchmark scores and compare them to the source models.
# 3. Targeted Weight Adjustments: Adjust layer weights based on the analysis (e.g., increase the IFEval
#    module's weight in early layers if IFEval is weak; an illustrative snippet follows this list).
# 4. Iterate: Repeat steps 1-3, making small, incremental adjustments in each iteration.
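#
# For illustration only, a step-3 adjustment targeting weak IFEval might look like the snippet below.
# The '<ifeval-oriented module>' placeholder stands for whichever source model the analysis identifies
# as driving IFEval, and the values are examples rather than tuned settings; keep each slice's weights
# roughly normalized when shifting mass between models.
#
#     - sources:
#         - model: CultriX/Enhanced-TIES-Base-v1
#           layer_range: [0, 8]
#           parameters:
#             weight: 0.6                       # was 0.7
#         - model: <ifeval-oriented module>     # placeholder
#           layer_range: [0, 8]
#           parameters:
#             weight: 0.1                       # was 0.0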
#
# Rationale:
# - Using a TIES-merged base aims to create a more robust and potentially more efficient foundation
#   for the layered merge.
# - The layer-wise module approach and fine-grained per-slice weights still allow precise control over
#   how specialized capabilities are blended at different network depths, building on the solid TIES base.
# - A benchmark-driven, iterative weight tuning process remains crucial for reaching optimal performance.
#
# Next Steps:
# - Build the merge with MergeKit (an example invocation follows this list).
# - Run initial benchmarks to establish a baseline.
# - Begin the iterative, benchmark-driven weight tuning process to optimize performance.
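#
# With a current MergeKit installation, the build step would look something like the following
# (the config filename and output directory are arbitrary choices, not fixed names):
#
#     mergekit-yaml supermerge-layeredties-v1.yml ./SuperMerge-LayeredTIES-v1 --cuda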
# =============================================================================