merge_method: della_linear
base_model: Qwen/Qwen2.5-7B-Instruct
dtype: bfloat16
parameters:
  epsilon: 0.015   # Width of the drop-probability window around each density.
  lambda: 1.6      # Scaling factor applied to the merged deltas.
  normalize: true  # Normalize model weights for stable integration.
adaptive_merge_parameters:
  task_weights:
    tinyArc: 1.75             # Logical reasoning.
    tinyHellaswag: 1.65       # Commonsense sentence completion.
    tinyMMLU: 1.8             # Domain knowledge.
    tinyTruthfulQA: 2.0       # Prioritize truthful reasoning.
    tinyTruthfulQA_mc1: 1.85  # Multiple-choice truthfulness.
    tinyWinogrande: 1.9       # Commonsense pronoun resolution.
    IFEval: 2.1               # Verifiable instruction following.
    BBH: 2.25                 # Complex reasoning.
    MATH: 2.4                 # Mathematical reasoning.
    GPQA: 2.35                # Graduate-level science QA.
    MUSR: 2.3                 # Multi-step soft reasoning.
    MMLU-PRO: 2.35            # Domain multitask performance.
  smoothing_factor: 0.05  # TURN UP THE SMOOTH!
models:
  - model: Qwen/Qwen2.5-7B-Instruct
    parameters:
      weight: 0.65
      density: 0.65
  - model: huihui-ai/Qwen2.5-7B-Instruct-abliterated-v3
    parameters:
      weight: 0.1
      density: 0.1
  - model: rombodawg/Rombos-LLM-V2.5-Qwen-7b
    parameters:
      weight: 0.15
      density: 0.15
  - model: fblgit/cybertron-v4-qw7B-MGS
    parameters:
      weight: 0.05
      density: 0.05
  - model: FreedomIntelligence/HuatuoGPT-o1-7B
    parameters:
      weight: 0.05
      density: 0.05
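For intuition about what `epsilon`, `lambda`, `weight`, and `density` control, here is a rough Python sketch of the drop-and-rescale idea behind `della_linear`. This is an illustration of the technique only, not mergekit's actual code, and the helper names are invented for the example.

```python
# Illustrative sketch (not mergekit's implementation) of della_linear for a
# single tensor: each model's delta from the base is sparsified by
# magnitude-aware random dropping, rescaled, then linearly combined.
import torch

def drop_and_rescale(delta: torch.Tensor, density: float, epsilon: float) -> torch.Tensor:
    """Keep roughly `density` of the entries, favoring large magnitudes."""
    # Rank entries by |delta|; rank 0 = smallest magnitude.
    ranks = delta.abs().flatten().argsort().argsort().float()
    ranks = ranks / max(ranks.numel() - 1, 1)  # normalize ranks to [0, 1]
    # Keep-probabilities span [density - epsilon, density + epsilon],
    # rising with magnitude rank: epsilon sets the width of that window.
    keep_prob = (density - epsilon + 2.0 * epsilon * ranks).clamp(0.0, 1.0)
    mask = torch.bernoulli(keep_prob).reshape(delta.shape)
    # Rescale survivors so the sparsified delta is unbiased in expectation.
    return delta * mask / keep_prob.reshape(delta.shape).clamp_min(1e-8)

def della_linear(base: torch.Tensor, tuned: list[torch.Tensor],
                 weights: list[float], densities: list[float],
                 epsilon: float, lam: float, normalize: bool = True) -> torch.Tensor:
    if normalize:  # `normalize: true` above: weights sum to 1
        total = sum(weights)
        weights = [w / total for w in weights]
    merged_delta = torch.zeros_like(base)
    for ft, w, d in zip(tuned, weights, densities):
        merged_delta += w * drop_and_rescale(ft - base, d, epsilon)
    return base + lam * merged_delta  # `lambda: 1.6` scales the merged deltas
```

To apply the configuration itself, a minimal sketch using mergekit's documented Python API follows. It assumes `pip install mergekit`, that the YAML above is saved as `merge-config.yaml`, and that the mergekit build in use accepts the `adaptive_merge_parameters` block; the output path is a placeholder.

```python
import torch
import yaml

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

with open("merge-config.yaml", "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

run_merge(
    merge_config,
    out_path="./merged-qwen2.5-7b",      # placeholder output directory
    options=MergeOptions(
        cuda=torch.cuda.is_available(),  # merge on GPU when available
        copy_tokenizer=True,             # copy the tokenizer into the output
        lazy_unpickle=True,              # lower peak memory while loading shards
    ),
)
```

The command-line equivalent is `mergekit-yaml merge-config.yaml ./merged-qwen2.5-7b --cuda`.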