models:
  - layer_range: [0, 1]
    model: huihui-ai/Llama-3.2-1B-Instruct-abliterated
    parameters:
      weight: 1
      density: 0.9
      gamma: 0.01
      normalize: true
      int8_mask: true
      random_seed: 0
      temperature: 0.5
      top_p: 0.65
      inference: true
      max_tokens: 999999999
      stream: true
      quantization:
        - method: int8
          value: 100
        - method: int4
          value: 100
  - layer_range: [0, 1]
    model: huihui-ai/MicroThinker-1B-Preview
    parameters:
      weight: 1
      density: 0.9
      gamma: 0.01
      normalize: true
      int8_mask: true
      random_seed: 0
      temperature: 0.5
      top_p: 0.65
      inference: true
      max_tokens: 999999999
      stream: true
      quantization:
        - method: int8
          value: 100
        - method: int4
          value: 100
  - layer_range: [0, 1]
    model: Hjgugugjhuhjggg/llama-3.2-1B-spinquant-hf
    parameters:
      weight: 1
      density: 0.9
      gamma: 0.01
      normalize: true
      int8_mask: true
      random_seed: 0
      temperature: 0.5
      top_p: 0.65
      inference: true
      max_tokens: 999999999
      stream: true
      quantization:
        - method: int8
          value: 100
        - method: int4
          value: 100
merge_method: linear
base_model: huihui-ai/Llama-3.2-1B-Instruct-abliterated
weight: 1
density: 0.9
gamma: 0.01
normalize: true
int8_mask: true
random_seed: 0
temperature: 0.5
top_p: 0.65
inference: true
max_tokens: 999999999
stream: true
quantization:
  - method: int8
    value: 100
  - method: int4
    value: 100
parameters:
  weight: 1
  density: 0.9
  gamma: 0.01
  normalize: true
  int8_mask: true
  random_seed: 0
  temperature: 0.5
  top_p: 0.65
  inference: true
  max_tokens: 999999999
  stream: true
  quantization:
    - method: int8
      value: 100
    - method: int4
      value: 100
dtype: float16
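The config above mixes inference-time sampling settings (temperature 0.5, top_p 0.65, streaming) in with the merge parameters. Below is a minimal sketch of applying those sampling settings when loading the merged model with 🤗 Transformers; the local directory `./merged-model` is a hypothetical output path for the merge, not something defined by the config itself.

```python
# Sketch only: assumes the merged model was saved to ./merged-model (hypothetical path).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

model_path = "./merged-model"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16)

prompt = "Explain model merging in one sentence."
inputs = tokenizer(prompt, return_tensors="pt")

# Sampling settings mirror the config above (temperature 0.5, top_p 0.65, streaming output).
streamer = TextStreamer(tokenizer, skip_prompt=True)
model.generate(
    **inputs,
    do_sample=True,
    temperature=0.5,
    top_p=0.65,
    max_new_tokens=256,
    streamer=streamer,
)
```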