---
# Layer slices fed into the merge: one source model contributing a single
# layer span.
slices:
  - sources:
      - model: lilmeaty/testing_semifinal
        # presumably a half-open span [start, end), i.e. just layer 15 —
        # TODO confirm against the merge tool's layer_range semantics
        layer_range: [15, 16]
        parameters:
          weight: 0.3
          density: 0.2
          gamma: 0.005
          normalize: true
          int8_mask: true
          random_seed: 42
          temperature: 0.5
          top_p: 0.65
          # NOTE(review): inference/max_tokens/stream/quantization look like
          # runtime-serving options, not merge parameters — confirm the
          # consumer of this file actually reads them here.
          inference: true
          max_tokens: 300
          stream: true
          quantization:
            - method: int8
              value: 60
            - method: int4
              value: 40
# Merge strategy and output settings.
# NOTE(review): passthrough merges typically copy tensors unchanged, which
# would ignore the per-source weight/density parameters above — confirm
# the intended merge method.
merge_method: passthrough
base_model: huihui-ai/Llama-3.2-1B-Instruct-abliterated
dtype: float16
# Post-merge compression settings.
# NOTE(review): this section is not part of the merge schema used by the
# slices/merge_method keys above — confirm which tool consumes it.
compression:
  pruning:
    enabled: true
    # presumably the fraction of weights removed — TODO confirm
    sparsity: 0.95
  distillation:
    enabled: true
    temperature: 0.7
    model_type: "distilled"
  quantization:
    enabled: true
    # Candidate quantization methods, listed in order of preference.
    methods:
      - int8
      - int4
# Serving-time optimization toggles.
# NOTE(review): these are runtime options, not merge options — confirm the
# inference stack that reads this section.
inference_optimizations:
  caching:
    enabled: true
    # presumably an entry count, not bytes — TODO confirm units
    cache_size: 1000
  batching:
    enabled: true
    batch_size: 8
  parallelism:
    enabled: true
    workers: 4
  asynchronous:
    enabled: true
    max_concurrent_tasks: 5
  tensor_cores:
    enabled: true
  gpu:
    enabled: true
    device: cuda
  model_sharding:
    enabled: true
    shards: 2
  memory_optimization:
    enabled: true
    strategy: "offload"
  tensor_compression:
    enabled: true
    method: "tensor_factorization"
# Mixture-of-experts routing configuration.
mixture_of_experts:
  enabled: true
  num_experts: 4
  # top_k gating with k=2: presumably each token is routed to the 2
  # highest-scoring experts — TODO confirm against the consumer
  gating_strategy: top_k
  top_k: 2
  load_balancing:
    enabled: true
    balance_factor: 0.5
  expert_capacity:
    max_tokens_per_expert: 512
  dynamic_routing:
    enabled: true
    routing_threshold: 0.1
  routing_optimizations:
    enabled: true
    cache_routing: true
# Structured-sparsity settings.
# NOTE(review): overlaps with compression.pruning above (sparsity 0.95 vs
# pruning_factor 0.98) — confirm which setting wins, or drop one.
model_sparsity:
  enabled: true
  sparsity_pattern: "block"
  mask_method: "random"
  pruning_factor: 0.98
# Runtime auto-tuning knobs.
auto_tuning:
  enabled: true
  batch_size_adaptation:
    enabled: true
    factor: 0.8
    max_batch_size: 32
  # Linear temperature decay from start_temp to end_temp.
  temperature_scheduling:
    enabled: true
    start_temp: 1.0
    end_temp: 0.5
    schedule: "linear"