leeloolee committed
Commit 8bca016
1 Parent(s): 966662a

Create configuration_boost.py

Files changed (1)
  1. configuration_boost.py +75 -0
configuration_boost.py ADDED
@@ -0,0 +1,75 @@
+ # coding=utf-8
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Boost model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class BoostConfig(PretrainedConfig):
+
+     model_type = "boost"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=151936,
+         hidden_size=4096,
+         intermediate_size=22016,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=32,
+         hidden_act="silu",
+         max_position_embeddings=32768,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         use_sliding_window=False,
+         sliding_window=4096,
+         max_window_layers=28,
+         attention_dropout=0.0,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.use_sliding_window = use_sliding_window
+         self.sliding_window = sliding_window
+         self.max_window_layers = max_window_layers
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.attention_dropout = attention_dropout
+
+         super().__init__(
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
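Because BoostConfig subclasses PretrainedConfig, it inherits the standard config serialization helpers, and it can be wired into AutoConfig via its model_type. A minimal usage sketch follows; the keyword overrides and the ./boost-config path are illustrative, not part of this commit:

from transformers import AutoConfig

from configuration_boost import BoostConfig

# Register the custom class so AutoConfig can resolve model_type "boost".
AutoConfig.register("boost", BoostConfig)

# Instantiate with defaults; the overrides here are hypothetical examples.
config = BoostConfig(num_hidden_layers=24, sliding_window=2048)

# PretrainedConfig provides round-trip serialization via config.json.
config.save_pretrained("./boost-config")
reloaded = AutoConfig.from_pretrained("./boost-config")
assert reloaded.model_type == "boost"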