attention_logit_softcapping: null
attention_scores_scalar: null
attn_bias: false
bias: false
block_size: 131072
final_logit_softcapping: null
gelu_approximate: none
head_size: 64
hf_config:
  name: MicroLlamaV2
  org: keeeeenw
intermediate_size: 5632
lm_head_bias: false
mlp_class_name: LLaMAMLP
n_embd: 1024
n_expert: 0
n_expert_per_token: 0
n_head: 16
n_layer: 12
n_query_groups: 4
name: micro-llama-300M-v2
norm_class_name: RMSNorm
norm_eps: 1.0e-05
norm_qk: false
padded_vocab_size: 128256
padding_multiple: 512
parallel_residual: false
post_attention_norm: false
post_mlp_norm: false
rope_adjustments:
  factor: 16.0
  high_freq_factor: 4.0
  low_freq_factor: 1.0
  original_max_seq_len: 8192
rope_base: 500000
rope_condense_ratio: 1
rotary_percentage: 1.0
scale_embeddings: false
shared_attention_norm: false
sliding_window_layer_placing: null
sliding_window_size: null
vocab_size: 128000
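
A minimal sketch of reading this config and checking the attention geometry it implies, assuming the YAML above is saved to a file (the path `model_config.yaml` is hypothetical) and that PyYAML is installed:

```python
import yaml

# Hypothetical path; assumes the config above was saved to this file.
with open("model_config.yaml") as f:
    cfg = yaml.safe_load(f)

# Per-head width: n_embd / n_head = 1024 / 16 = 64, matching head_size.
assert cfg["n_embd"] // cfg["n_head"] == cfg["head_size"]

# Grouped-query attention: 16 query heads share 4 KV groups,
# i.e. 4 query heads per key/value group.
queries_per_kv_group = cfg["n_head"] // cfg["n_query_groups"]
print(f"{queries_per_kv_group} query heads per KV group")

# Vocabulary is padded from 128000 to 128256 (a multiple of padding_multiple=512).
print(cfg["padded_vocab_size"] - cfg["vocab_size"], "padding tokens")
```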