"""DDLLaMA model configuration""" |
|
|
|
from transformers.configuration_utils import PretrainedConfig |
|
from transformers.modeling_rope_utils import rope_config_validation |
|
|
|
class DDLlamaConfig(PretrainedConfig): |
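    """
    Configuration class for the DDLlama model. It stores the architecture hyperparameters of a
    LLaMA-style decoder (vocabulary size, hidden/intermediate sizes, layer and head counts, RoPE
    settings, ...) together with the DDLlama-specific routing options `router_layers`,
    `router_reduction_factor` and `proj_reduction_factor`. The defaults match those of a
    Llama-3-8B-Instruct-sized model.

    Example (illustrative only; the import path assumes this file is importable as
    `configuration_ddllama`):

    ```python
    >>> from configuration_ddllama import DDLlamaConfig

    >>> # Small config for quick tests, routing only the last two layers.
    >>> config = DDLlamaConfig(num_hidden_layers=4, router_layers=[2, 3])
    >>> config.model_type
    'ddllama'
    ```
    """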
|
    model_type = "ddllama"
    keys_to_ignore_at_inference = ["past_key_values"]
|
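    # Tensor-parallel sharding plan consumed by transformers' tensor-parallel utilities:
    # attention/MLP input projections are split column-wise and output projections row-wise
    # (the same layout as the stock Llama config).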
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
|
    def __init__(
        self,
        vocab_size=128256,
        hidden_size=4096,
        intermediate_size=14336,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=8,
        router_layers=[16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31],
        router_reduction_factor=16,
        proj_reduction_factor=16,
        hidden_act="silu",
        max_position_embeddings=8192,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=128000,
        eos_token_id=128009,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_theta=500000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        mlp_bias=False,
        head_dim=128,
        torch_dtype="bfloat16",
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.router_layers = router_layers
        self.router_reduction_factor = router_reduction_factor
        self.proj_reduction_factor = proj_reduction_factor
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.mlp_bias = mlp_bias
        self.head_dim = head_dim
        self.torch_dtype = torch_dtype

        # Validate the rotary position embedding parameters (rope_theta / rope_scaling).
        rope_config_validation(self)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            # Pass torch_dtype through to PretrainedConfig so the base class handles it
            # (otherwise its own default would override the attribute set above).
            torch_dtype=torch_dtype,
            **kwargs,
        )
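
# Usage sketch (illustrative, not executed on import): to let the Auto classes resolve the
# "ddllama" model type, the config can be registered with transformers, e.g.
#
#     from transformers import AutoConfig
#     AutoConfig.register("ddllama", DDLlamaConfig)
#
# The matching model class (not defined in this file) would be registered analogously, e.g.
# via AutoModelForCausalLM.register(DDLlamaConfig, <model class>).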