from transformers import PretrainedConfig


class Ernie4_5_Config(PretrainedConfig):
    """
    Configuration class for the Ernie 4.5 model.

    This class stores the configuration of an Ernie model, defining the model architecture.
    It inherits from `PretrainedConfig` and can be used to control model outputs.
    """

    model_type = "ernie4_5"
    keys_to_ignore_at_inference = ["past_key_values"]

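    # Tensor-parallel plan: q/k/v and the MLP gate/up projections are sharded
    # column-wise, while o_proj and down_proj are sharded row-wise.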
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
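    # Pipeline-parallel plan: maps each top-level module to the names of its
    # input and output tensors when the model is split across pipeline stages.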
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        intermediate_size=11008,
        max_position_embeddings=32768,
        num_hidden_layers=2,
        num_attention_heads=2,
        rms_norm_eps=1e-6,
        use_cache=False,
        use_flash_attention=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        use_bias=False,
        rope_theta=10000,
        weight_share_add_bias=True,
        ignored_index=-100,
        attention_probs_dropout_prob=0.0,
        hidden_dropout_prob=0.0,
        compression_ratio: float = 1.0,
        num_key_value_heads=None,
        max_sequence_length=None,
        **kwargs,
    ):
        """
        Initialize configuration with default or specified parameters.

        Args:
            vocab_size (int): Size of the vocabulary (number of unique tokens)
            hidden_size (int): Dimensionality of the encoder layers and the pooler layer
            intermediate_size (int): Dimensionality of the "intermediate" (feed-forward) layer
            max_position_embeddings (int): Maximum sequence length the model can handle
            num_hidden_layers (int): Number of hidden layers in the Transformer encoder
            num_attention_heads (int): Number of attention heads for each attention layer
            rms_norm_eps (float): The epsilon used by the RMS normalization layers
            use_cache (bool): Whether to use caching for faster generation (decoding)
            use_flash_attention (bool): Whether to use FlashAttention for optimized attention computation
            pad_token_id (int): Token ID used for padding sequences
            bos_token_id (int): Token ID used for beginning-of-sequence
            eos_token_id (int): Token ID used for end-of-sequence
            use_bias (bool): Whether to use bias terms in linear layers
            rope_theta (float): The base period of the RoPE embeddings
            weight_share_add_bias (bool): Whether to share bias weights in certain layers
            ignored_index (int): Target value that is ignored during loss computation
            attention_probs_dropout_prob (float): Dropout probability for attention weights
            hidden_dropout_prob (float): Dropout probability for hidden layers
            compression_ratio (float): Ratio for KV cache compression (1.0 = no compression)
            num_key_value_heads (int): Number of key/value heads (for Grouped Query Attention)
            max_sequence_length (int): Maximum sequence length for positional embeddings
            **kwargs: Additional keyword arguments passed to parent class
        """
        # Default to untied input/output embeddings unless explicitly overridden.
        if "tie_word_embeddings" not in kwargs:
            kwargs["tie_word_embeddings"] = False
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.max_position_embeddings = max_position_embeddings
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_flash_attention = use_flash_attention
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.use_bias = use_bias
        self.weight_share_add_bias = weight_share_add_bias
        self.rope_theta = rope_theta
        self.ignored_index = ignored_index
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.hidden_dropout_prob = hidden_dropout_prob
        self.compression_ratio = compression_ratio
        self.num_key_value_heads = num_key_value_heads
        self.max_sequence_length = max_sequence_length
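

# Usage sketch (illustrative addition, not part of the original module): build a
# config with a few overridden fields and round-trip it through the standard
# PretrainedConfig serialization helpers. Assumes a transformers release where
# to_dict(), from_dict(), and to_json_string() are available on PretrainedConfig.
if __name__ == "__main__":
    config = Ernie4_5_Config(
        hidden_size=1024,
        num_hidden_layers=4,
        num_attention_heads=8,
        num_key_value_heads=2,  # grouped-query attention: fewer KV heads than query heads
    )
    print(config.model_type)           # "ernie4_5"
    print(config.tie_word_embeddings)  # False unless explicitly overridden above

    # Serialize to a plain dict and rebuild an equivalent config from it.
    config_dict = config.to_dict()
    restored = Ernie4_5_Config.from_dict(config_dict)
    assert restored.hidden_size == config.hidden_size
    print(restored.to_json_string()[:200])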