{ "architectures": [ "Qwen2ForCausalLM" ], "attention_dropout": 0.0, "attn_mechanism": "splash", "axis_dims": [ 1, 8, 1, 2 ], "axis_names": [ "dp", "fsdp", "tp", "sp" ], "backend": null, "bits": null, "blocksize_b": 1, "blocksize_k": 128, "blocksize_q": 128, "bos_token_id": 151643, "dcn_axis_dims": null, "easy_method": "train", "embd_pdrop": 0.0, "eos_token_id": 151643, "fcm_max_ratio": 0.0, "fcm_min_ratio": 0.0, "flash_attention_backward_pass_impl": "triton", "freq_max_position_embeddings": 4096, "gradient_checkpointing": "", "hardware_abstraction": false, "head_dim": 128, "hidden_act": "silu", "hidden_size": 5120, "initializer_range": 0.02, "intermediate_size": 13824, "kv_cache_quantization_blocksize": 64, "kv_cache_quantization_method": "None", "kv_cache_sharding_sequence_axis_name": "sp", "mask_max_position_embeddings": 4096, "max_position_embeddings": 131072, "max_window_layers": 48, "model_type": "qwen2", "num_attention_heads": 40, "num_hidden_layers": 48, "num_key_value_heads": 8, "number_rep_kv": 1, "pallas_k_block_size": 128, "pallas_m_block_size": 128, "pallas_n_block_size": 128, "partition_axis": [ [ "fsdp", "dp" ], "sp", "sp", "tp", "sp", "tp", null, null, null, null, "tp", "sp", null ], "platform": "jax", "pretraining_tp": 1, "quantization_blocksize": 64, "quantization_method": "None", "quantization_pattern": ".*", "resid_pdrop": 0.0, "rms_norm_eps": 1e-05, "rope_scaling": null, "rope_theta": 10000.0, "scan_attention_layers": false, "scan_layers": true, "scan_mlp_chunk_size": 1024, "scan_ring_attention": true, "sequence_axis_name": "sp", "shard_attention_computation": true, "sliding_window": null, "tie_word_embeddings": false, "torch_dtype": "bfloat16", "transformers_version": "4.50.3", "use_cache": true, "use_scan_mlp": false, "use_sharded_kv_caching": false, "use_sharding_constraint": false, "use_sliding_window": false, "vocab_size": 151667 }