ekurtic committed
Commit e46ba8a · verified · Parent: a9f4ca3

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes (see the raw diff for the rest).

Files changed (50):
  1. .gitattributes +1 -0
  2. config.json +94 -0
  3. configuration_deepseek.py +212 -0
  4. generation_config.json +6 -0
  5. model-1-of-63.safetensors +3 -0
  6. model-10-of-63.safetensors +3 -0
  7. model-11-of-63.safetensors +3 -0
  8. model-12-of-63.safetensors +3 -0
  9. model-13-of-63.safetensors +3 -0
  10. model-14-of-63.safetensors +3 -0
  11. model-15-of-63.safetensors +3 -0
  12. model-16-of-63.safetensors +3 -0
  13. model-17-of-63.safetensors +3 -0
  14. model-18-of-63.safetensors +3 -0
  15. model-19-of-63.safetensors +3 -0
  16. model-2-of-63.safetensors +3 -0
  17. model-20-of-63.safetensors +3 -0
  18. model-21-of-63.safetensors +3 -0
  19. model-22-of-63.safetensors +3 -0
  20. model-23-of-63.safetensors +3 -0
  21. model-24-of-63.safetensors +3 -0
  22. model-25-of-63.safetensors +3 -0
  23. model-26-of-63.safetensors +3 -0
  24. model-27-of-63.safetensors +3 -0
  25. model-28-of-63.safetensors +3 -0
  26. model-29-of-63.safetensors +3 -0
  27. model-3-of-63.safetensors +3 -0
  28. model-30-of-63.safetensors +3 -0
  29. model-31-of-63.safetensors +3 -0
  30. model-32-of-63.safetensors +3 -0
  31. model-33-of-63.safetensors +3 -0
  32. model-34-of-63.safetensors +3 -0
  33. model-35-of-63.safetensors +3 -0
  34. model-36-of-63.safetensors +3 -0
  35. model-37-of-63.safetensors +3 -0
  36. model-38-of-63.safetensors +3 -0
  37. model-39-of-63.safetensors +3 -0
  38. model-4-of-63.safetensors +3 -0
  39. model-40-of-63.safetensors +3 -0
  40. model-41-of-63.safetensors +3 -0
  41. model-42-of-63.safetensors +3 -0
  42. model-43-of-63.safetensors +3 -0
  43. model-44-of-63.safetensors +3 -0
  44. model-45-of-63.safetensors +3 -0
  45. model-46-of-63.safetensors +3 -0
  46. model-47-of-63.safetensors +3 -0
  47. model-48-of-63.safetensors +3 -0
  48. model-49-of-63.safetensors +3 -0
  49. model-5-of-63.safetensors +3 -0
  50. model-50-of-63.safetensors +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+model.safetensors.index.json filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,94 @@
+{
+  "architectures": [
+    "DeepseekV3ForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "configuration_deepseek.DeepseekV3Config",
+    "AutoModel": "modeling_deepseek.DeepseekV3Model",
+    "AutoModelForCausalLM": "modeling_deepseek.DeepseekV3ForCausalLM"
+  },
+  "aux_loss_alpha": 0.001,
+  "bos_token_id": 163584,
+  "eos_token_id": 163585,
+  "ep_size": 1,
+  "first_k_dense_replace": 1,
+  "hidden_act": "silu",
+  "hidden_size": 7168,
+  "initializer_range": 0.02,
+  "intermediate_size": 18432,
+  "kv_lora_rank": 512,
+  "max_position_embeddings": 131072,
+  "model_type": "deepseek_v3",
+  "moe_intermediate_size": 2048,
+  "moe_layer_freq": 1,
+  "n_group": 1,
+  "n_routed_experts": 384,
+  "n_shared_experts": 1,
+  "norm_topk_prob": true,
+  "num_attention_heads": 64,
+  "num_experts_per_tok": 8,
+  "num_hidden_layers": 61,
+  "num_key_value_heads": 64,
+  "num_nextn_predict_layers": 0,
+  "pretraining_tp": 1,
+  "q_lora_rank": 1536,
+  "qk_nope_head_dim": 128,
+  "qk_rope_head_dim": 64,
+  "quantization_config": {
+    "config_groups": {
+      "group_0": {
+        "input_activations": null,
+        "output_activations": null,
+        "targets": [
+          "Linear"
+        ],
+        "weights": {
+          "actorder": null,
+          "block_structure": null,
+          "dynamic": false,
+          "group_size": 128,
+          "num_bits": 4,
+          "observer": "minmax",
+          "observer_kwargs": {},
+          "strategy": "group",
+          "symmetric": true,
+          "type": "int"
+        }
+      }
+    },
+    "format": "pack-quantized",
+    "ignore": [
+      "lm_head",
+      "re:.*self_attn.*",
+      "re:.*shared_experts.*",
+      "re:.*mlp\\.(gate|up|gate_up|down)_proj.*"
+    ],
+    "kv_cache_scheme": null,
+    "quant_method": "compressed-tensors",
+    "quantization_status": "compressed"
+  },
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": {
+    "beta_fast": 1.0,
+    "beta_slow": 1.0,
+    "factor": 32.0,
+    "mscale": 1.0,
+    "mscale_all_dim": 1.0,
+    "original_max_position_embeddings": 4096,
+    "type": "yarn"
+  },
+  "rope_theta": 50000.0,
+  "routed_scaling_factor": 2.827,
+  "scoring_func": "sigmoid",
+  "seq_aux": true,
+  "tie_word_embeddings": false,
+  "topk_group": 1,
+  "topk_method": "noaux_tc",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.50.0",
+  "use_cache": true,
+  "v_head_dim": 128,
+  "vocab_size": 163840
+}
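
Note on the quantization_config above: it declares weight-only INT4 quantization in the compressed-tensors "pack-quantized" format (symmetric, group strategy with group_size 128), applied to Linear layers of the routed experts only; lm_head, self-attention, shared experts, and the dense MLP projections are excluded via the ignore patterns. Since auto_map resolves to custom DeepseekV3 classes, loading goes through trust_remote_code. A minimal loading sketch, assuming the compressed-tensors package is installed so transformers can decode the packed weights, and using a hypothetical local checkpoint path:

```python
# Minimal loading sketch. Assumptions: the `compressed-tensors` package is
# installed, and "./checkpoint" is a hypothetical directory holding the files
# from this commit.
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "./checkpoint"
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    path,
    torch_dtype="auto",      # follows "torch_dtype": "bfloat16" for unquantized tensors
    device_map="auto",
    trust_remote_code=True,  # required: auto_map points at configuration_deepseek / modeling_deepseek
)
```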
configuration_deepseek.py ADDED
@@ -0,0 +1,212 @@
+# Copied from https://huggingface.co/deepseek-ai/DeepSeek-V3/blob/main/configuration_deepseek.py
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+class DeepseekV3Config(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`DeepseekV3Model`]. It is used to instantiate a
+    DeepSeek model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a configuration similar to that of DeepSeek-V3.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 129280):
+            Vocabulary size of the DeepSeek model. Defines the number of different tokens that can be represented by
+            the `inputs_ids` passed when calling [`DeepseekV3Model`].
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 11008):
+            Dimension of the MLP representations.
+        moe_intermediate_size (`int`, *optional*, defaults to 1407):
+            Dimension of the MoE representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer decoder.
+        num_nextn_predict_layers (`int`, *optional*, defaults to 1):
+            Number of next-n prediction layers in the DeepseekV3 model.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer decoder.
+        n_shared_experts (`int`, *optional*, defaults to None):
+            Number of shared experts; None means a dense model.
+        n_routed_experts (`int`, *optional*, defaults to None):
+            Number of routed experts; None means a dense model.
+        routed_scaling_factor (`float`, *optional*, defaults to 1.0):
+            Scaling factor for routed experts.
+        topk_method (`str`, *optional*, defaults to `greedy`):
+            Top-k method used in the routed gate.
+        n_group (`int`, *optional*, defaults to None):
+            Number of groups for routed experts.
+        topk_group (`int`, *optional*, defaults to None):
+            Number of selected groups for each token (for each token, the selected experts are restricted to
+            `topk_group` groups).
+        num_experts_per_tok (`int`, *optional*, defaults to None):
+            Number of selected experts; None means a dense model.
+        moe_layer_freq (`int`, *optional*, defaults to 1):
+            The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
+        first_k_dense_replace (`int`, *optional*, defaults to 0):
+            Number of dense layers at the start of the model
+            (embed -> [k dense layers] -> moe -> moe -> ... -> lm_head).
+        norm_topk_prob (`bool`, *optional*, defaults to `False`):
+            Whether to normalize the weights of the routed experts.
+        scoring_func (`str`, *optional*, defaults to `'softmax'`):
+            Method of computing expert weights.
+        aux_loss_alpha (`float`, *optional*, defaults to 0.001):
+            Auxiliary loss weight coefficient.
+        seq_aux (`bool`, *optional*, defaults to `True`):
+            Whether to compute the auxiliary loss for each individual sample.
+        num_key_value_heads (`int`, *optional*):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details check out [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
+            `num_attention_heads`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to 2048):
+            The maximum sequence length that this model might ever be used with.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        pad_token_id (`int`, *optional*):
+            Padding token id.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            Beginning of stream token id.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            End of stream token id.
+        pretraining_tp (`int`, *optional*, defaults to 1):
+            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+            issue](https://github.com/pytorch/pytorch/issues/76232).
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether to tie the word embeddings.
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        rope_scaling (`Dict`, *optional*):
+            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+            `max_position_embeddings` to the expected new maximum.
+        attention_bias (`bool`, *optional*, defaults to `False`):
+            Whether to use a bias in the query, key, value and output projection layers during self-attention.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+
+    ```python
+    >>> from transformers import DeepseekV3Model, DeepseekV3Config
+
+    >>> # Initializing a Deepseek-V3 style configuration
+    >>> configuration = DeepseekV3Config()
+
+    >>> # Initializing a model from the configuration
+    >>> model = DeepseekV3Model(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "deepseek_v3"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=129280,
+        hidden_size=7168,
+        intermediate_size=18432,
+        moe_intermediate_size=2048,
+        num_hidden_layers=61,
+        num_nextn_predict_layers=1,
+        num_attention_heads=128,
+        num_key_value_heads=128,
+        n_shared_experts=1,
+        n_routed_experts=256,
+        ep_size=1,
+        routed_scaling_factor=2.5,
+        kv_lora_rank=512,
+        q_lora_rank=1536,
+        qk_rope_head_dim=64,
+        v_head_dim=128,
+        qk_nope_head_dim=128,
+        topk_method="noaux_tc",
+        n_group=8,
+        topk_group=4,
+        num_experts_per_tok=8,
+        moe_layer_freq=1,
+        first_k_dense_replace=3,
+        norm_topk_prob=True,
+        scoring_func="sigmoid",
+        aux_loss_alpha=0.001,
+        seq_aux=True,
+        hidden_act="silu",
+        max_position_embeddings=4096,
+        initializer_range=0.02,
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        pad_token_id=None,
+        bos_token_id=0,
+        eos_token_id=1,
+        pretraining_tp=1,
+        tie_word_embeddings=False,
+        rope_theta=10000.0,
+        rope_scaling=None,
+        attention_bias=False,
+        attention_dropout=0.0,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.moe_intermediate_size = moe_intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_nextn_predict_layers = num_nextn_predict_layers
+        self.num_attention_heads = num_attention_heads
+        self.n_shared_experts = n_shared_experts
+        self.n_routed_experts = n_routed_experts
+        self.ep_size = ep_size
+        self.routed_scaling_factor = routed_scaling_factor
+        self.kv_lora_rank = kv_lora_rank
+        self.q_lora_rank = q_lora_rank
+        self.qk_rope_head_dim = qk_rope_head_dim
+        self.v_head_dim = v_head_dim
+        self.qk_nope_head_dim = qk_nope_head_dim
+        self.topk_method = topk_method
+        self.n_group = n_group
+        self.topk_group = topk_group
+        self.num_experts_per_tok = num_experts_per_tok
+        self.moe_layer_freq = moe_layer_freq
+        self.first_k_dense_replace = first_k_dense_replace
+        self.norm_topk_prob = norm_topk_prob
+        self.scoring_func = scoring_func
+        self.aux_loss_alpha = aux_loss_alpha
+        self.seq_aux = seq_aux
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.pretraining_tp = pretraining_tp
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.rope_scaling = rope_scaling
+        self.attention_bias = attention_bias
+        self.attention_dropout = attention_dropout
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
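
Note that this repo's config.json overrides several of the class defaults above (for example 384 routed experts instead of 256, 64 attention heads instead of 128, and first_k_dense_replace=1 instead of 3). A short sketch of how those overrides compose with the class, run from a directory containing configuration_deepseek.py:

```python
# Sketch: rebuild this repo's configuration from the class defaults plus the
# overrides found in config.json (values copied from the diff above).
from configuration_deepseek import DeepseekV3Config

cfg = DeepseekV3Config(
    vocab_size=163840,
    num_attention_heads=64,
    num_key_value_heads=64,
    n_routed_experts=384,
    n_group=1,
    topk_group=1,
    first_k_dense_replace=1,
    routed_scaling_factor=2.827,
    max_position_embeddings=131072,
    rope_theta=50000.0,
    num_nextn_predict_layers=0,
)

# Per-head query/key width is split into a non-positional part and a RoPE part.
print(cfg.qk_nope_head_dim + cfg.qk_rope_head_dim)  # 192
# Each token is routed to 8 of the 384 routed experts, plus 1 shared expert.
print(cfg.num_experts_per_tok, cfg.n_routed_experts, cfg.n_shared_experts)  # 8 384 1
```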
generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 163584,
+  "eos_token_id": 163585,
+  "transformers_version": "4.50.0"
+}
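
generation_config.json only pins the special token ids (matching bos_token_id and eos_token_id in config.json); transformers picks it up automatically, but it can also be inspected directly. A minimal sketch, reusing the hypothetical checkpoint path from above:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("./checkpoint")  # hypothetical local path
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # 163584 163585
```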
model-1-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9dbbc34aa40d0d96ff15958a63bbdbc4be4664bf5dcb44041520b01229010d28
+size 2348810352
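
Each model-*-of-63.safetensors entry in this commit is a Git LFS pointer rather than the binary shard itself: a version line, the shard's sha256 oid, and its size in bytes. A downloaded shard can be checked against its pointer with a short script; a minimal sketch, with hypothetical file paths:

```python
# Verify a downloaded shard against its Git LFS pointer (paths are hypothetical).
import hashlib
import os

def verify_shard(shard_path: str, pointer_path: str) -> bool:
    # Pointer files have three "key value" lines: version, oid, size.
    fields = dict(line.split(" ", 1) for line in open(pointer_path).read().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    if os.path.getsize(shard_path) != int(fields["size"]):
        return False
    h = hashlib.sha256()
    with open(shard_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == expected_oid

# e.g. verify_shard("model-1-of-63.safetensors", "pointers/model-1-of-63.safetensors")
```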
model-10-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b24391f0a652711dabc66a0e318ee1668f51ed13e35596b40ba2726183e74667
+size 9016268032
model-11-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56517697ef01dda805d0d0a88c742e2037feeb0b3d15d00a85583ab1baf352bd
+size 9016268032
model-12-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a7af313d7641aa43c7c477170e184ff2ef72d1567026a5a732bc49ae8bf7e13
+size 9016271504
model-13-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e34af38d1dbdb9aa8cbfbf96aaf2920090268b9f5579ad74669d6e4e6c586e9
+size 9016271504
model-14-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef160316b9059d7e2e781e5106840629056952af143ec3aff73bea455a08277f
+size 9016271504
model-15-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce588d7032fb27b333e5b8e51632a6b33ddbc607104165114d1492c6ff2c964c
+size 9016271504
model-16-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e685a812edad6395134ef2b9ab525e7c506757b7a63d2c25a71b38dffe3f96d
+size 9016271504
model-17-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:729037a7ffbb74268faac87105b65744e969e4fc61d0ee29ba16d7169ba40667
+size 9016271504
model-18-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c8cd2bde23d3c822f3deb3e38793463636000ede5c41578a9b3e7d17b57b85c
+size 9016271504
model-19-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64ede8a18033f7a7f15c8ebeb511e9a381bf076e796f3173b77496346a32bd8f
+size 9016271504
model-2-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0b471fc05d5d8a1bb6ef509f7fe00dd02c6dc36e79196b33485dc03b751d000
+size 995001936
model-20-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cf7cd36bbef3db62597f060c8257762afe4da8a488f4b61b23f6d6e81ec5acb
+size 9016271504
model-21-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7218c7fb9a38a14ce6c4887655c5704cdb9dd92957edd9e612b151db5920053
+size 9016271504
model-22-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0343a640289f23da04420d3c015d6eb41f154a6c18e53c94e3ac399750974f8
+size 9016271504
model-23-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90407c324d8b776348818debaf474ed0c8b40472d2075ad1799d515fac4c9f35
+size 9016271504
model-24-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b51492daace6bb7513f9d51e2afe393aa419de94a3740eaee4258fc878a9c1c8
+size 9016271504
model-25-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03ca23f1b322cf930dc897c62cc24cde8fa77ef887ac4b771bd8b562159ac4f9
+size 9016271504
model-26-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09a3a1a5a19a84888e69307516382d78ca7f14f0556fe1bd709e682aea64e3de
+size 9016271504
model-27-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ab21ee5d5e503320be9edf7562ebb878ba8b339829ef36c9b390f349afedfb5
+size 9016271504
model-28-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a7134dbad41e2421984acddd39ff5fd0e262c0950a47f1f0b2f51422f947150
+size 9016271504
model-29-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11bd27fe10225127626dd52f0614f36da46e501c4241b71a9186181e161e6e7d
+size 9016271504
model-3-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71b1fb3457d1124a238ab8145916ec71a31a8f8e7fca8625b968b3d3e855f223
+size 9016268032
model-30-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9073d2e240c7df36f11742877acf2d402901ea1890b744af32ecf407e14f5b45
+size 9016271504
model-31-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dfecc1cd9a44a247cb217972dcea54d46595b7972ee4235fd60d93d4993c13da
+size 9016271504
model-32-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e03a119952ea32da3ed0802d57a5ca756e5577295b5e3ab678ade6e20f19357d
+size 9016271504
model-33-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e6dc436b2188c568650b03a47486cdbb145eec25479d03fc64a66f2d4edda8f
+size 9016271504
model-34-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:546d07878ac1f643c0583bc28274cdd53b06f7608968089aa351811de01997df
+size 9016271504
model-35-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50cf53a7ed54a591b3bf698f1981c4a2b9d62f06a77a7b6de45318fd94013423
+size 9016271504
model-36-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0205c7bde79b2cf8072f094f06224190b9399d5e1cce054f4b8211afd1744947
+size 9016271504
model-37-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d43c3fe62d268d57a9ccb075834b6f4ef63aa0161ac6f73b35bf32ab063570de
+size 9016271504
model-38-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b186fbabf24dc89493fcc13b8989cfc7cb439c4b82190e0bf11494b8bda72446
+size 9016271504
model-39-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e31e18bfc1ce84f3f63eac79922df4380001d4b570ea0999f914c1e5cc107e5f
+size 9016271504
model-4-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:552db983ebd51f0fb0f48e114044edbf61667458a4c7fc58e4c297ca0c4ec75d
+size 9016268032
model-40-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82d4df4d3c5d7fc8d0677a2d14c899894f6ceb2f37daf2fe796b2b8f016ac0bb
+size 9016271504
model-41-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d410656393d4ecd1e62576921b72830e376b6c96c1585c17e75dd55e0ae9dab8
+size 9016271504
model-42-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:195d9e9cb2a6bb53c00700da4719ecb976a3d7a338296581a56e505411fc4bee
+size 9016271504
model-43-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1bf20111d4ae5e4d1b6135af4fdf93b590a6f8c76a2607450bbd054979a216dd
+size 9016271504
model-44-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d55ead126a3df01dda4e41a3d0883df69db85a65681e71763056dc4bfa894728
+size 9016271504
model-45-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31fe4cc466d67fc32d4e1c0110ca73e9d65d7dd53e1e44885ec0d88e724e63ed
+size 9016271504
model-46-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cae4067b52b00fcec59fa98338422dd7401cf3abc49382f81bb13968ca3a3120
+size 9016271504
model-47-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7259f592d53ec64752e31884ab3f68a806e7b01ecff31f7730b72e8fa16c5d0d
+size 9016271504
model-48-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0810585151b5b97407606b06b42637ecc2bb6d7ad0c3734f4463609563939b39
+size 9016271504
model-49-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9014f9d6f23b1a4f0a1db60fc7960a903e9ca23d14df7bacad2abc16bd1d392
+size 9016271504
model-5-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ddbed3e94d58f0d760303b1b79ed2b54054c592338dc0bf789722160f6cc8b3
+size 9016268032
model-50-of-63.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c19d38a6c4a6cece2cb9bc9ed054eb7365499e3deb1294df126854e0af869006
+size 9016271504