xcpan committed on
Commit ea0a360 · verified · 1 Parent(s): dd4c8bd

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.

Files changed (50)
  1. 22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/added_tokens.json +6 -0
  2. 22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/config.json +225 -0
  3. 22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/generation_config.json +14 -0
  4. 22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/merges.txt +0 -0
  5. 22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/model.safetensors +3 -0
  6. 22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/special_tokens_map.json +20 -0
  7. 22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/tokenizer.json +0 -0
  8. 22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/tokenizer_config.json +53 -0
  9. 22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/trainer_state.json +0 -0
  10. 22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/training_args.bin +3 -0
  11. 22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/vocab.json +0 -0
  12. 23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/added_tokens.json +6 -0
  13. 23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/config.json +225 -0
  14. 23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/generation_config.json +14 -0
  15. 23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/merges.txt +0 -0
  16. 23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/model.safetensors +3 -0
  17. 23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/special_tokens_map.json +20 -0
  18. 23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/tokenizer.json +0 -0
  19. 23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/tokenizer_config.json +53 -0
  20. 23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/trainer_state.json +0 -0
  21. 23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/training_args.bin +3 -0
  22. 23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/vocab.json +0 -0
  23. 24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/added_tokens.json +6 -0
  24. 24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/config.json +225 -0
  25. 24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/generation_config.json +14 -0
  26. 24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/merges.txt +0 -0
  27. 24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/model.safetensors +3 -0
  28. 24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/special_tokens_map.json +20 -0
  29. 24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/tokenizer.json +0 -0
  30. 24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/tokenizer_config.json +53 -0
  31. 24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/trainer_state.json +0 -0
  32. 24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/training_args.bin +3 -0
  33. 24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/vocab.json +0 -0
  34. 25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/added_tokens.json +6 -0
  35. 25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/config.json +225 -0
  36. 25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/generation_config.json +14 -0
  37. 25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/merges.txt +0 -0
  38. 25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/model.safetensors +3 -0
  39. 25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/special_tokens_map.json +20 -0
  40. 25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/tokenizer.json +0 -0
  41. 25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/tokenizer_config.json +53 -0
  42. 25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/trainer_state.json +0 -0
  43. 25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/training_args.bin +3 -0
  44. 25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/vocab.json +0 -0
  45. 26_xp_misc_0.5b_alpha_0.1_head_5e4_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/added_tokens.json +6 -0
  46. 26_xp_misc_0.5b_alpha_0.1_head_5e4_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/config.json +225 -0
  47. 26_xp_misc_0.5b_alpha_0.1_head_5e4_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/generation_config.json +14 -0
  48. 26_xp_misc_0.5b_alpha_0.1_head_5e4_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/merges.txt +0 -0
  49. 26_xp_misc_0.5b_alpha_0.1_head_5e4_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/model.safetensors +3 -0
  50. 26_xp_misc_0.5b_alpha_0.1_head_5e4_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/special_tokens_map.json +20 -0
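The commit message says these folders were uploaded with huggingface_hub. As a minimal sketch of the reverse direction (the repository id below is a placeholder, not taken from this page; only the folder name comes from the commit), a single experiment folder could be pulled back down with snapshot_download:

from huggingface_hub import snapshot_download

# Placeholder repo id: substitute the repository this commit belongs to.
REPO_ID = "xcpan/your-repo-name"

# Restrict the download to one of the checkpoint folders listed above.
local_dir = snapshot_download(
    repo_id=REPO_ID,
    allow_patterns=["22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/*"],
)
print(local_dir)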
22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/added_tokens.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "<image>": 151646,
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644
+ }
22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/config.json ADDED
@@ -0,0 +1,225 @@
+ {
+ "_name_or_path": "/fsx/xichenpan/pllm_pretrained_ckpt/llava-onevision-qwen2-0.5b-ov",
+ "add_faster_video": false,
+ "add_time_instruction": true,
+ "architectures": [
+ "LlavaQwenForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "cut3r_loss_weight": 0.01,
+ "cut3r_model_path": "/fsx/xichenpan/pllm_pretrained_ckpt/cut3r_224_linear_4.pth",
+ "downstream_head_lr": 0.0001,
+ "eos_token_id": 151645,
+ "faster_token_stride": 10,
+ "force_sample": true,
+ "head_name": "linear",
+ "hidden_act": "silu",
+ "hidden_size": 896,
+ "ignore_index": -100,
+ "image_aspect_ratio": "anyres_max_9",
+ "image_crop_resolution": null,
+ "image_grid_pinpoints": [
+ [
+ 384,
+ 384
+ ],
+ [
+ 384,
+ 768
+ ],
+ [
+ 384,
+ 1152
+ ],
+ [
+ 384,
+ 1536
+ ],
+ [
+ 384,
+ 1920
+ ],
+ [
+ 384,
+ 2304
+ ],
+ [
+ 768,
+ 384
+ ],
+ [
+ 768,
+ 768
+ ],
+ [
+ 768,
+ 1152
+ ],
+ [
+ 768,
+ 1536
+ ],
+ [
+ 768,
+ 1920
+ ],
+ [
+ 768,
+ 2304
+ ],
+ [
+ 1152,
+ 384
+ ],
+ [
+ 1152,
+ 768
+ ],
+ [
+ 1152,
+ 1152
+ ],
+ [
+ 1152,
+ 1536
+ ],
+ [
+ 1152,
+ 1920
+ ],
+ [
+ 1152,
+ 2304
+ ],
+ [
+ 1536,
+ 384
+ ],
+ [
+ 1536,
+ 768
+ ],
+ [
+ 1536,
+ 1152
+ ],
+ [
+ 1536,
+ 1536
+ ],
+ [
+ 1536,
+ 1920
+ ],
+ [
+ 1536,
+ 2304
+ ],
+ [
+ 1920,
+ 384
+ ],
+ [
+ 1920,
+ 768
+ ],
+ [
+ 1920,
+ 1152
+ ],
+ [
+ 1920,
+ 1536
+ ],
+ [
+ 1920,
+ 1920
+ ],
+ [
+ 1920,
+ 2304
+ ],
+ [
+ 2304,
+ 384
+ ],
+ [
+ 2304,
+ 768
+ ],
+ [
+ 2304,
+ 1152
+ ],
+ [
+ 2304,
+ 1536
+ ],
+ [
+ 2304,
+ 1920
+ ],
+ [
+ 2304,
+ 2304
+ ]
+ ],
+ "image_split_resolution": null,
+ "image_token_index": 151646,
+ "initializer_range": 0.02,
+ "intermediate_size": 4864,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 24,
+ "mm_hidden_size": 1152,
+ "mm_newline_position": "grid",
+ "mm_patch_merge_type": "spatial_unpad",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_spatial_pool_mode": "bilinear",
+ "mm_spatial_pool_stride": 2,
+ "mm_tunable_parts": "mm_vision_tower,mm_mlp_adapter,mm_language_model",
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "google/siglip-so400m-patch14-384",
+ "mm_vision_tower_lr": 1e-05,
+ "model_type": "llava",
+ "num_attention_heads": 14,
+ "num_hidden_layers": 1,
+ "num_key_value_heads": 2,
+ "pos_skipping_range": 4096,
+ "projector_hidden_act": "gelu",
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "text_config": {
+ "model_type": "llama"
+ },
+ "token_num": 2,
+ "tokenizer_model_max_length": 32768,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.40.0.dev0",
+ "use_cache": true,
+ "use_mm_proj": true,
+ "use_pos_skipping": false,
+ "use_sliding_window": false,
+ "vision_config": {
+ "hidden_size": 1024,
+ "image_size": 336,
+ "intermediate_size": 4096,
+ "model_type": "clip_vision_model",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "patch_size": 14,
+ "projection_dim": 768,
+ "vocab_size": 32000
+ },
+ "vision_feature_layer": -2,
+ "vision_feature_select_strategy": "default",
+ "vision_tower_pretrained": null
+ }
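The five uploaded runs share this config.json apart from a handful of learning-rate fields (downstream_head_lr, mm_vision_tower_lr). A minimal comparison sketch, assuming the folders have been downloaded into the current directory (the glob pattern is an assumption based on the folder names above):

import json
from pathlib import Path

# Hyperparameters that differ between the uploaded experiment folders.
keys = ["downstream_head_lr", "mm_vision_tower_lr", "cut3r_loss_weight", "head_name"]

for cfg_path in sorted(Path(".").glob("*_loaded_pretrained_weights/config.json")):
    cfg = json.loads(cfg_path.read_text())
    print(cfg_path.parent.name, {k: cfg.get(k) for k in keys})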
22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "bos_token_id": 151643,
+ "do_sample": true,
+ "eos_token_id": [
+ 151645,
+ 151643
+ ],
+ "pad_token_id": 151643,
+ "repetition_penalty": 1.1,
+ "temperature": 0.7,
+ "top_k": 20,
+ "top_p": 0.8,
+ "transformers_version": "4.40.0.dev0"
+ }
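These defaults (sampling enabled, temperature 0.7, top_p 0.8, top_k 20, repetition penalty 1.1) are what transformers reads from generation_config.json. A hedged sketch of loading them explicitly; the local directory path is illustrative and assumes the folder has been downloaded:

from transformers import GenerationConfig

# Reads the generation_config.json shipped in the checkpoint folder (path is illustrative).
gen_cfg = GenerationConfig.from_pretrained(
    "22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights"
)
print(gen_cfg.do_sample, gen_cfg.temperature, gen_cfg.top_p, gen_cfg.top_k, gen_cfg.repetition_penalty)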
22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:353568829cbd13c1f27ec19a3f2309699ee6241a85f4f58cb6d4da40bd8dfe83
+ size 1449042030
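model.safetensors is committed as a Git LFS pointer, so the three lines above record only the sha256 and byte size of the real ~1.4 GB object. After fetching the actual weights, the hash can be re-checked with the standard library; a minimal sketch (the local path is assumed, not taken from this commit):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so the 1.4 GB checkpoint never sits in memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "353568829cbd13c1f27ec19a3f2309699ee6241a85f4f58cb6d4da40bd8dfe83"  # oid from the pointer above
path = "22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/model.safetensors"
print("match" if sha256_of(path) == expected else "mismatch")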
22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/tokenizer_config.json ADDED
@@ -0,0 +1,53 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<image>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "model_max_length": 32768,
+ "pad_token": "<|endoftext|>",
+ "padding_side": "right",
+ "processor_class": "LlavaProcessor",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
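The tokenizer_config.json above embeds the ChatML-style chat_template used by Qwen2. A hedged usage sketch, assuming the tokenizer files from this folder are available locally (the path and example message are illustrative):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights"
)

messages = [{"role": "user", "content": "Describe the <image> in one sentence."}]
# apply_chat_template wraps each turn in <|im_start|>/<|im_end|> as defined by the template above.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)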
22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:554823b9bf4c851c37396710233ac8d77baa96adbf492c35b1d5ed5d00d47391
+ size 7096
22_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_1e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/added_tokens.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "<image>": 151646,
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644
+ }
23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/config.json ADDED
@@ -0,0 +1,225 @@
+ {
+ "_name_or_path": "/fsx/xichenpan/pllm_pretrained_ckpt/llava-onevision-qwen2-0.5b-ov",
+ "add_faster_video": false,
+ "add_time_instruction": true,
+ "architectures": [
+ "LlavaQwenForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "cut3r_loss_weight": 0.01,
+ "cut3r_model_path": "/fsx/xichenpan/pllm_pretrained_ckpt/cut3r_224_linear_4.pth",
+ "downstream_head_lr": 0.0001,
+ "eos_token_id": 151645,
+ "faster_token_stride": 10,
+ "force_sample": true,
+ "head_name": "linear",
+ "hidden_act": "silu",
+ "hidden_size": 896,
+ "ignore_index": -100,
+ "image_aspect_ratio": "anyres_max_9",
+ "image_crop_resolution": null,
+ "image_grid_pinpoints": [
+ [
+ 384,
+ 384
+ ],
+ [
+ 384,
+ 768
+ ],
+ [
+ 384,
+ 1152
+ ],
+ [
+ 384,
+ 1536
+ ],
+ [
+ 384,
+ 1920
+ ],
+ [
+ 384,
+ 2304
+ ],
+ [
+ 768,
+ 384
+ ],
+ [
+ 768,
+ 768
+ ],
+ [
+ 768,
+ 1152
+ ],
+ [
+ 768,
+ 1536
+ ],
+ [
+ 768,
+ 1920
+ ],
+ [
+ 768,
+ 2304
+ ],
+ [
+ 1152,
+ 384
+ ],
+ [
+ 1152,
+ 768
+ ],
+ [
+ 1152,
+ 1152
+ ],
+ [
+ 1152,
+ 1536
+ ],
+ [
+ 1152,
+ 1920
+ ],
+ [
+ 1152,
+ 2304
+ ],
+ [
+ 1536,
+ 384
+ ],
+ [
+ 1536,
+ 768
+ ],
+ [
+ 1536,
+ 1152
+ ],
+ [
+ 1536,
+ 1536
+ ],
+ [
+ 1536,
+ 1920
+ ],
+ [
+ 1536,
+ 2304
+ ],
+ [
+ 1920,
+ 384
+ ],
+ [
+ 1920,
+ 768
+ ],
+ [
+ 1920,
+ 1152
+ ],
+ [
+ 1920,
+ 1536
+ ],
+ [
+ 1920,
+ 1920
+ ],
+ [
+ 1920,
+ 2304
+ ],
+ [
+ 2304,
+ 384
+ ],
+ [
+ 2304,
+ 768
+ ],
+ [
+ 2304,
+ 1152
+ ],
+ [
+ 2304,
+ 1536
+ ],
+ [
+ 2304,
+ 1920
+ ],
+ [
+ 2304,
+ 2304
+ ]
+ ],
+ "image_split_resolution": null,
+ "image_token_index": 151646,
+ "initializer_range": 0.02,
+ "intermediate_size": 4864,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 24,
+ "mm_hidden_size": 1152,
+ "mm_newline_position": "grid",
+ "mm_patch_merge_type": "spatial_unpad",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_spatial_pool_mode": "bilinear",
+ "mm_spatial_pool_stride": 2,
+ "mm_tunable_parts": "mm_vision_tower,mm_mlp_adapter,mm_language_model",
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "google/siglip-so400m-patch14-384",
+ "mm_vision_tower_lr": 5e-05,
+ "model_type": "llava",
+ "num_attention_heads": 14,
+ "num_hidden_layers": 1,
+ "num_key_value_heads": 2,
+ "pos_skipping_range": 4096,
+ "projector_hidden_act": "gelu",
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "text_config": {
+ "model_type": "llama"
+ },
+ "token_num": 2,
+ "tokenizer_model_max_length": 32768,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.40.0.dev0",
+ "use_cache": true,
+ "use_mm_proj": true,
+ "use_pos_skipping": false,
+ "use_sliding_window": false,
+ "vision_config": {
+ "hidden_size": 1024,
+ "image_size": 336,
+ "intermediate_size": 4096,
+ "model_type": "clip_vision_model",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "patch_size": 14,
+ "projection_dim": 768,
+ "vocab_size": 32000
+ },
+ "vision_feature_layer": -2,
+ "vision_feature_select_strategy": "default",
+ "vision_tower_pretrained": null
+ }
23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "bos_token_id": 151643,
+ "do_sample": true,
+ "eos_token_id": [
+ 151645,
+ 151643
+ ],
+ "pad_token_id": 151643,
+ "repetition_penalty": 1.1,
+ "temperature": 0.7,
+ "top_k": 20,
+ "top_p": 0.8,
+ "transformers_version": "4.40.0.dev0"
+ }
23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b62f1ccbbf4566c819aee4c1f5d78c27a4ee3a4c9f6441bd017942b0960a67eb
+ size 1449042030
23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/tokenizer_config.json ADDED
@@ -0,0 +1,53 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<image>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "model_max_length": 32768,
+ "pad_token": "<|endoftext|>",
+ "padding_side": "right",
+ "processor_class": "LlavaProcessor",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbb3c9587c4ccb60f5b36f888ec0baf93391fbf52b6cddc1560fad0badb753f4
+ size 7096
23_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-5_weight_0.01_e1_all_3_loaded_pretrained_weights/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/added_tokens.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "<image>": 151646,
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644
+ }
24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/config.json ADDED
@@ -0,0 +1,225 @@
+ {
+ "_name_or_path": "/fsx/xichenpan/pllm_pretrained_ckpt/llava-onevision-qwen2-0.5b-ov",
+ "add_faster_video": false,
+ "add_time_instruction": true,
+ "architectures": [
+ "LlavaQwenForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "cut3r_loss_weight": 0.01,
+ "cut3r_model_path": "/fsx/xichenpan/pllm_pretrained_ckpt/cut3r_224_linear_4.pth",
+ "downstream_head_lr": 0.0001,
+ "eos_token_id": 151645,
+ "faster_token_stride": 10,
+ "force_sample": true,
+ "head_name": "linear",
+ "hidden_act": "silu",
+ "hidden_size": 896,
+ "ignore_index": -100,
+ "image_aspect_ratio": "anyres_max_9",
+ "image_crop_resolution": null,
+ "image_grid_pinpoints": [
+ [
+ 384,
+ 384
+ ],
+ [
+ 384,
+ 768
+ ],
+ [
+ 384,
+ 1152
+ ],
+ [
+ 384,
+ 1536
+ ],
+ [
+ 384,
+ 1920
+ ],
+ [
+ 384,
+ 2304
+ ],
+ [
+ 768,
+ 384
+ ],
+ [
+ 768,
+ 768
+ ],
+ [
+ 768,
+ 1152
+ ],
+ [
+ 768,
+ 1536
+ ],
+ [
+ 768,
+ 1920
+ ],
+ [
+ 768,
+ 2304
+ ],
+ [
+ 1152,
+ 384
+ ],
+ [
+ 1152,
+ 768
+ ],
+ [
+ 1152,
+ 1152
+ ],
+ [
+ 1152,
+ 1536
+ ],
+ [
+ 1152,
+ 1920
+ ],
+ [
+ 1152,
+ 2304
+ ],
+ [
+ 1536,
+ 384
+ ],
+ [
+ 1536,
+ 768
+ ],
+ [
+ 1536,
+ 1152
+ ],
+ [
+ 1536,
+ 1536
+ ],
+ [
+ 1536,
+ 1920
+ ],
+ [
+ 1536,
+ 2304
+ ],
+ [
+ 1920,
+ 384
+ ],
+ [
+ 1920,
+ 768
+ ],
+ [
+ 1920,
+ 1152
+ ],
+ [
+ 1920,
+ 1536
+ ],
+ [
+ 1920,
+ 1920
+ ],
+ [
+ 1920,
+ 2304
+ ],
+ [
+ 2304,
+ 384
+ ],
+ [
+ 2304,
+ 768
+ ],
+ [
+ 2304,
+ 1152
+ ],
+ [
+ 2304,
+ 1536
+ ],
+ [
+ 2304,
+ 1920
+ ],
+ [
+ 2304,
+ 2304
+ ]
+ ],
+ "image_split_resolution": null,
+ "image_token_index": 151646,
+ "initializer_range": 0.02,
+ "intermediate_size": 4864,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 24,
+ "mm_hidden_size": 1152,
+ "mm_newline_position": "grid",
+ "mm_patch_merge_type": "spatial_unpad",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_spatial_pool_mode": "bilinear",
+ "mm_spatial_pool_stride": 2,
+ "mm_tunable_parts": "mm_vision_tower,mm_mlp_adapter,mm_language_model",
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "google/siglip-so400m-patch14-384",
+ "mm_vision_tower_lr": 5e-06,
+ "model_type": "llava",
+ "num_attention_heads": 14,
+ "num_hidden_layers": 1,
+ "num_key_value_heads": 2,
+ "pos_skipping_range": 4096,
+ "projector_hidden_act": "gelu",
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "text_config": {
+ "model_type": "llama"
+ },
+ "token_num": 2,
+ "tokenizer_model_max_length": 32768,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.40.0.dev0",
+ "use_cache": true,
+ "use_mm_proj": true,
+ "use_pos_skipping": false,
+ "use_sliding_window": false,
+ "vision_config": {
+ "hidden_size": 1024,
+ "image_size": 336,
+ "intermediate_size": 4096,
+ "model_type": "clip_vision_model",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "patch_size": 14,
+ "projection_dim": 768,
+ "vocab_size": 32000
+ },
+ "vision_feature_layer": -2,
+ "vision_feature_select_strategy": "default",
+ "vision_tower_pretrained": null
+ }
24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "bos_token_id": 151643,
+ "do_sample": true,
+ "eos_token_id": [
+ 151645,
+ 151643
+ ],
+ "pad_token_id": 151643,
+ "repetition_penalty": 1.1,
+ "temperature": 0.7,
+ "top_k": 20,
+ "top_p": 0.8,
+ "transformers_version": "4.40.0.dev0"
+ }
24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a44ac9d000754eb86c57636c68107a5b789bb4023c9b6f5c0e0177dc462f734
+ size 1449042030
24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/tokenizer_config.json ADDED
@@ -0,0 +1,53 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<image>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "model_max_length": 32768,
+ "pad_token": "<|endoftext|>",
+ "padding_side": "right",
+ "processor_class": "LlavaProcessor",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f11fd87df64530327ab4e63c83b7dc2e924b0b17f998b62f77fcb1655ac576e
+ size 7096
24_xp_misc_0.5b_alpha_0.1_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/added_tokens.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "<image>": 151646,
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644
+ }
25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/config.json ADDED
@@ -0,0 +1,225 @@
+ {
+ "_name_or_path": "/fsx/xichenpan/pllm_pretrained_ckpt/llava-onevision-qwen2-0.5b-ov",
+ "add_faster_video": false,
+ "add_time_instruction": true,
+ "architectures": [
+ "LlavaQwenForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "cut3r_loss_weight": 0.01,
+ "cut3r_model_path": "/fsx/xichenpan/pllm_pretrained_ckpt/cut3r_224_linear_4.pth",
+ "downstream_head_lr": 0.001,
+ "eos_token_id": 151645,
+ "faster_token_stride": 10,
+ "force_sample": true,
+ "head_name": "linear",
+ "hidden_act": "silu",
+ "hidden_size": 896,
+ "ignore_index": -100,
+ "image_aspect_ratio": "anyres_max_9",
+ "image_crop_resolution": null,
+ "image_grid_pinpoints": [
+ [
+ 384,
+ 384
+ ],
+ [
+ 384,
+ 768
+ ],
+ [
+ 384,
+ 1152
+ ],
+ [
+ 384,
+ 1536
+ ],
+ [
+ 384,
+ 1920
+ ],
+ [
+ 384,
+ 2304
+ ],
+ [
+ 768,
+ 384
+ ],
+ [
+ 768,
+ 768
+ ],
+ [
+ 768,
+ 1152
+ ],
+ [
+ 768,
+ 1536
+ ],
+ [
+ 768,
+ 1920
+ ],
+ [
+ 768,
+ 2304
+ ],
+ [
+ 1152,
+ 384
+ ],
+ [
+ 1152,
+ 768
+ ],
+ [
+ 1152,
+ 1152
+ ],
+ [
+ 1152,
+ 1536
+ ],
+ [
+ 1152,
+ 1920
+ ],
+ [
+ 1152,
+ 2304
+ ],
+ [
+ 1536,
+ 384
+ ],
+ [
+ 1536,
+ 768
+ ],
+ [
+ 1536,
+ 1152
+ ],
+ [
+ 1536,
+ 1536
+ ],
+ [
+ 1536,
+ 1920
+ ],
+ [
+ 1536,
+ 2304
+ ],
+ [
+ 1920,
+ 384
+ ],
+ [
+ 1920,
+ 768
+ ],
+ [
+ 1920,
+ 1152
+ ],
+ [
+ 1920,
+ 1536
+ ],
+ [
+ 1920,
+ 1920
+ ],
+ [
+ 1920,
+ 2304
+ ],
+ [
+ 2304,
+ 384
+ ],
+ [
+ 2304,
+ 768
+ ],
+ [
+ 2304,
+ 1152
+ ],
+ [
+ 2304,
+ 1536
+ ],
+ [
+ 2304,
+ 1920
+ ],
+ [
+ 2304,
+ 2304
+ ]
+ ],
+ "image_split_resolution": null,
+ "image_token_index": 151646,
+ "initializer_range": 0.02,
+ "intermediate_size": 4864,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 24,
+ "mm_hidden_size": 1152,
+ "mm_newline_position": "grid",
+ "mm_patch_merge_type": "spatial_unpad",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_spatial_pool_mode": "bilinear",
+ "mm_spatial_pool_stride": 2,
+ "mm_tunable_parts": "mm_vision_tower,mm_mlp_adapter,mm_language_model",
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "google/siglip-so400m-patch14-384",
+ "mm_vision_tower_lr": 2e-06,
+ "model_type": "llava",
+ "num_attention_heads": 14,
+ "num_hidden_layers": 1,
+ "num_key_value_heads": 2,
+ "pos_skipping_range": 4096,
+ "projector_hidden_act": "gelu",
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "text_config": {
+ "model_type": "llama"
+ },
+ "token_num": 2,
+ "tokenizer_model_max_length": 32768,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.40.0.dev0",
+ "use_cache": true,
+ "use_mm_proj": true,
+ "use_pos_skipping": false,
+ "use_sliding_window": false,
+ "vision_config": {
+ "hidden_size": 1024,
+ "image_size": 336,
+ "intermediate_size": 4096,
+ "model_type": "clip_vision_model",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "patch_size": 14,
+ "projection_dim": 768,
+ "vocab_size": 32000
+ },
+ "vision_feature_layer": -2,
+ "vision_feature_select_strategy": "default",
+ "vision_tower_pretrained": null
+ }
25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "bos_token_id": 151643,
+ "do_sample": true,
+ "eos_token_id": [
+ 151645,
+ 151643
+ ],
+ "pad_token_id": 151643,
+ "repetition_penalty": 1.1,
+ "temperature": 0.7,
+ "top_k": 20,
+ "top_p": 0.8,
+ "transformers_version": "4.40.0.dev0"
+ }
25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:660bac734b053ec899125ec372b0e5dd331702f3e2b3d6ea0137e29401310105
+ size 1449042030
25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/tokenizer_config.json ADDED
@@ -0,0 +1,53 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<image>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "model_max_length": 32768,
+ "pad_token": "<|endoftext|>",
+ "padding_side": "right",
+ "processor_class": "LlavaProcessor",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb4dfb859cd2c08dec88bdc95ab5a319fa5c73ad112b6576a6400b10fce504e4
+ size 7096
25_xp_misc_0.5b_alpha_0.1_head_1e3_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
26_xp_misc_0.5b_alpha_0.1_head_5e4_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/added_tokens.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "<image>": 151646,
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644
+ }
26_xp_misc_0.5b_alpha_0.1_head_5e4_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/config.json ADDED
@@ -0,0 +1,225 @@
+ {
+ "_name_or_path": "/fsx/xichenpan/pllm_pretrained_ckpt/llava-onevision-qwen2-0.5b-ov",
+ "add_faster_video": false,
+ "add_time_instruction": true,
+ "architectures": [
+ "LlavaQwenForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "cut3r_loss_weight": 0.01,
+ "cut3r_model_path": "/fsx/xichenpan/pllm_pretrained_ckpt/cut3r_224_linear_4.pth",
+ "downstream_head_lr": 0.0005,
+ "eos_token_id": 151645,
+ "faster_token_stride": 10,
+ "force_sample": true,
+ "head_name": "linear",
+ "hidden_act": "silu",
+ "hidden_size": 896,
+ "ignore_index": -100,
+ "image_aspect_ratio": "anyres_max_9",
+ "image_crop_resolution": null,
+ "image_grid_pinpoints": [
+ [
+ 384,
+ 384
+ ],
+ [
+ 384,
+ 768
+ ],
+ [
+ 384,
+ 1152
+ ],
+ [
+ 384,
+ 1536
+ ],
+ [
+ 384,
+ 1920
+ ],
+ [
+ 384,
+ 2304
+ ],
+ [
+ 768,
+ 384
+ ],
+ [
+ 768,
+ 768
+ ],
+ [
+ 768,
+ 1152
+ ],
+ [
+ 768,
+ 1536
+ ],
+ [
+ 768,
+ 1920
+ ],
+ [
+ 768,
+ 2304
+ ],
+ [
+ 1152,
+ 384
+ ],
+ [
+ 1152,
+ 768
+ ],
+ [
+ 1152,
+ 1152
+ ],
+ [
+ 1152,
+ 1536
+ ],
+ [
+ 1152,
+ 1920
+ ],
+ [
+ 1152,
+ 2304
+ ],
+ [
+ 1536,
+ 384
+ ],
+ [
+ 1536,
+ 768
+ ],
+ [
+ 1536,
+ 1152
+ ],
+ [
+ 1536,
+ 1536
+ ],
+ [
+ 1536,
+ 1920
+ ],
+ [
+ 1536,
+ 2304
+ ],
+ [
+ 1920,
+ 384
+ ],
+ [
+ 1920,
+ 768
+ ],
+ [
+ 1920,
+ 1152
+ ],
+ [
+ 1920,
+ 1536
+ ],
+ [
+ 1920,
+ 1920
+ ],
+ [
+ 1920,
+ 2304
+ ],
+ [
+ 2304,
+ 384
+ ],
+ [
+ 2304,
+ 768
+ ],
+ [
+ 2304,
+ 1152
+ ],
+ [
+ 2304,
+ 1536
+ ],
+ [
+ 2304,
+ 1920
+ ],
+ [
+ 2304,
+ 2304
+ ]
+ ],
+ "image_split_resolution": null,
+ "image_token_index": 151646,
+ "initializer_range": 0.02,
+ "intermediate_size": 4864,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 24,
+ "mm_hidden_size": 1152,
+ "mm_newline_position": "grid",
+ "mm_patch_merge_type": "spatial_unpad",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_spatial_pool_mode": "bilinear",
+ "mm_spatial_pool_stride": 2,
+ "mm_tunable_parts": "mm_vision_tower,mm_mlp_adapter,mm_language_model",
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "google/siglip-so400m-patch14-384",
+ "mm_vision_tower_lr": 2e-06,
+ "model_type": "llava",
+ "num_attention_heads": 14,
+ "num_hidden_layers": 1,
+ "num_key_value_heads": 2,
+ "pos_skipping_range": 4096,
+ "projector_hidden_act": "gelu",
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "text_config": {
+ "model_type": "llama"
+ },
+ "token_num": 2,
+ "tokenizer_model_max_length": 32768,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.40.0.dev0",
+ "use_cache": true,
+ "use_mm_proj": true,
+ "use_pos_skipping": false,
+ "use_sliding_window": false,
+ "vision_config": {
+ "hidden_size": 1024,
+ "image_size": 336,
+ "intermediate_size": 4096,
+ "model_type": "clip_vision_model",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "patch_size": 14,
+ "projection_dim": 768,
+ "vocab_size": 32000
+ },
+ "vision_feature_layer": -2,
+ "vision_feature_select_strategy": "default",
+ "vision_tower_pretrained": null
+ }
26_xp_misc_0.5b_alpha_0.1_head_5e4_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "bos_token_id": 151643,
+ "do_sample": true,
+ "eos_token_id": [
+ 151645,
+ 151643
+ ],
+ "pad_token_id": 151643,
+ "repetition_penalty": 1.1,
+ "temperature": 0.7,
+ "top_k": 20,
+ "top_p": 0.8,
+ "transformers_version": "4.40.0.dev0"
+ }
26_xp_misc_0.5b_alpha_0.1_head_5e4_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
26_xp_misc_0.5b_alpha_0.1_head_5e4_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dceb61ecd2abee989e9751e349212bf4af452a04d3ddd0c1dbb5e4fa6ad7bb66
+ size 1449042030
26_xp_misc_0.5b_alpha_0.1_head_5e4_linear_per_frame_vis_lr_5e-6_weight_0.01_e1_all_3_loaded_pretrained_weights/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }