DopeorNope committed
Commit c981fa6 · verified · 1 Parent(s): b50fdcf

Upload tokenizer

chat_template.jinja ADDED
@@ -0,0 +1,21 @@
+ {%- set found_item = false -%}
+ {%- for message in messages -%}
+ {%- if message['role'] == 'system' -%}
+ {%- set found_item = true -%}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- if not found_item -%}
+ {{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.\n'}}
+ {%- endif %}
+ {%- for message in messages %}
+ {%- if message['role'] == 'system' %}
+ {{ message['content'] }}
+ {%- else %}
+ {%- if message['role'] == 'user' %}
+ {{'### Instruction:\n' + message['content'] + '\n'}}
+ {%- else %}
+ {{'### Response:\n' + message['content'] + '\n<|EOT|>\n'}}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {{'### Response:\n'}}
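
This is the DeepSeek Coder instruct format: if no system message is present, a default system prompt is injected; user turns are wrapped in `### Instruction:`, assistant turns in `### Response:` terminated by `<|EOT|>`, and the template always ends with a trailing `### Response:\n` to cue generation. A minimal sketch of rendering it through `transformers` (the repo id below is a placeholder, not named in this commit):

```python
# Sketch: rendering the chat template above with transformers.
# "DopeorNope/model" is a placeholder repo id, not confirmed by this commit.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("DopeorNope/model")

messages = [{"role": "user", "content": "Reverse a string in Python."}]

# tokenize=False returns the rendered prompt string. With no system
# message, the template prepends its default system prompt, then emits:
#   ### Instruction:
#   Reverse a string in Python.
#   ### Response:
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)
```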
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<|begin▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|end▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
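
The special-token map pins BOS, EOS, and a dedicated pad token (note "normalized": true on BOS/EOS but not on "<pad>"). Once loaded, these surface as plain tokenizer attributes; a quick sanity check, assuming the same placeholder repo id as above:

```python
# Sketch: entries in special_tokens_map.json become tokenizer attributes.
# Placeholder repo id; substitute the actual repository.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("DopeorNope/model")

assert tokenizer.bos_token == "<|begin▁of▁sentence|>"
assert tokenizer.eos_token == "<|end▁of▁sentence|>"
assert tokenizer.pad_token == "<pad>"
```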
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "100000": {
+       "content": "<|begin▁of▁sentence|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100001": {
+       "content": "<|end▁of▁sentence|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "114155": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|begin▁of▁sentence|>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|end▁of▁sentence|>",
+   "extra_special_tokens": {},
+   "legacy": true,
+   "model_max_length": 16384,
+   "pad_token": "<pad>",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizerFast",
+   "unk_token": null,
+   "use_default_system_prompt": false
+ }
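
tokenizer_config.json binds those special tokens to concrete vocabulary ids (100000, 100001, and 114155 for the added "<pad>"), enables the BOS prefix while leaving EOS off, and caps model_max_length at 16384. A sanity-check sketch under the same placeholder repo id:

```python
# Sketch: verifying the id wiring declared in added_tokens_decoder.
# Placeholder repo id; substitute the actual repository.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("DopeorNope/model")

assert tok.bos_token_id == 100000   # <|begin▁of▁sentence|>
assert tok.eos_token_id == 100001   # <|end▁of▁sentence|>
assert tok.pad_token_id == 114155   # <pad>
assert tok.model_max_length == 16384

# add_bos_token=true, add_eos_token=false: plain encoding prepends BOS only.
ids = tok("hello world").input_ids
assert ids[0] == tok.bos_token_id
assert ids[-1] != tok.eos_token_id
```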