Text Generation · Transformers · Safetensors · PyTorch · English

Tags: gpt_neox, causal-lm, pythia, safety, unlearning, data-filtering, interpretability, pretraining, eleutherai, gpt-neox, wmdp, cbrn, tamper-resistance, research, model-suite, 6.9b, circuit-breaking, knowledge-filtering, open-weight, biothreat, safety-research, model-diffing, training-dynamics, text-generation-inference
Upload tokenizer

Files changed:
- tokenizer.json (+0 -0)
- tokenizer_config.json (+5 -1)
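"Upload tokenizer" is the default commit message transformers emits when a tokenizer is pushed programmatically, so this commit was most likely produced by re-saving the tokenizer under a newer library version, which serializes previously implicit config fields explicitly. A minimal sketch of that workflow; both repo ids are placeholders, not this repository's actual paths:

```python
from transformers import AutoTokenizer

# Placeholder repo ids; substitute the actual source and target paths.
tok = AutoTokenizer.from_pretrained("EleutherAI/pythia-6.9b")
tok.push_to_hub("your-org/your-model", commit_message="Upload tokenizer")
```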
tokenizer.json CHANGED

The diff for this file is too large to render. See raw diff.
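Although the web viewer cannot render the tokenizer.json diff, the file itself is easy to pull down and inspect locally. A minimal sketch using `huggingface_hub`; the repo id is again a placeholder:

```python
import json
from huggingface_hub import hf_hub_download

# Placeholder repo id; substitute this repository's actual Hub path.
path = hf_hub_download("EleutherAI/pythia-6.9b", filename="tokenizer.json")

with open(path, encoding="utf-8") as f:
    data = json.load(f)

# Top-level sections of a serialized fast tokenizer.
print(list(data))                   # version, added_tokens, model, ...
print(data["model"]["type"])        # "BPE" for GPT-NeoX-style tokenizers
print(len(data["model"]["vocab"]))  # vocabulary size
```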
tokenizer_config.json CHANGED

@@ -1,4 +1,6 @@
 {
+  "add_bos_token": false,
+  "add_eos_token": false,
   "add_prefix_space": false,
   "added_tokens_decoder": {
     "0": {
@@ -203,9 +205,11 @@
     }
   },
   "bos_token": "<|endoftext|>",
-  "clean_up_tokenization_spaces":
+  "clean_up_tokenization_spaces": false,
   "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
   "model_max_length": 1000000000000000019884624838656,
+  "pad_token": null,
   "tokenizer_class": "GPTNeoXTokenizer",
   "unk_token": "<|endoftext|>"
 }
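In effect, the updated config makes previously implicit defaults explicit: no BOS/EOS token is added automatically (`add_bos_token` and `add_eos_token` are false), tokenization-space cleanup is disabled, and no pad token is defined. A minimal sketch of how these settings surface when loading the tokenizer with transformers; the repo id is a placeholder (the tags suggest a Pythia-6.9B-based model), so substitute this repository's actual path:

```python
from transformers import AutoTokenizer

# Placeholder repo id; substitute this repository's actual Hub path.
tok = AutoTokenizer.from_pretrained("EleutherAI/pythia-6.9b")

# add_bos_token / add_eos_token are false, so nothing is prepended or
# appended; the output contains only the text's own BPE tokens.
ids = tok("Hello world").input_ids
print(tok.convert_ids_to_tokens(ids))

# pad_token is null in the config, so padded batching needs an explicit
# pad token; reusing EOS is the common choice for GPT-NeoX models.
tok.pad_token = tok.eos_token
batch = tok(["short", "a longer example"], padding=True, return_tensors="pt")
print(batch.input_ids.shape)
```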