jerryzh168 committed on
Commit
11d1522
·
verified ·
1 Parent(s): 37df94d

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -124,7 +124,7 @@ Use the following code to get the quantized model:
124
  import torch
125
  from transformers import AutoModelForCausalLM, AutoTokenizer, TorchAoConfig
126
 
127
- model_id = "microsoft/Phi-4-mini-instruct"
128
 
129
  from torchao.quantization import Int4WeightOnlyConfig
130
  quant_config = Int4WeightOnlyConfig(group_size=128, use_hqq=True)
 
124
  import torch
125
  from transformers import AutoModelForCausalLM, AutoTokenizer, TorchAoConfig
126
 
127
+ model_id = "Qwen/Qwen3-8B"
128
 
129
  from torchao.quantization import Int4WeightOnlyConfig
130
  quant_config = Int4WeightOnlyConfig(group_size=128, use_hqq=True)