Update README.md
README.md (changed)
```diff
@@ -67,11 +67,15 @@ sampling_params = SamplingParams(temperature=0.7, top_p=0.8, max_tokens=256)
 
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 
-
+messages = [
+    {"role": "user", "content": "Give me a short introduction to large language model."},
+]
+
+prompts = tokenizer.apply_chat_template(messages, tokenize=False)
 
 llm = LLM(model=model_id, tensor_parallel_size=number_gpus)
 
-outputs = llm.generate(
+outputs = llm.generate(prompts, sampling_params)
 
 generated_text = outputs[0].outputs[0].text
 print(generated_text)
```
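For context, the updated README snippet reads end to end roughly as below. This is a sketch assembled from the lines visible in this diff; the imports, `model_id`, and `number_gpus` values are placeholders (assumptions), since the lines that define them sit above the changed hunk and are not shown here.

```python
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

model_id = "your-org/your-model"  # placeholder; the README defines the real model_id earlier
number_gpus = 1                   # placeholder; the README defines the real GPU count earlier

sampling_params = SamplingParams(temperature=0.7, top_p=0.8, max_tokens=256)

tokenizer = AutoTokenizer.from_pretrained(model_id)

# Build the prompt with the model's chat template instead of passing raw text.
messages = [
    {"role": "user", "content": "Give me a short introduction to large language model."},
]
prompts = tokenizer.apply_chat_template(messages, tokenize=False)

llm = LLM(model=model_id, tensor_parallel_size=number_gpus)

outputs = llm.generate(prompts, sampling_params)

generated_text = outputs[0].outputs[0].text
print(generated_text)
```

Routing the request through `apply_chat_template` formats the user message with the chat markup carried by the tokenizer, so `llm.generate` receives a prompt in the format the model was tuned to expect rather than a bare string.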