---
license: cc
language:
- en
base_model:
- Qwen/Qwen2.5-1.5B-Instruct
---

```python
from transformers import AutoConfig, AutoModel
from termcolor import colored

model_path = "Efficient-Large-Model/NVILA-Lite-2B-hf-preview"

# Alternatively, build an uninitialized model from the config:
# config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
# model = AutoModel.from_config(config, trust_remote_code=True)

# Load the pretrained model; trust_remote_code pulls in the model's custom
# code, which provides the generate_content API used below.
model = AutoModel.from_pretrained(model_path, trust_remote_code=True, device_map="auto")

# Text-only generation
res = model.generate_content([
    "how are you today?"
])
print(colored(res, "cyan", attrs=["bold"]))

print("---" * 40)

# Image + text generation
import PIL.Image

response = model.generate_content([
    PIL.Image.open("inference_test/test_data/caption_meat.jpeg"),
    "describe the image?"
])
print(colored(response, "cyan", attrs=["bold"]))
```
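
Note: `generate_content` comes from the model's remote code, which is why `trust_remote_code=True` is required. Besides `transformers`, the snippet also needs `termcolor` and `Pillow` installed. The sample image path is only illustrative; point `PIL.Image.open` at any local image available on your machine.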