prithivMLmods committed on
Commit f443c5c · verified · 1 Parent(s): da6f2e3

upload notebooks

Aya-Vision-8B/aya_vision_8b.ipynb ADDED
@@ -0,0 +1,189 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "metadata": {
7
+ "id": "m7rU-pjX3Y1O"
8
+ },
9
+ "outputs": [],
10
+ "source": [
11
+ "%%capture\n",
12
+ "!pip install gradio transformers accelerate numpy\n",
13
+ "!pip install torch torchvision av hf_xet spaces\n",
14
+ "!pip install pillow huggingface_hub opencv-python"
15
+ ]
16
+ },
17
+ {
18
+ "cell_type": "code",
19
+ "execution_count": null,
20
+ "metadata": {
21
+ "id": "dZUVag_jJMck"
22
+ },
23
+ "outputs": [],
24
+ "source": [
25
+ "from huggingface_hub import notebook_login, HfApi\n",
26
+ "notebook_login()"
27
+ ]
28
+ },
29
+ {
30
+ "cell_type": "code",
31
+ "execution_count": null,
32
+ "metadata": {
33
+ "id": "kW4MjaOs3c9E"
34
+ },
35
+ "outputs": [],
36
+ "source": [
37
+ "import gradio as gr\n",
38
+ "from transformers import AutoProcessor, TextIteratorStreamer, AutoModelForImageTextToText\n",
39
+ "from transformers.image_utils import load_image\n",
40
+ "from threading import Thread\n",
41
+ "import time\n",
42
+ "import torch\n",
43
+ "import spaces\n",
44
+ "import cv2\n",
45
+ "import numpy as np\n",
46
+ "from PIL import Image\n",
47
+ "\n",
48
+ "# Helper: progress bar HTML\n",
49
+ "def progress_bar_html(label: str) -> str:\n",
50
+ " return f'''\n",
51
+ "<div style=\"display: flex; align-items: center;\">\n",
52
+ " <span style=\"margin-right: 10px; font-size: 14px;\">{label}</span>\n",
53
+ " <div style=\"width: 110px; height: 5px; background-color: #FFB6C1; border-radius: 2px; overflow: hidden;\">\n",
54
+ " <div style=\"width: 100%; height: 100%; background-color: #FF69B4; animation: loading 1.5s linear infinite;\"></div>\n",
55
+ " </div>\n",
56
+ "</div>\n",
57
+ "<style>\n",
58
+ "@keyframes loading {{\n",
59
+ " 0% {{ transform: translateX(-100%); }}\n",
60
+ " 100% {{ transform: translateX(100%); }}\n",
61
+ "}}\n",
62
+ "</style>\n",
63
+ " '''\n",
64
+ "\n",
65
+ "# Aya Vision 8B setup\n",
66
+ "AYA_MODEL_ID = \"CohereForAI/aya-vision-8b\"\n",
67
+ "aya_processor = AutoProcessor.from_pretrained(AYA_MODEL_ID)\n",
68
+ "aya_model = AutoModelForImageTextToText.from_pretrained(\n",
69
+ " AYA_MODEL_ID,\n",
70
+ " device_map=\"auto\",\n",
71
+ " torch_dtype=torch.float16\n",
72
+ ")\n",
73
+ "\n",
74
+ "def downsample_video(video_path, num_frames=10):\n",
75
+ " \"\"\"\n",
76
+ " Extract evenly spaced frames and timestamps from a video file.\n",
77
+ " Returns list of (PIL.Image, timestamp_sec).\n",
78
+ " \"\"\"\n",
79
+ " vidcap = cv2.VideoCapture(video_path)\n",
80
+ " total = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n",
81
+ " fps = vidcap.get(cv2.CAP_PROP_FPS) or 30\n",
82
+ " indices = np.linspace(0, total-1, num_frames, dtype=int)\n",
83
+ " frames = []\n",
84
+ " for idx in indices:\n",
85
+ " vidcap.set(cv2.CAP_PROP_POS_FRAMES, int(idx))\n",
86
+ " ret, frame = vidcap.read()\n",
87
+ " if not ret:\n",
88
+ " continue\n",
89
+ " frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
90
+ " pil = Image.fromarray(frame)\n",
91
+ " timestamp = round(idx / fps, 2)\n",
92
+ " frames.append((pil, timestamp))\n",
93
+ " vidcap.release()\n",
94
+ " return frames\n",
95
+ "\n",
96
+ "@spaces.GPU\n",
97
+ "def process_image(prompt: str, image: Image.Image):\n",
98
+ " if image is None:\n",
99
+ " yield \"Error: Please upload an image.\"\n",
100
+ " return\n",
101
+ " if not prompt.strip():\n",
102
+ " yield \"Error: Please provide a prompt with the image.\"\n",
103
+ " return\n",
104
+ " yield progress_bar_html(\"Processing Image with Aya Vision 8B\")\n",
105
+ " messages = [{\"role\": \"user\", \"content\": [\n",
106
+ " {\"type\": \"image\", \"image\": image},\n",
107
+ " {\"type\": \"text\", \"text\": prompt.strip()}\n",
108
+ " ]}]\n",
109
+ " inputs = aya_processor.apply_chat_template(\n",
110
+ " messages, padding=True, add_generation_prompt=True,\n",
111
+ " tokenize=True, return_dict=True, return_tensors=\"pt\"\n",
112
+ " ).to(aya_model.device)\n",
113
+ " streamer = TextIteratorStreamer(aya_processor, skip_prompt=True, skip_special_tokens=True)\n",
114
+ " thread = Thread(target=aya_model.generate, kwargs={**inputs, \"streamer\": streamer, \"max_new_tokens\": 1024, \"do_sample\": True, \"temperature\": 0.3})\n",
115
+ " thread.start()\n",
116
+ " buff = \"\"\n",
117
+ " for chunk in streamer:\n",
118
+ " buff += chunk.replace(\"<|im_end|>\", \"\")\n",
119
+ " time.sleep(0.01)\n",
120
+ " yield buff\n",
121
+ "\n",
122
+ "@spaces.GPU\n",
123
+ "def process_video(prompt: str, video_file: str):\n",
124
+ " if video_file is None:\n",
125
+ " yield \"Error: Please upload a video.\"\n",
126
+ " return\n",
127
+ " if not prompt.strip():\n",
128
+ " yield \"Error: Please provide a prompt with the video.\"\n",
129
+ " return\n",
130
+ " yield progress_bar_html(\"Processing Video with Aya Vision 8B\")\n",
131
+ " frames = downsample_video(video_file)\n",
132
+ " # Build chat messages with each frame and timestamp\n",
133
+ " content = [{\"type\": \"text\", \"text\": prompt.strip()}]\n",
134
+ " for img, ts in frames:\n",
135
+ " content.append({\"type\": \"text\", \"text\": f\"Frame at {ts}s:\"})\n",
136
+ " content.append({\"type\": \"image\", \"image\": img})\n",
137
+ " messages = [{\"role\": \"user\", \"content\": content}]\n",
138
+ " inputs = aya_processor.apply_chat_template(\n",
139
+ " messages, tokenize=True, add_generation_prompt=True,\n",
140
+ " return_dict=True, return_tensors=\"pt\"\n",
141
+ " ).to(aya_model.device)\n",
142
+ " streamer = TextIteratorStreamer(aya_processor, skip_prompt=True, skip_special_tokens=True)\n",
143
+ " thread = Thread(target=aya_model.generate, kwargs={**inputs, \"streamer\": streamer, \"max_new_tokens\": 1024, \"do_sample\": True, \"temperature\": 0.3})\n",
144
+ " thread.start()\n",
145
+ " buff = \"\"\n",
146
+ " for chunk in streamer:\n",
147
+ " buff += chunk.replace(\"<|im_end|>\", \"\")\n",
148
+ " time.sleep(0.01)\n",
149
+ " yield buff\n",
150
+ "\n",
151
+ "# Build Gradio UI\n",
152
+ "demo = gr.Blocks()\n",
153
+ "with demo:\n",
154
+ " gr.Markdown(\"# **Aya Vision 8B Multimodal: Image & Video**\")\n",
155
+ " with gr.Tabs():\n",
156
+ " with gr.TabItem(\"Image Inference\"):\n",
157
+ " txt_i = gr.Textbox(label=\"Prompt\", placeholder=\"Enter prompt...\")\n",
158
+ " img_u = gr.Image(type=\"filepath\", label=\"Image\")\n",
159
+ " btn_i = gr.Button(\"Run Image\")\n",
160
+ " out_i = gr.Textbox(label=\"Output\", interactive=False)\n",
161
+ " btn_i.click(fn=process_image, inputs=[txt_i, img_u], outputs=out_i)\n",
162
+ " with gr.TabItem(\"Video Inference\"):\n",
163
+ " txt_v = gr.Textbox(label=\"Prompt\", placeholder=\"Enter prompt...\")\n",
164
+ " vid_u = gr.Video(label=\"Video\")\n",
165
+ " btn_v = gr.Button(\"Run Video\")\n",
166
+ " out_v = gr.Textbox(label=\"Output\", interactive=False)\n",
167
+ " btn_v.click(fn=process_video, inputs=[txt_v, vid_u], outputs=out_v)\n",
168
+ "\n",
169
+ "demo.launch(debug=True, share=True)"
170
+ ]
171
+ }
172
+ ],
173
+ "metadata": {
174
+ "accelerator": "GPU",
175
+ "colab": {
176
+ "gpuType": "T4",
177
+ "provenance": []
178
+ },
179
+ "kernelspec": {
180
+ "display_name": "Python 3",
181
+ "name": "python3"
182
+ },
183
+ "language_info": {
184
+ "name": "python"
185
+ }
186
+ },
187
+ "nbformat": 4,
188
+ "nbformat_minor": 0
189
+ }
Florence-2-Base/Florence_2_base.ipynb ADDED
@@ -0,0 +1,94 @@
1
+ {
2
+ "nbformat": 4,
3
+ "nbformat_minor": 0,
4
+ "metadata": {
5
+ "colab": {
6
+ "provenance": [],
7
+ "gpuType": "T4"
8
+ },
9
+ "kernelspec": {
10
+ "name": "python3",
11
+ "display_name": "Python 3"
12
+ },
13
+ "language_info": {
14
+ "name": "python"
15
+ },
16
+ "accelerator": "GPU"
17
+ },
18
+ "cells": [
19
+ {
20
+ "cell_type": "code",
21
+ "execution_count": 1,
22
+ "metadata": {
23
+ "id": "m7rU-pjX3Y1O"
24
+ },
25
+ "outputs": [],
26
+ "source": [
27
+ "%%capture\n",
28
+ "!pip install gradio transformers==4.30.2 pillow\n",
29
+ "!pip install torch torchvision hf_xet timm==1.0.10\n",
30
+ "!pip install flash-attn --no-build-isolation"
31
+ ]
32
+ },
33
+ {
34
+ "cell_type": "code",
35
+ "source": [
36
+ "import gradio as gr\n",
37
+ "import torch\n",
38
+ "from PIL import Image\n",
39
+ "from transformers import AutoProcessor, AutoModelForCausalLM\n",
40
+ "\n",
41
+ "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
42
+ "vision_language_model = AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True).to(device).eval()\n",
43
+ "vision_language_processor = AutoProcessor.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True)\n",
44
+ "\n",
45
+ "def describe_image(uploaded_image):\n",
46
+ " \"\"\"\n",
47
+ " Generates a detailed description of the input image.\n",
48
+ "\n",
49
+ " Args:\n",
50
+ " uploaded_image (PIL.Image.Image or numpy.ndarray): The image to describe.\n",
51
+ "\n",
52
+ " Returns:\n",
53
+ " str: A detailed textual description of the image.\n",
54
+ " \"\"\"\n",
55
+ " if not isinstance(uploaded_image, Image.Image):\n",
56
+ " uploaded_image = Image.fromarray(uploaded_image)\n",
57
+ "\n",
58
+ " inputs = vision_language_processor(text=\"<MORE_DETAILED_CAPTION>\", images=uploaded_image, return_tensors=\"pt\").to(device)\n",
59
+ " with torch.no_grad():\n",
60
+ " generated_ids = vision_language_model.generate(\n",
61
+ " input_ids=inputs[\"input_ids\"],\n",
62
+ " pixel_values=inputs[\"pixel_values\"],\n",
63
+ " max_new_tokens=1024,\n",
64
+ " early_stopping=False,\n",
65
+ " do_sample=False,\n",
66
+ " num_beams=3,\n",
67
+ " )\n",
68
+ " generated_text = vision_language_processor.batch_decode(generated_ids, skip_special_tokens=False)[0]\n",
69
+ " processed_description = vision_language_processor.post_process_generation(\n",
70
+ " generated_text,\n",
71
+ " task=\"<MORE_DETAILED_CAPTION>\",\n",
72
+ " image_size=(uploaded_image.width, uploaded_image.height)\n",
73
+ " )\n",
74
+ " image_description = processed_description[\"<MORE_DETAILED_CAPTION>\"]\n",
75
+ " print(\"\\nImage description generated!:\", image_description)\n",
76
+ " return image_description\n",
77
+ "\n",
78
+ "image_description_interface = gr.Interface(\n",
79
+ " fn=describe_image,\n",
80
+ " inputs=gr.Image(label=\"Upload Image\"),\n",
81
+ " outputs=gr.Textbox(label=\"Generated Caption\", lines=4, show_copy_button=True),\n",
82
+ " live=False,\n",
83
+ ")\n",
84
+ "\n",
85
+ "image_description_interface.launch(debug=True, ssr_mode=False)"
86
+ ],
87
+ "metadata": {
88
+ "id": "kW4MjaOs3c9E"
89
+ },
90
+ "execution_count": null,
91
+ "outputs": []
92
+ }
93
+ ]
94
+ }
Gemma3-VL/Gemma3_4B_it.ipynb ADDED
@@ -0,0 +1,369 @@
1
+ {
2
+ "nbformat": 4,
3
+ "nbformat_minor": 0,
4
+ "metadata": {
5
+ "colab": {
6
+ "provenance": [],
7
+ "gpuType": "T4"
8
+ },
9
+ "kernelspec": {
10
+ "name": "python3",
11
+ "display_name": "Python 3"
12
+ },
13
+ "language_info": {
14
+ "name": "python"
15
+ },
16
+ "accelerator": "GPU"
17
+ },
18
+ "cells": [
19
+ {
20
+ "cell_type": "code",
21
+ "execution_count": 1,
22
+ "metadata": {
23
+ "id": "m7rU-pjX3Y1O"
24
+ },
25
+ "outputs": [],
26
+ "source": [
27
+ "%%capture\n",
28
+ "!pip install gradio transformers accelerate numpy requests\n",
29
+ "!pip install torch torchvision av hf_xet qwen-vl-utils\n",
30
+ "!pip install pillow huggingface_hub opencv-python spaces"
31
+ ]
32
+ },
33
+ {
34
+ "cell_type": "code",
35
+ "source": [
36
+ "from huggingface_hub import notebook_login, HfApi\n",
37
+ "notebook_login()"
38
+ ],
39
+ "metadata": {
40
+ "id": "dZUVag_jJMck"
41
+ },
42
+ "execution_count": null,
43
+ "outputs": []
44
+ },
45
+ {
46
+ "cell_type": "code",
47
+ "source": [
48
+ "import os\n",
49
+ "import random\n",
50
+ "import uuid\n",
51
+ "import json\n",
52
+ "import time\n",
53
+ "import asyncio\n",
54
+ "import re\n",
55
+ "from threading import Thread\n",
56
+ "\n",
57
+ "import gradio as gr\n",
58
+ "import spaces\n",
59
+ "import torch\n",
60
+ "import numpy as np\n",
61
+ "from PIL import Image\n",
62
+ "import cv2\n",
63
+ "\n",
64
+ "from transformers import (\n",
65
+ " AutoProcessor,\n",
66
+ " Gemma3ForConditionalGeneration,\n",
67
+ " Qwen2VLForConditionalGeneration,\n",
68
+ " TextIteratorStreamer,\n",
69
+ ")\n",
70
+ "from transformers.image_utils import load_image\n",
71
+ "\n",
72
+ "# Constants\n",
73
+ "MAX_MAX_NEW_TOKENS = 2048\n",
74
+ "DEFAULT_MAX_NEW_TOKENS = 1024\n",
75
+ "MAX_INPUT_TOKEN_LENGTH = int(os.getenv(\"MAX_INPUT_TOKEN_LENGTH\", \"4096\"))\n",
76
+ "MAX_SEED = np.iinfo(np.int32).max\n",
77
+ "\n",
78
+ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
79
+ "\n",
80
+ "# Helper function to return a progress bar HTML snippet.\n",
81
+ "def progress_bar_html(label: str) -> str:\n",
82
+ " return f'''\n",
83
+ "<div style=\"display: flex; align-items: center;\">\n",
84
+ " <span style=\"margin-right: 10px; font-size: 14px;\">{label}</span>\n",
85
+ " <div style=\"width: 110px; height: 5px; background-color: #F0FFF0; border-radius: 2px; overflow: hidden;\">\n",
86
+ " <div style=\"width: 100%; height: 100%; background-color: #00FF00; animation: loading 1.5s linear infinite;\"></div>\n",
87
+ " </div>\n",
88
+ "</div>\n",
89
+ "<style>\n",
90
+ "@keyframes loading {{\n",
91
+ " 0% {{ transform: translateX(-100%); }}\n",
92
+ " 100% {{ transform: translateX(100%); }}\n",
93
+ "}}\n",
94
+ "</style>\n",
95
+ " '''\n",
96
+ "\n",
97
+ "# Qwen2-VL (for optional image inference)\n",
98
+ "\n",
99
+ "MODEL_ID_VL = \"prithivMLmods/Qwen2-VL-OCR-2B-Instruct\"\n",
100
+ "processor = AutoProcessor.from_pretrained(MODEL_ID_VL, trust_remote_code=True)\n",
101
+ "model_m = Qwen2VLForConditionalGeneration.from_pretrained(\n",
102
+ " MODEL_ID_VL,\n",
103
+ " trust_remote_code=True,\n",
104
+ " torch_dtype=torch.float16\n",
105
+ ").to(\"cuda\").eval()\n",
106
+ "\n",
107
+ "def clean_chat_history(chat_history):\n",
108
+ " cleaned = []\n",
109
+ " for msg in chat_history:\n",
110
+ " if isinstance(msg, dict) and isinstance(msg.get(\"content\"), str):\n",
111
+ " cleaned.append(msg)\n",
112
+ " return cleaned\n",
113
+ "\n",
114
+ "bad_words = json.loads(os.getenv('BAD_WORDS', \"[]\"))\n",
115
+ "bad_words_negative = json.loads(os.getenv('BAD_WORDS_NEGATIVE', \"[]\"))\n",
116
+ "default_negative = os.getenv(\"default_negative\", \"\")\n",
117
+ "\n",
118
+ "def check_text(prompt, negative=\"\"):\n",
119
+ " for i in bad_words:\n",
120
+ " if i in prompt:\n",
121
+ " return True\n",
122
+ " for i in bad_words_negative:\n",
123
+ " if i in negative:\n",
124
+ " return True\n",
125
+ " return False\n",
126
+ "\n",
127
+ "def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:\n",
128
+ " if randomize_seed:\n",
129
+ " seed = random.randint(0, MAX_SEED)\n",
130
+ " return seed\n",
131
+ "\n",
132
+ "CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv(\"CACHE_EXAMPLES\", \"0\") == \"1\"\n",
133
+ "MAX_IMAGE_SIZE = int(os.getenv(\"MAX_IMAGE_SIZE\", \"2048\"))\n",
134
+ "USE_TORCH_COMPILE = os.getenv(\"USE_TORCH_COMPILE\", \"0\") == \"1\"\n",
135
+ "ENABLE_CPU_OFFLOAD = os.getenv(\"ENABLE_CPU_OFFLOAD\", \"0\") == \"1\"\n",
136
+ "\n",
137
+ "dtype = torch.float16 if device.type == \"cuda\" else torch.float32\n",
138
+ "\n",
139
+ "\n",
140
+ "# Gemma3 Model (default for text, image, & video inference)\n",
141
+ "\n",
142
+ "gemma3_model_id = \"google/gemma-3-4b-it\" # alternative: google/gemma-3-12b-it\n",
143
+ "gemma3_model = Gemma3ForConditionalGeneration.from_pretrained(\n",
144
+ " gemma3_model_id, device_map=\"auto\"\n",
145
+ ").eval()\n",
146
+ "gemma3_processor = AutoProcessor.from_pretrained(gemma3_model_id)\n",
147
+ "\n",
148
+ "# VIDEO PROCESSING HELPER\n",
149
+ "\n",
150
+ "def downsample_video(video_path):\n",
151
+ " vidcap = cv2.VideoCapture(video_path)\n",
152
+ " total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n",
153
+ " fps = vidcap.get(cv2.CAP_PROP_FPS)\n",
154
+ " frames = []\n",
155
+ " # Sample 10 evenly spaced frames.\n",
156
+ " frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)\n",
157
+ " for i in frame_indices:\n",
158
+ " vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)\n",
159
+ " success, image = vidcap.read()\n",
160
+ " if success:\n",
161
+ " # Convert from BGR to RGB and then to PIL Image.\n",
162
+ " image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
163
+ " pil_image = Image.fromarray(image)\n",
164
+ " timestamp = round(i / fps, 2)\n",
165
+ " frames.append((pil_image, timestamp))\n",
166
+ " vidcap.release()\n",
167
+ " return frames\n",
168
+ "\n",
169
+ "# MAIN GENERATION FUNCTION\n",
170
+ "\n",
171
+ "@spaces.GPU\n",
172
+ "def generate(\n",
173
+ " input_dict: dict,\n",
174
+ " chat_history: list[dict],\n",
175
+ " max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,\n",
176
+ " temperature: float = 0.6,\n",
177
+ " top_p: float = 0.9,\n",
178
+ " top_k: int = 50,\n",
179
+ " repetition_penalty: float = 1.2,\n",
180
+ "):\n",
181
+ " text = input_dict[\"text\"]\n",
182
+ " files = input_dict.get(\"files\", [])\n",
183
+ " lower_text = text.lower().strip()\n",
184
+ "\n",
185
+ " # ----- Qwen2-VL branch (triggered with @qwen2-vl) -----\n",
186
+ " if lower_text.startswith(\"@qwen2-vl\"):\n",
187
+ " prompt_clean = re.sub(r\"@qwen2-vl\", \"\", text, flags=re.IGNORECASE).strip().strip('\"')\n",
188
+ " if files:\n",
189
+ " images = [load_image(f) for f in files]\n",
190
+ " messages = [{\n",
191
+ " \"role\": \"user\",\n",
192
+ " \"content\": [\n",
193
+ " *[{\"type\": \"image\", \"image\": image} for image in images],\n",
194
+ " {\"type\": \"text\", \"text\": prompt_clean},\n",
195
+ " ]\n",
196
+ " }]\n",
197
+ " prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
198
+ " inputs = processor(text=[prompt], images=images, return_tensors=\"pt\", padding=True).to(\"cuda\")\n",
199
+ " else:\n",
200
+ " messages = [\n",
201
+ " {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]},\n",
202
+ " {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": prompt_clean}]}\n",
203
+ " ]\n",
204
+ " inputs = processor.apply_chat_template(\n",
205
+ " messages, add_generation_prompt=True, tokenize=True,\n",
206
+ " return_dict=True, return_tensors=\"pt\"\n",
207
+ " ).to(\"cuda\", dtype=torch.float16)\n",
208
+ " streamer = TextIteratorStreamer(processor.tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)\n",
209
+ " generation_kwargs = {\n",
210
+ " **inputs,\n",
211
+ " \"streamer\": streamer,\n",
212
+ " \"max_new_tokens\": max_new_tokens,\n",
213
+ " \"do_sample\": True,\n",
214
+ " \"temperature\": temperature,\n",
215
+ " \"top_p\": top_p,\n",
216
+ " \"top_k\": top_k,\n",
217
+ " \"repetition_penalty\": repetition_penalty,\n",
218
+ " }\n",
219
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
220
+ " thread.start()\n",
221
+ " buffer = \"\"\n",
222
+ " yield progress_bar_html(\"Processing with Qwen2VL\")\n",
223
+ " for new_text in streamer:\n",
224
+ " buffer += new_text\n",
225
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
226
+ " time.sleep(0.01)\n",
227
+ " yield buffer\n",
228
+ " return\n",
229
+ "\n",
230
+ " # ----- Default branch: Gemma3 (for text, image, & video inference) -----\n",
231
+ " if files:\n",
232
+ " # Check if any provided file is a video based on extension.\n",
233
+ " video_extensions = (\".mp4\", \".mov\", \".avi\", \".mkv\", \".webm\")\n",
234
+ " if any(str(f).lower().endswith(video_extensions) for f in files):\n",
235
+ " # Video inference branch.\n",
236
+ " prompt_clean = re.sub(r\"@video-infer\", \"\", text, flags=re.IGNORECASE).strip().strip('\"')\n",
237
+ " video_path = files[0]\n",
238
+ " frames = downsample_video(video_path)\n",
239
+ " messages = [\n",
240
+ " {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]},\n",
241
+ " {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": prompt_clean}]}\n",
242
+ " ]\n",
243
+ " # Append each frame (with its timestamp) to the conversation.\n",
244
+ " for frame in frames:\n",
245
+ " image, timestamp = frame\n",
246
+ " image_path = f\"video_frame_{uuid.uuid4().hex}.png\"\n",
247
+ " image.save(image_path)\n",
248
+ " messages[1][\"content\"].append({\"type\": \"text\", \"text\": f\"Frame {timestamp}:\"})\n",
249
+ " messages[1][\"content\"].append({\"type\": \"image\", \"url\": image_path})\n",
250
+ " inputs = gemma3_processor.apply_chat_template(\n",
251
+ " messages, add_generation_prompt=True, tokenize=True,\n",
252
+ " return_dict=True, return_tensors=\"pt\"\n",
253
+ " ).to(gemma3_model.device, dtype=torch.bfloat16)\n",
254
+ " streamer = TextIteratorStreamer(gemma3_processor.tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)\n",
255
+ " generation_kwargs = {\n",
256
+ " **inputs,\n",
257
+ " \"streamer\": streamer,\n",
258
+ " \"max_new_tokens\": max_new_tokens,\n",
259
+ " \"do_sample\": True,\n",
260
+ " \"temperature\": temperature,\n",
261
+ " \"top_p\": top_p,\n",
262
+ " \"top_k\": top_k,\n",
263
+ " \"repetition_penalty\": repetition_penalty,\n",
264
+ " }\n",
265
+ " thread = Thread(target=gemma3_model.generate, kwargs=generation_kwargs)\n",
266
+ " thread.start()\n",
267
+ " buffer = \"\"\n",
268
+ " yield progress_bar_html(\"Processing video with Gemma3\")\n",
269
+ " for new_text in streamer:\n",
270
+ " buffer += new_text\n",
271
+ " time.sleep(0.01)\n",
272
+ " yield buffer\n",
273
+ " return\n",
274
+ " else:\n",
275
+ " # Image inference branch.\n",
276
+ " prompt_clean = re.sub(r\"@gemma3\", \"\", text, flags=re.IGNORECASE).strip().strip('\"')\n",
277
+ " images = [load_image(f) for f in files]\n",
278
+ " messages = [{\n",
279
+ " \"role\": \"user\",\n",
280
+ " \"content\": [\n",
281
+ " *[{\"type\": \"image\", \"image\": image} for image in images],\n",
282
+ " {\"type\": \"text\", \"text\": prompt_clean},\n",
283
+ " ]\n",
284
+ " }]\n",
285
+ " inputs = gemma3_processor.apply_chat_template(\n",
286
+ " messages, tokenize=True, add_generation_prompt=True,\n",
287
+ " return_dict=True, return_tensors=\"pt\"\n",
288
+ " ).to(gemma3_model.device, dtype=torch.bfloat16)\n",
289
+ " streamer = TextIteratorStreamer(gemma3_processor.tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)\n",
290
+ " generation_kwargs = {\n",
291
+ " **inputs,\n",
292
+ " \"streamer\": streamer,\n",
293
+ " \"max_new_tokens\": max_new_tokens,\n",
294
+ " \"do_sample\": True,\n",
295
+ " \"temperature\": temperature,\n",
296
+ " \"top_p\": top_p,\n",
297
+ " \"top_k\": top_k,\n",
298
+ " \"repetition_penalty\": repetition_penalty,\n",
299
+ " }\n",
300
+ " thread = Thread(target=gemma3_model.generate, kwargs=generation_kwargs)\n",
301
+ " thread.start()\n",
302
+ " buffer = \"\"\n",
303
+ " yield progress_bar_html(\"Processing with Gemma3\")\n",
304
+ " for new_text in streamer:\n",
305
+ " buffer += new_text\n",
306
+ " time.sleep(0.01)\n",
307
+ " yield buffer\n",
308
+ " return\n",
309
+ " else:\n",
310
+ " # Text-only inference branch.\n",
311
+ " messages = [\n",
312
+ " {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]},\n",
313
+ " {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": text}]}\n",
314
+ " ]\n",
315
+ " inputs = gemma3_processor.apply_chat_template(\n",
316
+ " messages, add_generation_prompt=True, tokenize=True,\n",
317
+ " return_dict=True, return_tensors=\"pt\"\n",
318
+ " ).to(gemma3_model.device, dtype=torch.bfloat16)\n",
319
+ " streamer = TextIteratorStreamer(gemma3_processor.tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)\n",
320
+ " generation_kwargs = {\n",
321
+ " **inputs,\n",
322
+ " \"streamer\": streamer,\n",
323
+ " \"max_new_tokens\": max_new_tokens,\n",
324
+ " \"do_sample\": True,\n",
325
+ " \"temperature\": temperature,\n",
326
+ " \"top_p\": top_p,\n",
327
+ " \"top_k\": top_k,\n",
328
+ " \"repetition_penalty\": repetition_penalty,\n",
329
+ " }\n",
330
+ " thread = Thread(target=gemma3_model.generate, kwargs=generation_kwargs)\n",
331
+ " thread.start()\n",
332
+ " outputs = []\n",
333
+ " for new_text in streamer:\n",
334
+ " outputs.append(new_text)\n",
335
+ " yield \"\".join(outputs)\n",
336
+ " final_response = \"\".join(outputs)\n",
337
+ " yield final_response\n",
338
+ "\n",
339
+ "\n",
340
+ "# Gradio Interface\n",
341
+ "\n",
342
+ "demo = gr.ChatInterface(\n",
343
+ " fn=generate,\n",
344
+ " additional_inputs=[\n",
345
+ " gr.Slider(label=\"Max new tokens\", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS),\n",
346
+ " gr.Slider(label=\"Temperature\", minimum=0.1, maximum=4.0, step=0.1, value=0.6),\n",
347
+ " gr.Slider(label=\"Top-p (nucleus sampling)\", minimum=0.05, maximum=1.0, step=0.05, value=0.9),\n",
348
+ " gr.Slider(label=\"Top-k\", minimum=1, maximum=1000, step=1, value=50),\n",
349
+ " gr.Slider(label=\"Repetition penalty\", minimum=1.0, maximum=2.0, step=0.05, value=1.2),\n",
350
+ " ],\n",
351
+ " type=\"messages\",\n",
352
+ " description=\"# **Gemma 3 Multimodal** \\n`Use @qwen2-vl to switch to Qwen2-VL OCR for image inference and @video-infer for video input`\",\n",
353
+ " fill_height=True,\n",
354
+ " textbox=gr.MultimodalTextbox(label=\"Query Input\", file_types=[\"image\", \"video\"], file_count=\"multiple\", placeholder=\"Tag with @qwen2-vl for Qwen2-VL inference if needed.\"),\n",
355
+ " stop_btn=\"Stop Generation\",\n",
356
+ " multimodal=True,\n",
357
+ ")\n",
358
+ "\n",
359
+ "if __name__ == \"__main__\":\n",
360
+ " demo.queue(max_size=20).launch(share=True)"
361
+ ],
362
+ "metadata": {
363
+ "id": "kW4MjaOs3c9E"
364
+ },
365
+ "execution_count": null,
366
+ "outputs": []
367
+ }
368
+ ]
369
+ }
MiMo-VL-7B-RL/MiMo_VL_7B_RL.ipynb ADDED
@@ -0,0 +1,242 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {
7
+ "id": "xL8y37Y6bORU"
8
+ },
9
+ "outputs": [],
10
+ "source": [
11
+ "%%capture\n",
12
+ "!pip install gradio spaces transformers accelerate numpy requests\n",
13
+ "!pip install torch torchvision qwen-vl-utils av hf_xet\n",
14
+ "!pip install pillow huggingface_hub opencv-python"
15
+ ]
16
+ },
17
+ {
18
+ "cell_type": "code",
19
+ "execution_count": null,
20
+ "metadata": {
21
+ "id": "Y-NTbL1tdL9X"
22
+ },
23
+ "outputs": [],
24
+ "source": [
25
+ "import os\n",
26
+ "import random\n",
27
+ "import uuid\n",
28
+ "import json\n",
29
+ "import time\n",
30
+ "import asyncio\n",
31
+ "from threading import Thread\n",
32
+ "\n",
33
+ "import gradio as gr\n",
34
+ "import spaces\n",
35
+ "import torch\n",
36
+ "import numpy as np\n",
37
+ "from PIL import Image\n",
38
+ "import cv2\n",
39
+ "\n",
40
+ "from transformers import (\n",
41
+ " Qwen2_5_VLForConditionalGeneration,\n",
42
+ " AutoProcessor,\n",
43
+ " TextIteratorStreamer,\n",
44
+ ")\n",
45
+ "from transformers.image_utils import load_image\n",
46
+ "\n",
47
+ "# Constants for text generation\n",
48
+ "MAX_MAX_NEW_TOKENS = 2048\n",
49
+ "DEFAULT_MAX_NEW_TOKENS = 1024\n",
50
+ "# Increase or disable input truncation to avoid token mismatches\n",
51
+ "MAX_INPUT_TOKEN_LENGTH = int(os.getenv(\"MAX_INPUT_TOKEN_LENGTH\", \"4096\"))\n",
52
+ "\n",
53
+ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
54
+ "\n",
55
+ "MODEL_ID = \"XiaomiMiMo/MiMo-VL-7B-RL\"\n",
56
+ "processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)\n",
57
+ "model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(\n",
58
+ " MODEL_ID,\n",
59
+ " trust_remote_code=True,\n",
60
+ " torch_dtype=torch.float16\n",
61
+ ").to(\"cuda\").eval()\n",
62
+ "\n",
63
+ "def downsample_video(video_path):\n",
64
+ " \"\"\"\n",
65
+ " Downsamples the video to evenly spaced frames.\n",
66
+ " Each frame is returned as a PIL image along with its timestamp.\n",
67
+ " \"\"\"\n",
68
+ " vidcap = cv2.VideoCapture(video_path)\n",
69
+ " total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n",
70
+ " fps = vidcap.get(cv2.CAP_PROP_FPS)\n",
71
+ " frames = []\n",
72
+ " # Sample 10 evenly spaced frames.\n",
73
+ " frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)\n",
74
+ " for i in frame_indices:\n",
75
+ " vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)\n",
76
+ " success, image = vidcap.read()\n",
77
+ " if success:\n",
78
+ " image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Convert BGR to RGB\n",
79
+ " pil_image = Image.fromarray(image)\n",
80
+ " timestamp = round(i / fps, 2)\n",
81
+ " frames.append((pil_image, timestamp))\n",
82
+ " vidcap.release()\n",
83
+ " return frames\n",
84
+ "\n",
85
+ "@spaces.GPU\n",
86
+ "def generate_image(text: str, image: Image.Image,\n",
87
+ " max_new_tokens: int = 1024,\n",
88
+ " temperature: float = 0.6,\n",
89
+ " top_p: float = 0.9,\n",
90
+ " top_k: int = 50,\n",
91
+ " repetition_penalty: float = 1.2):\n",
92
+ "\n",
93
+ " if image is None:\n",
94
+ " yield \"Please upload an image.\"\n",
95
+ " return\n",
96
+ "\n",
97
+ " messages = [{\n",
98
+ " \"role\": \"user\",\n",
99
+ " \"content\": [\n",
100
+ " {\"type\": \"image\", \"image\": image},\n",
101
+ " {\"type\": \"text\", \"text\": text},\n",
102
+ " ]\n",
103
+ " }]\n",
104
+ " prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
105
+ " inputs = processor(\n",
106
+ " text=[prompt_full],\n",
107
+ " images=[image],\n",
108
+ " return_tensors=\"pt\",\n",
109
+ " padding=True,\n",
110
+ " truncation=False,\n",
111
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
112
+ " ).to(\"cuda\")\n",
113
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
114
+ " generation_kwargs = {**inputs, \"streamer\": streamer, \"max_new_tokens\": max_new_tokens}\n",
115
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
116
+ " thread.start()\n",
117
+ " buffer = \"\"\n",
118
+ " for new_text in streamer:\n",
119
+ " buffer += new_text\n",
120
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
121
+ " time.sleep(0.01)\n",
122
+ " yield buffer\n",
123
+ "\n",
124
+ "@spaces.GPU\n",
125
+ "def generate_video(text: str, video_path: str,\n",
126
+ " max_new_tokens: int = 1024,\n",
127
+ " temperature: float = 0.6,\n",
128
+ " top_p: float = 0.9,\n",
129
+ " top_k: int = 50,\n",
130
+ " repetition_penalty: float = 1.2):\n",
131
+ "\n",
132
+ " if video_path is None:\n",
133
+ " yield \"Please upload a video.\"\n",
134
+ " return\n",
135
+ "\n",
136
+ " frames = downsample_video(video_path)\n",
137
+ " messages = [\n",
138
+ " {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]},\n",
139
+ " {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": text}]}\n",
140
+ " ]\n",
141
+ " # Append each frame with its timestamp.\n",
142
+ " for frame in frames:\n",
143
+ " image, timestamp = frame\n",
144
+ " messages[1][\"content\"].append({\"type\": \"text\", \"text\": f\"Frame {timestamp}:\"})\n",
145
+ " messages[1][\"content\"].append({\"type\": \"image\", \"image\": image})\n",
146
+ " inputs = processor.apply_chat_template(\n",
147
+ " messages,\n",
148
+ " tokenize=True,\n",
149
+ " add_generation_prompt=True,\n",
150
+ " return_dict=True,\n",
151
+ " return_tensors=\"pt\",\n",
152
+ " truncation=False,\n",
153
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
154
+ " ).to(\"cuda\")\n",
155
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
156
+ " generation_kwargs = {\n",
157
+ " **inputs,\n",
158
+ " \"streamer\": streamer,\n",
159
+ " \"max_new_tokens\": max_new_tokens,\n",
160
+ " \"do_sample\": True,\n",
161
+ " \"temperature\": temperature,\n",
162
+ " \"top_p\": top_p,\n",
163
+ " \"top_k\": top_k,\n",
164
+ " \"repetition_penalty\": repetition_penalty,\n",
165
+ " }\n",
166
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
167
+ " thread.start()\n",
168
+ " buffer = \"\"\n",
169
+ " for new_text in streamer:\n",
170
+ " buffer += new_text\n",
171
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
172
+ " time.sleep(0.01)\n",
173
+ " yield buffer\n",
174
+ "\n",
175
+ "css = \"\"\"\n",
176
+ ".submit-btn {\n",
177
+ " background-color: #2980b9 !important;\n",
178
+ " color: white !important;\n",
179
+ "}\n",
180
+ ".submit-btn:hover {\n",
181
+ " background-color: #3498db !important;\n",
182
+ "}\n",
183
+ "\"\"\"\n",
184
+ "\n",
185
+ "# Create the Gradio Interface\n",
186
+ "with gr.Blocks(css=css, theme=\"bethecloud/storj_theme\") as demo:\n",
187
+ " gr.Markdown(\"# **XiaomiMiMo/MiMo-VL-7B-RL**\")\n",
188
+ " with gr.Row():\n",
189
+ " with gr.Column():\n",
190
+ " with gr.Tabs():\n",
191
+ " with gr.TabItem(\"Image Inference\"):\n",
192
+ " image_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
193
+ " image_upload = gr.Image(type=\"pil\", label=\"Image\")\n",
194
+ " image_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
195
+ "\n",
196
+ " with gr.TabItem(\"Video Inference\"):\n",
197
+ " video_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
198
+ " video_upload = gr.Video(label=\"Video\")\n",
199
+ " video_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
200
+ "\n",
201
+ " with gr.Accordion(\"Advanced options\", open=False):\n",
202
+ " max_new_tokens = gr.Slider(label=\"Max new tokens\", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)\n",
203
+ " temperature = gr.Slider(label=\"Temperature\", minimum=0.1, maximum=4.0, step=0.1, value=0.6)\n",
204
+ " top_p = gr.Slider(label=\"Top-p (nucleus sampling)\", minimum=0.05, maximum=1.0, step=0.05, value=0.9)\n",
205
+ " top_k = gr.Slider(label=\"Top-k\", minimum=1, maximum=1000, step=1, value=50)\n",
206
+ " repetition_penalty = gr.Slider(label=\"Repetition penalty\", minimum=1.0, maximum=2.0, step=0.05, value=1.2)\n",
207
+ " with gr.Column():\n",
208
+ " output = gr.Textbox(label=\"Output\", interactive=False)\n",
209
+ "\n",
210
+ " image_submit.click(\n",
211
+ " fn=generate_image,\n",
212
+ " inputs=[image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
213
+ " outputs=output\n",
214
+ " )\n",
215
+ " video_submit.click(\n",
216
+ " fn=generate_video,\n",
217
+ " inputs=[video_query, video_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
218
+ " outputs=output\n",
219
+ " )\n",
220
+ "\n",
221
+ "if __name__ == \"__main__\":\n",
222
+ " demo.queue(max_size=30).launch(share=True, ssr_mode=False, show_error=True)"
223
+ ]
224
+ }
225
+ ],
226
+ "metadata": {
227
+ "accelerator": "GPU",
228
+ "colab": {
229
+ "gpuType": "T4",
230
+ "provenance": []
231
+ },
232
+ "kernelspec": {
233
+ "display_name": "Python 3",
234
+ "name": "python3"
235
+ },
236
+ "language_info": {
237
+ "name": "python"
238
+ }
239
+ },
240
+ "nbformat": 4,
241
+ "nbformat_minor": 0
242
+ }
MiMo-VL-7B-SFT/MiMo_VL_7B_SFT.ipynb ADDED
@@ -0,0 +1,242 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {
7
+ "id": "xL8y37Y6bORU"
8
+ },
9
+ "outputs": [],
10
+ "source": [
11
+ "%%capture\n",
12
+ "!pip install gradio spaces transformers accelerate numpy requests\n",
13
+ "!pip install torch torchvision qwen-vl-utils av hf_xet\n",
14
+ "!pip install pillow huggingface_hub opencv-python"
15
+ ]
16
+ },
17
+ {
18
+ "cell_type": "code",
19
+ "execution_count": null,
20
+ "metadata": {
21
+ "id": "Y-NTbL1tdL9X"
22
+ },
23
+ "outputs": [],
24
+ "source": [
25
+ "import os\n",
26
+ "import random\n",
27
+ "import uuid\n",
28
+ "import json\n",
29
+ "import time\n",
30
+ "import asyncio\n",
31
+ "from threading import Thread\n",
32
+ "\n",
33
+ "import gradio as gr\n",
34
+ "import spaces\n",
35
+ "import torch\n",
36
+ "import numpy as np\n",
37
+ "from PIL import Image\n",
38
+ "import cv2\n",
39
+ "\n",
40
+ "from transformers import (\n",
41
+ " Qwen2_5_VLForConditionalGeneration,\n",
42
+ " AutoProcessor,\n",
43
+ " TextIteratorStreamer,\n",
44
+ ")\n",
45
+ "from transformers.image_utils import load_image\n",
46
+ "\n",
47
+ "# Constants for text generation\n",
48
+ "MAX_MAX_NEW_TOKENS = 2048\n",
49
+ "DEFAULT_MAX_NEW_TOKENS = 1024\n",
50
+ "# Increase or disable input truncation to avoid token mismatches\n",
51
+ "MAX_INPUT_TOKEN_LENGTH = int(os.getenv(\"MAX_INPUT_TOKEN_LENGTH\", \"4096\"))\n",
52
+ "\n",
53
+ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
54
+ "\n",
55
+ "MODEL_ID = \"XiaomiMiMo/MiMo-VL-7B-SFT\"\n",
56
+ "processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)\n",
57
+ "model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(\n",
58
+ " MODEL_ID,\n",
59
+ " trust_remote_code=True,\n",
60
+ " torch_dtype=torch.float16\n",
61
+ ").to(\"cuda\").eval()\n",
62
+ "\n",
63
+ "def downsample_video(video_path):\n",
64
+ " \"\"\"\n",
65
+ " Downsamples the video to evenly spaced frames.\n",
66
+ " Each frame is returned as a PIL image along with its timestamp.\n",
67
+ " \"\"\"\n",
68
+ " vidcap = cv2.VideoCapture(video_path)\n",
69
+ " total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n",
70
+ " fps = vidcap.get(cv2.CAP_PROP_FPS)\n",
71
+ " frames = []\n",
72
+ " # Sample 10 evenly spaced frames.\n",
73
+ " frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)\n",
74
+ " for i in frame_indices:\n",
75
+ " vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)\n",
76
+ " success, image = vidcap.read()\n",
77
+ " if success:\n",
78
+ " image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Convert BGR to RGB\n",
79
+ " pil_image = Image.fromarray(image)\n",
80
+ " timestamp = round(i / fps, 2)\n",
81
+ " frames.append((pil_image, timestamp))\n",
82
+ " vidcap.release()\n",
83
+ " return frames\n",
84
+ "\n",
85
+ "@spaces.GPU\n",
86
+ "def generate_image(text: str, image: Image.Image,\n",
87
+ " max_new_tokens: int = 1024,\n",
88
+ " temperature: float = 0.6,\n",
89
+ " top_p: float = 0.9,\n",
90
+ " top_k: int = 50,\n",
91
+ " repetition_penalty: float = 1.2):\n",
92
+ "\n",
93
+ " if image is None:\n",
94
+ " yield \"Please upload an image.\"\n",
95
+ " return\n",
96
+ "\n",
97
+ " messages = [{\n",
98
+ " \"role\": \"user\",\n",
99
+ " \"content\": [\n",
100
+ " {\"type\": \"image\", \"image\": image},\n",
101
+ " {\"type\": \"text\", \"text\": text},\n",
102
+ " ]\n",
103
+ " }]\n",
104
+ " prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
105
+ " inputs = processor(\n",
106
+ " text=[prompt_full],\n",
107
+ " images=[image],\n",
108
+ " return_tensors=\"pt\",\n",
109
+ " padding=True,\n",
110
+ " truncation=False,\n",
111
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
112
+ " ).to(\"cuda\")\n",
113
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
114
+ " generation_kwargs = {**inputs, \"streamer\": streamer, \"max_new_tokens\": max_new_tokens}\n",
115
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
116
+ " thread.start()\n",
117
+ " buffer = \"\"\n",
118
+ " for new_text in streamer:\n",
119
+ " buffer += new_text\n",
120
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
121
+ " time.sleep(0.01)\n",
122
+ " yield buffer\n",
123
+ "\n",
124
+ "@spaces.GPU\n",
125
+ "def generate_video(text: str, video_path: str,\n",
126
+ " max_new_tokens: int = 1024,\n",
127
+ " temperature: float = 0.6,\n",
128
+ " top_p: float = 0.9,\n",
129
+ " top_k: int = 50,\n",
130
+ " repetition_penalty: float = 1.2):\n",
131
+ "\n",
132
+ " if video_path is None:\n",
133
+ " yield \"Please upload a video.\"\n",
134
+ " return\n",
135
+ "\n",
136
+ " frames = downsample_video(video_path)\n",
137
+ " messages = [\n",
138
+ " {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]},\n",
139
+ " {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": text}]}\n",
140
+ " ]\n",
141
+ " # Append each frame with its timestamp.\n",
142
+ " for frame in frames:\n",
143
+ " image, timestamp = frame\n",
144
+ " messages[1][\"content\"].append({\"type\": \"text\", \"text\": f\"Frame {timestamp}:\"})\n",
145
+ " messages[1][\"content\"].append({\"type\": \"image\", \"image\": image})\n",
146
+ " inputs = processor.apply_chat_template(\n",
147
+ " messages,\n",
148
+ " tokenize=True,\n",
149
+ " add_generation_prompt=True,\n",
150
+ " return_dict=True,\n",
151
+ " return_tensors=\"pt\",\n",
152
+ " truncation=False,\n",
153
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
154
+ " ).to(\"cuda\")\n",
155
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
156
+ " generation_kwargs = {\n",
157
+ " **inputs,\n",
158
+ " \"streamer\": streamer,\n",
159
+ " \"max_new_tokens\": max_new_tokens,\n",
160
+ " \"do_sample\": True,\n",
161
+ " \"temperature\": temperature,\n",
162
+ " \"top_p\": top_p,\n",
163
+ " \"top_k\": top_k,\n",
164
+ " \"repetition_penalty\": repetition_penalty,\n",
165
+ " }\n",
166
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
167
+ " thread.start()\n",
168
+ " buffer = \"\"\n",
169
+ " for new_text in streamer:\n",
170
+ " buffer += new_text\n",
171
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
172
+ " time.sleep(0.01)\n",
173
+ " yield buffer\n",
174
+ "\n",
175
+ "css = \"\"\"\n",
176
+ ".submit-btn {\n",
177
+ " background-color: #2980b9 !important;\n",
178
+ " color: white !important;\n",
179
+ "}\n",
180
+ ".submit-btn:hover {\n",
181
+ " background-color: #3498db !important;\n",
182
+ "}\n",
183
+ "\"\"\"\n",
184
+ "\n",
185
+ "# Create the Gradio Interface\n",
186
+ "with gr.Blocks(css=css, theme=\"bethecloud/storj_theme\") as demo:\n",
187
+ " gr.Markdown(\"# **XiaomiMiMo/MiMo-VL-7B-SFT**\")\n",
188
+ " with gr.Row():\n",
189
+ " with gr.Column():\n",
190
+ " with gr.Tabs():\n",
191
+ " with gr.TabItem(\"Image Inference\"):\n",
192
+ " image_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
193
+ " image_upload = gr.Image(type=\"pil\", label=\"Image\")\n",
194
+ " image_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
195
+ "\n",
196
+ " with gr.TabItem(\"Video Inference\"):\n",
197
+ " video_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
198
+ " video_upload = gr.Video(label=\"Video\")\n",
199
+ " video_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
200
+ "\n",
201
+ " with gr.Accordion(\"Advanced options\", open=False):\n",
202
+ " max_new_tokens = gr.Slider(label=\"Max new tokens\", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)\n",
203
+ " temperature = gr.Slider(label=\"Temperature\", minimum=0.1, maximum=4.0, step=0.1, value=0.6)\n",
204
+ " top_p = gr.Slider(label=\"Top-p (nucleus sampling)\", minimum=0.05, maximum=1.0, step=0.05, value=0.9)\n",
205
+ " top_k = gr.Slider(label=\"Top-k\", minimum=1, maximum=1000, step=1, value=50)\n",
206
+ " repetition_penalty = gr.Slider(label=\"Repetition penalty\", minimum=1.0, maximum=2.0, step=0.05, value=1.2)\n",
207
+ " with gr.Column():\n",
208
+ " output = gr.Textbox(label=\"Output\", interactive=False)\n",
209
+ "\n",
210
+ " image_submit.click(\n",
211
+ " fn=generate_image,\n",
212
+ " inputs=[image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
213
+ " outputs=output\n",
214
+ " )\n",
215
+ " video_submit.click(\n",
216
+ " fn=generate_video,\n",
217
+ " inputs=[video_query, video_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
218
+ " outputs=output\n",
219
+ " )\n",
220
+ "\n",
221
+ "if __name__ == \"__main__\":\n",
222
+ " demo.queue(max_size=30).launch(share=True, ssr_mode=False, show_error=True)"
223
+ ]
224
+ }
225
+ ],
226
+ "metadata": {
227
+ "accelerator": "GPU",
228
+ "colab": {
229
+ "gpuType": "T4",
230
+ "provenance": []
231
+ },
232
+ "kernelspec": {
233
+ "display_name": "Python 3",
234
+ "name": "python3"
235
+ },
236
+ "language_info": {
237
+ "name": "python"
238
+ }
239
+ },
240
+ "nbformat": 4,
241
+ "nbformat_minor": 0
242
+ }
Qwen-2VL-MessyOCR/Qwen2_VL_OCR_2B_Instruct_prithivmlmods.ipynb ADDED
@@ -0,0 +1,247 @@
1
+ {
2
+ "nbformat": 4,
3
+ "nbformat_minor": 0,
4
+ "metadata": {
5
+ "colab": {
6
+ "provenance": [],
7
+ "gpuType": "T4"
8
+ },
9
+ "kernelspec": {
10
+ "name": "python3",
11
+ "display_name": "Python 3"
12
+ },
13
+ "language_info": {
14
+ "name": "python"
15
+ },
16
+ "accelerator": "GPU"
17
+ },
18
+ "cells": [
19
+ {
20
+ "cell_type": "code",
21
+ "execution_count": 1,
22
+ "metadata": {
23
+ "id": "xL8y37Y6bORU"
24
+ },
25
+ "outputs": [],
26
+ "source": [
27
+ "%%capture\n",
28
+ "!pip install gradio spaces transformers accelerate numpy requests\n",
29
+ "!pip install torch torchvision qwen-vl-utils av hf_xet\n",
30
+ "!pip install pillow huggingface_hub opencv-python"
31
+ ]
32
+ },
33
+ {
34
+ "cell_type": "code",
35
+ "source": [
36
+ "import os\n",
37
+ "import time\n",
38
+ "import numpy as np\n",
39
+ "from threading import Thread\n",
40
+ "\n",
41
+ "import gradio as gr\n",
42
+ "import spaces\n",
43
+ "import torch\n",
44
+ "from PIL import Image\n",
45
+ "import cv2\n",
46
+ "\n",
47
+ "from transformers import (\n",
48
+ " Qwen2VLForConditionalGeneration,\n",
49
+ " AutoProcessor,\n",
50
+ " TextIteratorStreamer,\n",
51
+ ")\n",
52
+ "\n",
53
+ "# Constants for text generation\n",
54
+ "MAX_MAX_NEW_TOKENS = 2048\n",
55
+ "DEFAULT_MAX_NEW_TOKENS = 1024\n",
56
+ "# Increase or disable input truncation to avoid token mismatches\n",
57
+ "MAX_INPUT_TOKEN_LENGTH = int(os.getenv(\"MAX_INPUT_TOKEN_LENGTH\", \"8192\"))\n",
58
+ "\n",
59
+ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
60
+ "\n",
61
+ "MODEL_ID = \"prithivMLmods/Qwen2-VL-OCR-2B-Instruct\"\n",
62
+ "processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)\n",
63
+ "model_m = Qwen2VLForConditionalGeneration.from_pretrained(\n",
64
+ " MODEL_ID,\n",
65
+ " trust_remote_code=True,\n",
66
+ " torch_dtype=torch.float16\n",
67
+ ").to(device).eval()\n",
68
+ "\n",
69
+ "def downsample_video(video_path):\n",
70
+ " \"\"\"\n",
71
+ " Downsamples the video to evenly spaced frames.\n",
72
+ " Each frame is returned as a PIL image along with its timestamp.\n",
73
+ " \"\"\"\n",
74
+ " vidcap = cv2.VideoCapture(video_path)\n",
75
+ " total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n",
76
+ " fps = vidcap.get(cv2.CAP_PROP_FPS)\n",
77
+ " frames = []\n",
78
+ " frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)\n",
79
+ " for i in frame_indices:\n",
80
+ " vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)\n",
81
+ " success, image = vidcap.read()\n",
82
+ " if success:\n",
83
+ " image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
84
+ " pil_image = Image.fromarray(image)\n",
85
+ " timestamp = round(i / fps, 2)\n",
86
+ " frames.append((pil_image, timestamp))\n",
87
+ " vidcap.release()\n",
88
+ " return frames\n",
89
+ "\n",
90
+ "@spaces.GPU\n",
91
+ "def generate_image(text: str, image: Image.Image,\n",
92
+ " max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,\n",
93
+ " temperature: float = 0.6,\n",
94
+ " top_p: float = 0.9,\n",
95
+ " top_k: int = 50,\n",
96
+ " repetition_penalty: float = 1.2):\n",
97
+ "\n",
98
+ " if image is None:\n",
99
+ " yield \"Please upload an image.\"\n",
100
+ " return\n",
101
+ "\n",
102
+ " messages = [{\n",
103
+ " \"role\": \"user\",\n",
104
+ " \"content\": [\n",
105
+ " {\"type\": \"image\", \"image\": image},\n",
106
+ " {\"type\": \"text\", \"text\": text},\n",
107
+ " ]\n",
108
+ " }]\n",
109
+ " prompt_full = processor.apply_chat_template(\n",
110
+ " messages, tokenize=False, add_generation_prompt=True\n",
111
+ " )\n",
112
+ " inputs = processor(\n",
113
+ " text=[prompt_full],\n",
114
+ " images=[image],\n",
115
+ " return_tensors=\"pt\",\n",
116
+ " padding=True,\n",
117
+ " truncation=False # Disable truncation to keep image tokens intact\n",
118
+ " ).to(device)\n",
119
+ "\n",
120
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
121
+ " generation_kwargs = {\n",
122
+ " **inputs,\n",
123
+ " \"streamer\": streamer,\n",
124
+ " \"max_new_tokens\": max_new_tokens,\n",
125
+ " \"do_sample\": True,\n",
126
+ " \"temperature\": temperature,\n",
127
+ " \"top_p\": top_p,\n",
128
+ " \"top_k\": top_k,\n",
129
+ " \"repetition_penalty\": repetition_penalty,\n",
130
+ " }\n",
131
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
132
+ " thread.start()\n",
133
+ " buffer = \"\"\n",
134
+ " for new_text in streamer:\n",
135
+ " buffer += new_text.replace(\"<|im_end|>\", \"\")\n",
136
+ " time.sleep(0.01)\n",
137
+ " yield buffer\n",
138
+ "\n",
139
+ "@spaces.GPU\n",
140
+ "def generate_video(text: str, video_path: str,\n",
141
+ " max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,\n",
142
+ " temperature: float = 0.6,\n",
143
+ " top_p: float = 0.9,\n",
144
+ " top_k: int = 50,\n",
145
+ " repetition_penalty: float = 1.2):\n",
146
+ "\n",
147
+ " if video_path is None:\n",
148
+ " yield \"Please upload a video.\"\n",
149
+ " return\n",
150
+ "\n",
151
+ " frames = downsample_video(video_path)\n",
152
+ " messages = [\n",
153
+ " {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]},\n",
154
+ " {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": text}]}\n",
155
+ " ]\n",
156
+ " for image, timestamp in frames:\n",
157
+ " messages[1][\"content\"].extend([\n",
158
+ " {\"type\": \"text\", \"text\": f\"Frame {timestamp}:\"},\n",
159
+ " {\"type\": \"image\", \"image\": image}\n",
160
+ " ])\n",
161
+ "\n",
162
+ " # Use chat template with no truncation\n",
163
+ " inputs = processor.apply_chat_template(\n",
164
+ " messages,\n",
165
+ " tokenize=True,\n",
166
+ " add_generation_prompt=True,\n",
167
+ " return_dict=True,\n",
168
+ " return_tensors=\"pt\",\n",
169
+ " truncation=False\n",
170
+ " ).to(device)\n",
171
+ "\n",
172
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
173
+ " generation_kwargs = {\n",
174
+ " **inputs,\n",
175
+ " \"streamer\": streamer,\n",
176
+ " \"max_new_tokens\": max_new_tokens,\n",
177
+ " \"do_sample\": True,\n",
178
+ " \"temperature\": temperature,\n",
179
+ " \"top_p\": top_p,\n",
180
+ " \"top_k\": top_k,\n",
181
+ " \"repetition_penalty\": repetition_penalty,\n",
182
+ " }\n",
183
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
184
+ " thread.start()\n",
185
+ " buffer = \"\"\n",
186
+ " for new_text in streamer:\n",
187
+ " buffer += new_text.replace(\"<|im_end|>\", \"\")\n",
188
+ " time.sleep(0.01)\n",
189
+ " yield buffer\n",
190
+ "\n",
191
+ "# Gradio App Style and Layout\n",
192
+ "css = \"\"\"\n",
193
+ ".submit-btn {\n",
194
+ " background-color: #2980b9 !important;\n",
195
+ " color: white !important;\n",
196
+ "}\n",
197
+ ".submit-btn:hover {\n",
198
+ " background-color: #3498db !important;\n",
199
+ "}\n",
200
+ "\"\"\"\n",
201
+ "\n",
202
+ "with gr.Blocks(css=css, theme=\"bethecloud/storj_theme\") as demo:\n",
203
+ " gr.Markdown(\"# **prithivMLmods/Qwen2-VL-OCR-2B-Instruct**\")\n",
204
+ " with gr.Row():\n",
205
+ " with gr.Column():\n",
206
+ " with gr.Tabs():\n",
207
+ " with gr.TabItem(\"Image Inference\"):\n",
208
+ " image_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
209
+ " image_upload = gr.Image(type=\"pil\", label=\"Image\")\n",
210
+ " image_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
211
+ "\n",
212
+ " with gr.TabItem(\"Video Inference\"):\n",
213
+ " video_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
214
+ " video_upload = gr.Video(label=\"Video\")\n",
215
+ " video_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
216
+ "\n",
217
+ " with gr.Accordion(\"Advanced options\", open=False):\n",
218
+ " max_new_tokens = gr.Slider(label=\"Max new tokens\", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)\n",
219
+ " temperature = gr.Slider(label=\"Temperature\", minimum=0.1, maximum=4.0, step=0.1, value=0.6)\n",
220
+ " top_p = gr.Slider(label=\"Top-p (nucleus sampling)\", minimum=0.05, maximum=1.0, step=0.05, value=0.9)\n",
221
+ " top_k = gr.Slider(label=\"Top-k\", minimum=1, maximum=1000, step=1, value=50)\n",
222
+ " repetition_penalty = gr.Slider(label=\"Repetition penalty\", minimum=1.0, maximum=2.0, step=0.05, value=1.2)\n",
223
+ " with gr.Column():\n",
224
+ " output = gr.Textbox(label=\"Output\", interactive=False)\n",
225
+ "\n",
226
+ " image_submit.click(\n",
227
+ " fn=generate_image,\n",
228
+ " inputs=[image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
229
+ " outputs=output\n",
230
+ " )\n",
231
+ " video_submit.click(\n",
232
+ " fn=generate_video,\n",
233
+ " inputs=[video_query, video_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
234
+ " outputs=output\n",
235
+ " )\n",
236
+ "\n",
237
+ "if __name__ == \"__main__\":\n",
238
+ " demo.queue(max_size=30).launch(share=True, ssr_mode=False, show_error=True)\n"
239
+ ],
240
+ "metadata": {
241
+ "id": "Y-NTbL1tdL9X"
242
+ },
243
+ "execution_count": null,
244
+ "outputs": []
245
+ }
246
+ ]
247
+ }
Qwen2-VL/Qwen2_VL_2B_Instruct.ipynb ADDED
@@ -0,0 +1,242 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {
7
+ "id": "xL8y37Y6bORU"
8
+ },
9
+ "outputs": [],
10
+ "source": [
11
+ "%%capture\n",
12
+ "!pip install gradio spaces transformers accelerate numpy requests\n",
13
+ "!pip install torch torchvision qwen-vl-utils av hf_xet\n",
14
+ "!pip install pillow huggingface_hub opencv-python"
15
+ ]
16
+ },
17
+ {
18
+ "cell_type": "code",
19
+ "execution_count": null,
20
+ "metadata": {
21
+ "id": "Y-NTbL1tdL9X"
22
+ },
23
+ "outputs": [],
24
+ "source": [
25
+ "import os\n",
26
+ "import random\n",
27
+ "import uuid\n",
28
+ "import json\n",
29
+ "import time\n",
30
+ "import asyncio\n",
31
+ "from threading import Thread\n",
32
+ "\n",
33
+ "import gradio as gr\n",
34
+ "import spaces\n",
35
+ "import torch\n",
36
+ "import numpy as np\n",
37
+ "from PIL import Image\n",
38
+ "import cv2\n",
39
+ "\n",
40
+ "from transformers import (\n",
41
+ " Qwen2VLForConditionalGeneration,\n",
42
+ " AutoProcessor,\n",
43
+ " TextIteratorStreamer,\n",
44
+ ")\n",
45
+ "from transformers.image_utils import load_image\n",
46
+ "\n",
47
+ "# Constants for text generation\n",
48
+ "MAX_MAX_NEW_TOKENS = 2048\n",
49
+ "DEFAULT_MAX_NEW_TOKENS = 1024\n",
50
+ "# Increase or disable input truncation to avoid token mismatches\n",
51
+ "MAX_INPUT_TOKEN_LENGTH = int(os.getenv(\"MAX_INPUT_TOKEN_LENGTH\", \"8192\"))\n",
52
+ "\n",
53
+ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
54
+ "\n",
55
+ "MODEL_ID = \"Qwen/Qwen2-VL-2B-Instruct\"\n",
56
+ "processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)\n",
57
+ "model_m = Qwen2VLForConditionalGeneration.from_pretrained(\n",
58
+ " MODEL_ID,\n",
59
+ " trust_remote_code=True,\n",
60
+ " torch_dtype=torch.float16\n",
61
+ ").to(\"cuda\").eval()\n",
62
+ "\n",
63
+ "def downsample_video(video_path):\n",
64
+ " \"\"\"\n",
65
+ " Downsamples the video to evenly spaced frames.\n",
66
+ " Each frame is returned as a PIL image along with its timestamp.\n",
67
+ " \"\"\"\n",
68
+ " vidcap = cv2.VideoCapture(video_path)\n",
69
+ " total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n",
70
+ " fps = vidcap.get(cv2.CAP_PROP_FPS)\n",
71
+ " frames = []\n",
72
+ " # Sample 10 evenly spaced frames.\n",
73
+ " frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)\n",
74
+ " for i in frame_indices:\n",
75
+ " vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)\n",
76
+ " success, image = vidcap.read()\n",
77
+ " if success:\n",
78
+ " image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Convert BGR to RGB\n",
79
+ " pil_image = Image.fromarray(image)\n",
80
+ " timestamp = round(i / fps, 2)\n",
81
+ " frames.append((pil_image, timestamp))\n",
82
+ " vidcap.release()\n",
83
+ " return frames\n",
84
+ "\n",
85
+ "@spaces.GPU\n",
86
+ "def generate_image(text: str, image: Image.Image,\n",
87
+ " max_new_tokens: int = 1024,\n",
88
+ " temperature: float = 0.6,\n",
89
+ " top_p: float = 0.9,\n",
90
+ " top_k: int = 50,\n",
91
+ " repetition_penalty: float = 1.2):\n",
92
+ "\n",
93
+ " if image is None:\n",
94
+ " yield \"Please upload an image.\"\n",
95
+ " return\n",
96
+ "\n",
97
+ " messages = [{\n",
98
+ " \"role\": \"user\",\n",
99
+ " \"content\": [\n",
100
+ " {\"type\": \"image\", \"image\": image},\n",
101
+ " {\"type\": \"text\", \"text\": text},\n",
102
+ " ]\n",
103
+ " }]\n",
104
+ " prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
105
+ " inputs = processor(\n",
106
+ " text=[prompt_full],\n",
107
+ " images=[image],\n",
108
+ " return_tensors=\"pt\",\n",
109
+ " padding=True,\n",
110
+ " truncation=False,\n",
111
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
112
+ " ).to(\"cuda\")\n",
113
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
114
+ " generation_kwargs = {**inputs, \"streamer\": streamer, \"max_new_tokens\": max_new_tokens}\n",
115
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
116
+ " thread.start()\n",
117
+ " buffer = \"\"\n",
118
+ " for new_text in streamer:\n",
119
+ " buffer += new_text\n",
120
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
121
+ " time.sleep(0.01)\n",
122
+ " yield buffer\n",
123
+ "\n",
124
+ "@spaces.GPU\n",
125
+ "def generate_video(text: str, video_path: str,\n",
126
+ " max_new_tokens: int = 1024,\n",
127
+ " temperature: float = 0.6,\n",
128
+ " top_p: float = 0.9,\n",
129
+ " top_k: int = 50,\n",
130
+ " repetition_penalty: float = 1.2):\n",
131
+ "\n",
132
+ " if video_path is None:\n",
133
+ " yield \"Please upload a video.\"\n",
134
+ " return\n",
135
+ "\n",
136
+ " frames = downsample_video(video_path)\n",
137
+ " messages = [\n",
138
+ " {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]},\n",
139
+ " {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": text}]}\n",
140
+ " ]\n",
141
+ " # Append each frame with its timestamp.\n",
142
+ " for frame in frames:\n",
143
+ " image, timestamp = frame\n",
144
+ " messages[1][\"content\"].append({\"type\": \"text\", \"text\": f\"Frame {timestamp}:\"})\n",
145
+ " messages[1][\"content\"].append({\"type\": \"image\", \"image\": image})\n",
146
+ " inputs = processor.apply_chat_template(\n",
147
+ " messages,\n",
148
+ " tokenize=True,\n",
149
+ " add_generation_prompt=True,\n",
150
+ " return_dict=True,\n",
151
+ " return_tensors=\"pt\",\n",
152
+ " truncation=False,\n",
153
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
154
+ " ).to(\"cuda\")\n",
155
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
156
+ " generation_kwargs = {\n",
157
+ " **inputs,\n",
158
+ " \"streamer\": streamer,\n",
159
+ " \"max_new_tokens\": max_new_tokens,\n",
160
+ " \"do_sample\": True,\n",
161
+ " \"temperature\": temperature,\n",
162
+ " \"top_p\": top_p,\n",
163
+ " \"top_k\": top_k,\n",
164
+ " \"repetition_penalty\": repetition_penalty,\n",
165
+ " }\n",
166
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
167
+ " thread.start()\n",
168
+ " buffer = \"\"\n",
169
+ " for new_text in streamer:\n",
170
+ " buffer += new_text\n",
171
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
172
+ " time.sleep(0.01)\n",
173
+ " yield buffer\n",
174
+ "\n",
175
+ "css = \"\"\"\n",
176
+ ".submit-btn {\n",
177
+ " background-color: #2980b9 !important;\n",
178
+ " color: white !important;\n",
179
+ "}\n",
180
+ ".submit-btn:hover {\n",
181
+ " background-color: #3498db !important;\n",
182
+ "}\n",
183
+ "\"\"\"\n",
184
+ "\n",
185
+ "# Create the Gradio Interface\n",
186
+ "with gr.Blocks(css=css, theme=\"bethecloud/storj_theme\") as demo:\n",
187
+ " gr.Markdown(\"# **Qwen/Qwen2-VL-2B-Instruct**\")\n",
188
+ " with gr.Row():\n",
189
+ " with gr.Column():\n",
190
+ " with gr.Tabs():\n",
191
+ " with gr.TabItem(\"Image Inference\"):\n",
192
+ " image_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
193
+ " image_upload = gr.Image(type=\"pil\", label=\"Image\")\n",
194
+ " image_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
195
+ "\n",
196
+ " with gr.TabItem(\"Video Inference\"):\n",
197
+ " video_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
198
+ " video_upload = gr.Video(label=\"Video\")\n",
199
+ " video_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
200
+ "\n",
201
+ " with gr.Accordion(\"Advanced options\", open=False):\n",
202
+ " max_new_tokens = gr.Slider(label=\"Max new tokens\", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)\n",
203
+ " temperature = gr.Slider(label=\"Temperature\", minimum=0.1, maximum=4.0, step=0.1, value=0.6)\n",
204
+ " top_p = gr.Slider(label=\"Top-p (nucleus sampling)\", minimum=0.05, maximum=1.0, step=0.05, value=0.9)\n",
205
+ " top_k = gr.Slider(label=\"Top-k\", minimum=1, maximum=1000, step=1, value=50)\n",
206
+ " repetition_penalty = gr.Slider(label=\"Repetition penalty\", minimum=1.0, maximum=2.0, step=0.05, value=1.2)\n",
207
+ " with gr.Column():\n",
208
+ " output = gr.Textbox(label=\"Output\", interactive=False)\n",
209
+ "\n",
210
+ " image_submit.click(\n",
211
+ " fn=generate_image,\n",
212
+ " inputs=[image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
213
+ " outputs=output\n",
214
+ " )\n",
215
+ " video_submit.click(\n",
216
+ " fn=generate_video,\n",
217
+ " inputs=[video_query, video_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
218
+ " outputs=output\n",
219
+ " )\n",
220
+ "\n",
221
+ "if __name__ == \"__main__\":\n",
222
+ " demo.queue(max_size=30).launch(share=True, ssr_mode=False, show_error=True)"
223
+ ]
224
+ }
225
+ ],
226
+ "metadata": {
227
+ "accelerator": "GPU",
228
+ "colab": {
229
+ "gpuType": "T4",
230
+ "provenance": []
231
+ },
232
+ "kernelspec": {
233
+ "display_name": "Python 3",
234
+ "name": "python3"
235
+ },
236
+ "language_info": {
237
+ "name": "python"
238
+ }
239
+ },
240
+ "nbformat": 4,
241
+ "nbformat_minor": 0
242
+ }
Qwen2-VL/Qwen2_VL_7B_Instruct.ipynb ADDED
@@ -0,0 +1,242 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {
7
+ "id": "xL8y37Y6bORU"
8
+ },
9
+ "outputs": [],
10
+ "source": [
11
+ "%%capture\n",
12
+ "!pip install gradio spaces transformers accelerate numpy requests\n",
13
+ "!pip install torch torchvision qwen-vl-utils av hf_xet\n",
14
+ "!pip install pillow huggingface_hub opencv-python"
15
+ ]
16
+ },
17
+ {
18
+ "cell_type": "code",
19
+ "execution_count": null,
20
+ "metadata": {
21
+ "id": "Y-NTbL1tdL9X"
22
+ },
23
+ "outputs": [],
24
+ "source": [
25
+ "import os\n",
26
+ "import random\n",
27
+ "import uuid\n",
28
+ "import json\n",
29
+ "import time\n",
30
+ "import asyncio\n",
31
+ "from threading import Thread\n",
32
+ "\n",
33
+ "import gradio as gr\n",
34
+ "import spaces\n",
35
+ "import torch\n",
36
+ "import numpy as np\n",
37
+ "from PIL import Image\n",
38
+ "import cv2\n",
39
+ "\n",
40
+ "from transformers import (\n",
41
+ " Qwen2VLForConditionalGeneration,\n",
42
+ " AutoProcessor,\n",
43
+ " TextIteratorStreamer,\n",
44
+ ")\n",
45
+ "from transformers.image_utils import load_image\n",
46
+ "\n",
47
+ "# Constants for text generation\n",
48
+ "MAX_MAX_NEW_TOKENS = 2048\n",
49
+ "DEFAULT_MAX_NEW_TOKENS = 1024\n",
50
+ "# Increase or disable input truncation to avoid token mismatches\n",
51
+ "MAX_INPUT_TOKEN_LENGTH = int(os.getenv(\"MAX_INPUT_TOKEN_LENGTH\", \"8192\"))\n",
52
+ "\n",
53
+ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
54
+ "\n",
55
+ "MODEL_ID = \"Qwen/Qwen2-VL-7B-Instruct\"\n",
56
+ "processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)\n",
57
+ "model_m = Qwen2VLForConditionalGeneration.from_pretrained(\n",
58
+ " MODEL_ID,\n",
59
+ " trust_remote_code=True,\n",
60
+ " torch_dtype=torch.float16\n",
61
+ ").to(\"cuda\").eval()\n",
62
+ "\n",
63
+ "def downsample_video(video_path):\n",
64
+ " \"\"\"\n",
65
+ " Downsamples the video to evenly spaced frames.\n",
66
+ " Each frame is returned as a PIL image along with its timestamp.\n",
67
+ " \"\"\"\n",
68
+ " vidcap = cv2.VideoCapture(video_path)\n",
69
+ " total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n",
70
+ " fps = vidcap.get(cv2.CAP_PROP_FPS)\n",
71
+ " frames = []\n",
72
+ " # Sample 10 evenly spaced frames.\n",
73
+ " frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)\n",
74
+ " for i in frame_indices:\n",
75
+ " vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)\n",
76
+ " success, image = vidcap.read()\n",
77
+ " if success:\n",
78
+ " image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Convert BGR to RGB\n",
79
+ " pil_image = Image.fromarray(image)\n",
80
+ " timestamp = round(i / fps, 2)\n",
81
+ " frames.append((pil_image, timestamp))\n",
82
+ " vidcap.release()\n",
83
+ " return frames\n",
84
+ "\n",
85
+ "@spaces.GPU\n",
86
+ "def generate_image(text: str, image: Image.Image,\n",
87
+ " max_new_tokens: int = 1024,\n",
88
+ " temperature: float = 0.6,\n",
89
+ " top_p: float = 0.9,\n",
90
+ " top_k: int = 50,\n",
91
+ " repetition_penalty: float = 1.2):\n",
92
+ "\n",
93
+ " if image is None:\n",
94
+ " yield \"Please upload an image.\"\n",
95
+ " return\n",
96
+ "\n",
97
+ " messages = [{\n",
98
+ " \"role\": \"user\",\n",
99
+ " \"content\": [\n",
100
+ " {\"type\": \"image\", \"image\": image},\n",
101
+ " {\"type\": \"text\", \"text\": text},\n",
102
+ " ]\n",
103
+ " }]\n",
104
+ " prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
105
+ " inputs = processor(\n",
106
+ " text=[prompt_full],\n",
107
+ " images=[image],\n",
108
+ " return_tensors=\"pt\",\n",
109
+ " padding=True,\n",
110
+ " truncation=False,\n",
111
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
112
+ " ).to(\"cuda\")\n",
113
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
114
+ " generation_kwargs = {**inputs, \"streamer\": streamer, \"max_new_tokens\": max_new_tokens}\n",
115
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
116
+ " thread.start()\n",
117
+ " buffer = \"\"\n",
118
+ " for new_text in streamer:\n",
119
+ " buffer += new_text\n",
120
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
121
+ " time.sleep(0.01)\n",
122
+ " yield buffer\n",
123
+ "\n",
124
+ "@spaces.GPU\n",
125
+ "def generate_video(text: str, video_path: str,\n",
126
+ " max_new_tokens: int = 1024,\n",
127
+ " temperature: float = 0.6,\n",
128
+ " top_p: float = 0.9,\n",
129
+ " top_k: int = 50,\n",
130
+ " repetition_penalty: float = 1.2):\n",
131
+ "\n",
132
+ " if video_path is None:\n",
133
+ " yield \"Please upload a video.\"\n",
134
+ " return\n",
135
+ "\n",
136
+ " frames = downsample_video(video_path)\n",
137
+ " messages = [\n",
138
+ " {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]},\n",
139
+ " {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": text}]}\n",
140
+ " ]\n",
141
+ " # Append each frame with its timestamp.\n",
142
+ " for frame in frames:\n",
143
+ " image, timestamp = frame\n",
144
+ " messages[1][\"content\"].append({\"type\": \"text\", \"text\": f\"Frame {timestamp}:\"})\n",
145
+ " messages[1][\"content\"].append({\"type\": \"image\", \"image\": image})\n",
146
+ " inputs = processor.apply_chat_template(\n",
147
+ " messages,\n",
148
+ " tokenize=True,\n",
149
+ " add_generation_prompt=True,\n",
150
+ " return_dict=True,\n",
151
+ " return_tensors=\"pt\",\n",
152
+ " truncation=False,\n",
153
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
154
+ " ).to(\"cuda\")\n",
155
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
156
+ " generation_kwargs = {\n",
157
+ " **inputs,\n",
158
+ " \"streamer\": streamer,\n",
159
+ " \"max_new_tokens\": max_new_tokens,\n",
160
+ " \"do_sample\": True,\n",
161
+ " \"temperature\": temperature,\n",
162
+ " \"top_p\": top_p,\n",
163
+ " \"top_k\": top_k,\n",
164
+ " \"repetition_penalty\": repetition_penalty,\n",
165
+ " }\n",
166
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
167
+ " thread.start()\n",
168
+ " buffer = \"\"\n",
169
+ " for new_text in streamer:\n",
170
+ " buffer += new_text\n",
171
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
172
+ " time.sleep(0.01)\n",
173
+ " yield buffer\n",
174
+ "\n",
175
+ "css = \"\"\"\n",
176
+ ".submit-btn {\n",
177
+ " background-color: #2980b9 !important;\n",
178
+ " color: white !important;\n",
179
+ "}\n",
180
+ ".submit-btn:hover {\n",
181
+ " background-color: #3498db !important;\n",
182
+ "}\n",
183
+ "\"\"\"\n",
184
+ "\n",
185
+ "# Create the Gradio Interface\n",
186
+ "with gr.Blocks(css=css, theme=\"bethecloud/storj_theme\") as demo:\n",
187
+ " gr.Markdown(\"# **Qwen/Qwen2-VL-7B-Instruct**\")\n",
188
+ " with gr.Row():\n",
189
+ " with gr.Column():\n",
190
+ " with gr.Tabs():\n",
191
+ " with gr.TabItem(\"Image Inference\"):\n",
192
+ " image_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
193
+ " image_upload = gr.Image(type=\"pil\", label=\"Image\")\n",
194
+ " image_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
195
+ "\n",
196
+ " with gr.TabItem(\"Video Inference\"):\n",
197
+ " video_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
198
+ " video_upload = gr.Video(label=\"Video\")\n",
199
+ " video_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
200
+ "\n",
201
+ " with gr.Accordion(\"Advanced options\", open=False):\n",
202
+ " max_new_tokens = gr.Slider(label=\"Max new tokens\", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)\n",
203
+ " temperature = gr.Slider(label=\"Temperature\", minimum=0.1, maximum=4.0, step=0.1, value=0.6)\n",
204
+ " top_p = gr.Slider(label=\"Top-p (nucleus sampling)\", minimum=0.05, maximum=1.0, step=0.05, value=0.9)\n",
205
+ " top_k = gr.Slider(label=\"Top-k\", minimum=1, maximum=1000, step=1, value=50)\n",
206
+ " repetition_penalty = gr.Slider(label=\"Repetition penalty\", minimum=1.0, maximum=2.0, step=0.05, value=1.2)\n",
207
+ " with gr.Column():\n",
208
+ " output = gr.Textbox(label=\"Output\", interactive=False)\n",
209
+ "\n",
210
+ " image_submit.click(\n",
211
+ " fn=generate_image,\n",
212
+ " inputs=[image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
213
+ " outputs=output\n",
214
+ " )\n",
215
+ " video_submit.click(\n",
216
+ " fn=generate_video,\n",
217
+ " inputs=[video_query, video_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
218
+ " outputs=output\n",
219
+ " )\n",
220
+ "\n",
221
+ "if __name__ == \"__main__\":\n",
222
+ " demo.queue(max_size=30).launch(share=True, ssr_mode=False, show_error=True)"
223
+ ]
224
+ }
225
+ ],
226
+ "metadata": {
227
+ "accelerator": "GPU",
228
+ "colab": {
229
+ "gpuType": "T4",
230
+ "provenance": []
231
+ },
232
+ "kernelspec": {
233
+ "display_name": "Python 3",
234
+ "name": "python3"
235
+ },
236
+ "language_info": {
237
+ "name": "python"
238
+ }
239
+ },
240
+ "nbformat": 4,
241
+ "nbformat_minor": 0
242
+ }
Qwen2.5-VL/Qwen2_5VL_3B.ipynb ADDED
@@ -0,0 +1,242 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {
7
+ "id": "xL8y37Y6bORU"
8
+ },
9
+ "outputs": [],
10
+ "source": [
11
+ "%%capture\n",
12
+ "!pip install gradio spaces transformers accelerate numpy requests\n",
13
+ "!pip install torch torchvision qwen-vl-utils av hf_xet\n",
14
+ "!pip install pillow huggingface_hub opencv-python"
15
+ ]
16
+ },
17
+ {
18
+ "cell_type": "code",
19
+ "execution_count": null,
20
+ "metadata": {
21
+ "id": "Y-NTbL1tdL9X"
22
+ },
23
+ "outputs": [],
24
+ "source": [
25
+ "import os\n",
26
+ "import random\n",
27
+ "import uuid\n",
28
+ "import json\n",
29
+ "import time\n",
30
+ "import asyncio\n",
31
+ "from threading import Thread\n",
32
+ "\n",
33
+ "import gradio as gr\n",
34
+ "import spaces\n",
35
+ "import torch\n",
36
+ "import numpy as np\n",
37
+ "from PIL import Image\n",
38
+ "import cv2\n",
39
+ "\n",
40
+ "from transformers import (\n",
41
+ " Qwen2_5_VLForConditionalGeneration,\n",
42
+ " AutoProcessor,\n",
43
+ " TextIteratorStreamer,\n",
44
+ ")\n",
45
+ "from transformers.image_utils import load_image\n",
46
+ "\n",
47
+ "# Constants for text generation\n",
48
+ "MAX_MAX_NEW_TOKENS = 2048\n",
49
+ "DEFAULT_MAX_NEW_TOKENS = 1024\n",
50
+ "# Increase or disable input truncation to avoid token mismatches\n",
51
+ "MAX_INPUT_TOKEN_LENGTH = int(os.getenv(\"MAX_INPUT_TOKEN_LENGTH\", \"8192\"))\n",
52
+ "\n",
53
+ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
54
+ "\n",
55
+ "MODEL_ID = \"Qwen/Qwen2.5-VL-3B-Instruct\"\n",
56
+ "processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)\n",
57
+ "model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(\n",
58
+ " MODEL_ID,\n",
59
+ " trust_remote_code=True,\n",
60
+ " torch_dtype=torch.float16\n",
61
+ ").to(\"cuda\").eval()\n",
62
+ "\n",
63
+ "def downsample_video(video_path):\n",
64
+ " \"\"\"\n",
65
+ " Downsamples the video to evenly spaced frames.\n",
66
+ " Each frame is returned as a PIL image along with its timestamp.\n",
67
+ " \"\"\"\n",
68
+ " vidcap = cv2.VideoCapture(video_path)\n",
69
+ " total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n",
70
+ " fps = vidcap.get(cv2.CAP_PROP_FPS)\n",
71
+ " frames = []\n",
72
+ " # Sample 10 evenly spaced frames.\n",
73
+ " frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)\n",
74
+ " for i in frame_indices:\n",
75
+ " vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)\n",
76
+ " success, image = vidcap.read()\n",
77
+ " if success:\n",
78
+ " image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Convert BGR to RGB\n",
79
+ " pil_image = Image.fromarray(image)\n",
80
+ " timestamp = round(i / fps, 2)\n",
81
+ " frames.append((pil_image, timestamp))\n",
82
+ " vidcap.release()\n",
83
+ " return frames\n",
84
+ "\n",
85
+ "@spaces.GPU\n",
86
+ "def generate_image(text: str, image: Image.Image,\n",
87
+ " max_new_tokens: int = 1024,\n",
88
+ " temperature: float = 0.6,\n",
89
+ " top_p: float = 0.9,\n",
90
+ " top_k: int = 50,\n",
91
+ " repetition_penalty: float = 1.2):\n",
92
+ "\n",
93
+ " if image is None:\n",
94
+ " yield \"Please upload an image.\"\n",
95
+ " return\n",
96
+ "\n",
97
+ " messages = [{\n",
98
+ " \"role\": \"user\",\n",
99
+ " \"content\": [\n",
100
+ " {\"type\": \"image\", \"image\": image},\n",
101
+ " {\"type\": \"text\", \"text\": text},\n",
102
+ " ]\n",
103
+ " }]\n",
104
+ " prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
105
+ " inputs = processor(\n",
106
+ " text=[prompt_full],\n",
107
+ " images=[image],\n",
108
+ " return_tensors=\"pt\",\n",
109
+ " padding=True,\n",
110
+ " truncation=False,\n",
111
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
112
+ " ).to(\"cuda\")\n",
113
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
114
+ " generation_kwargs = {**inputs, \"streamer\": streamer, \"max_new_tokens\": max_new_tokens}\n",
115
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
116
+ " thread.start()\n",
117
+ " buffer = \"\"\n",
118
+ " for new_text in streamer:\n",
119
+ " buffer += new_text\n",
120
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
121
+ " time.sleep(0.01)\n",
122
+ " yield buffer\n",
123
+ "\n",
124
+ "@spaces.GPU\n",
125
+ "def generate_video(text: str, video_path: str,\n",
126
+ " max_new_tokens: int = 1024,\n",
127
+ " temperature: float = 0.6,\n",
128
+ " top_p: float = 0.9,\n",
129
+ " top_k: int = 50,\n",
130
+ " repetition_penalty: float = 1.2):\n",
131
+ "\n",
132
+ " if video_path is None:\n",
133
+ " yield \"Please upload a video.\"\n",
134
+ " return\n",
135
+ "\n",
136
+ " frames = downsample_video(video_path)\n",
137
+ " messages = [\n",
138
+ " {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]},\n",
139
+ " {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": text}]}\n",
140
+ " ]\n",
141
+ " # Append each frame with its timestamp.\n",
142
+ " for frame in frames:\n",
143
+ " image, timestamp = frame\n",
144
+ " messages[1][\"content\"].append({\"type\": \"text\", \"text\": f\"Frame {timestamp}:\"})\n",
145
+ " messages[1][\"content\"].append({\"type\": \"image\", \"image\": image})\n",
146
+ " inputs = processor.apply_chat_template(\n",
147
+ " messages,\n",
148
+ " tokenize=True,\n",
149
+ " add_generation_prompt=True,\n",
150
+ " return_dict=True,\n",
151
+ " return_tensors=\"pt\",\n",
152
+ " truncation=False,\n",
153
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
154
+ " ).to(\"cuda\")\n",
155
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
156
+ " generation_kwargs = {\n",
157
+ " **inputs,\n",
158
+ " \"streamer\": streamer,\n",
159
+ " \"max_new_tokens\": max_new_tokens,\n",
160
+ " \"do_sample\": True,\n",
161
+ " \"temperature\": temperature,\n",
162
+ " \"top_p\": top_p,\n",
163
+ " \"top_k\": top_k,\n",
164
+ " \"repetition_penalty\": repetition_penalty,\n",
165
+ " }\n",
166
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
167
+ " thread.start()\n",
168
+ " buffer = \"\"\n",
169
+ " for new_text in streamer:\n",
170
+ " buffer += new_text\n",
171
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
172
+ " time.sleep(0.01)\n",
173
+ " yield buffer\n",
174
+ "\n",
175
+ "css = \"\"\"\n",
176
+ ".submit-btn {\n",
177
+ " background-color: #2980b9 !important;\n",
178
+ " color: white !important;\n",
179
+ "}\n",
180
+ ".submit-btn:hover {\n",
181
+ " background-color: #3498db !important;\n",
182
+ "}\n",
183
+ "\"\"\"\n",
184
+ "\n",
185
+ "# Create the Gradio Interface\n",
186
+ "with gr.Blocks(css=css, theme=\"bethecloud/storj_theme\") as demo:\n",
187
+ " gr.Markdown(\"# **Qwen/Qwen2.5-VL-3B-Instruct**\")\n",
188
+ " with gr.Row():\n",
189
+ " with gr.Column():\n",
190
+ " with gr.Tabs():\n",
191
+ " with gr.TabItem(\"Image Inference\"):\n",
192
+ " image_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
193
+ " image_upload = gr.Image(type=\"pil\", label=\"Image\")\n",
194
+ " image_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
195
+ "\n",
196
+ " with gr.TabItem(\"Video Inference\"):\n",
197
+ " video_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
198
+ " video_upload = gr.Video(label=\"Video\")\n",
199
+ " video_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
200
+ "\n",
201
+ " with gr.Accordion(\"Advanced options\", open=False):\n",
202
+ " max_new_tokens = gr.Slider(label=\"Max new tokens\", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)\n",
203
+ " temperature = gr.Slider(label=\"Temperature\", minimum=0.1, maximum=4.0, step=0.1, value=0.6)\n",
204
+ " top_p = gr.Slider(label=\"Top-p (nucleus sampling)\", minimum=0.05, maximum=1.0, step=0.05, value=0.9)\n",
205
+ " top_k = gr.Slider(label=\"Top-k\", minimum=1, maximum=1000, step=1, value=50)\n",
206
+ " repetition_penalty = gr.Slider(label=\"Repetition penalty\", minimum=1.0, maximum=2.0, step=0.05, value=1.2)\n",
207
+ " with gr.Column():\n",
208
+ " output = gr.Textbox(label=\"Output\", interactive=False)\n",
209
+ "\n",
210
+ " image_submit.click(\n",
211
+ " fn=generate_image,\n",
212
+ " inputs=[image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
213
+ " outputs=output\n",
214
+ " )\n",
215
+ " video_submit.click(\n",
216
+ " fn=generate_video,\n",
217
+ " inputs=[video_query, video_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
218
+ " outputs=output\n",
219
+ " )\n",
220
+ "\n",
221
+ "if __name__ == \"__main__\":\n",
222
+ " demo.queue(max_size=30).launch(share=True, ssr_mode=False, show_error=True)"
223
+ ]
224
+ }
225
+ ],
226
+ "metadata": {
227
+ "accelerator": "GPU",
228
+ "colab": {
229
+ "gpuType": "T4",
230
+ "provenance": []
231
+ },
232
+ "kernelspec": {
233
+ "display_name": "Python 3",
234
+ "name": "python3"
235
+ },
236
+ "language_info": {
237
+ "name": "python"
238
+ }
239
+ },
240
+ "nbformat": 4,
241
+ "nbformat_minor": 0
242
+ }
Qwen2.5-VL/Qwen2_5VL_7B.ipynb ADDED
@@ -0,0 +1,242 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {
7
+ "id": "xL8y37Y6bORU"
8
+ },
9
+ "outputs": [],
10
+ "source": [
11
+ "%%capture\n",
12
+ "!pip install gradio spaces transformers accelerate numpy requests\n",
13
+ "!pip install torch torchvision qwen-vl-utils av hf_xet\n",
14
+ "!pip install pillow huggingface_hub opencv-python"
15
+ ]
16
+ },
17
+ {
18
+ "cell_type": "code",
19
+ "execution_count": null,
20
+ "metadata": {
21
+ "id": "Y-NTbL1tdL9X"
22
+ },
23
+ "outputs": [],
24
+ "source": [
25
+ "import os\n",
26
+ "import random\n",
27
+ "import uuid\n",
28
+ "import json\n",
29
+ "import time\n",
30
+ "import asyncio\n",
31
+ "from threading import Thread\n",
32
+ "\n",
33
+ "import gradio as gr\n",
34
+ "import spaces\n",
35
+ "import torch\n",
36
+ "import numpy as np\n",
37
+ "from PIL import Image\n",
38
+ "import cv2\n",
39
+ "\n",
40
+ "from transformers import (\n",
41
+ " Qwen2_5_VLForConditionalGeneration,\n",
42
+ " AutoProcessor,\n",
43
+ " TextIteratorStreamer,\n",
44
+ ")\n",
45
+ "from transformers.image_utils import load_image\n",
46
+ "\n",
47
+ "# Constants for text generation\n",
48
+ "MAX_MAX_NEW_TOKENS = 2048\n",
49
+ "DEFAULT_MAX_NEW_TOKENS = 1024\n",
50
+ "# Increase or disable input truncation to avoid token mismatches\n",
51
+ "MAX_INPUT_TOKEN_LENGTH = int(os.getenv(\"MAX_INPUT_TOKEN_LENGTH\", \"8192\"))\n",
52
+ "\n",
53
+ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
54
+ "\n",
55
+ "MODEL_ID = \"Qwen/Qwen2.5-VL-7B-Instruct\"\n",
56
+ "processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)\n",
57
+ "model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(\n",
58
+ " MODEL_ID,\n",
59
+ " trust_remote_code=True,\n",
60
+ " torch_dtype=torch.float16\n",
61
+ ").to(\"cuda\").eval()\n",
62
+ "\n",
63
+ "def downsample_video(video_path):\n",
64
+ " \"\"\"\n",
65
+ " Downsamples the video to evenly spaced frames.\n",
66
+ " Each frame is returned as a PIL image along with its timestamp.\n",
67
+ " \"\"\"\n",
68
+ " vidcap = cv2.VideoCapture(video_path)\n",
69
+ " total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n",
70
+ " fps = vidcap.get(cv2.CAP_PROP_FPS)\n",
71
+ " frames = []\n",
72
+ " # Sample 10 evenly spaced frames.\n",
73
+ " frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)\n",
74
+ " for i in frame_indices:\n",
75
+ " vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)\n",
76
+ " success, image = vidcap.read()\n",
77
+ " if success:\n",
78
+ " image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Convert BGR to RGB\n",
79
+ " pil_image = Image.fromarray(image)\n",
80
+ " timestamp = round(i / fps, 2)\n",
81
+ " frames.append((pil_image, timestamp))\n",
82
+ " vidcap.release()\n",
83
+ " return frames\n",
84
+ "\n",
85
+ "@spaces.GPU\n",
86
+ "def generate_image(text: str, image: Image.Image,\n",
87
+ " max_new_tokens: int = 1024,\n",
88
+ " temperature: float = 0.6,\n",
89
+ " top_p: float = 0.9,\n",
90
+ " top_k: int = 50,\n",
91
+ " repetition_penalty: float = 1.2):\n",
92
+ "\n",
93
+ " if image is None:\n",
94
+ " yield \"Please upload an image.\"\n",
95
+ " return\n",
96
+ "\n",
97
+ " messages = [{\n",
98
+ " \"role\": \"user\",\n",
99
+ " \"content\": [\n",
100
+ " {\"type\": \"image\", \"image\": image},\n",
101
+ " {\"type\": \"text\", \"text\": text},\n",
102
+ " ]\n",
103
+ " }]\n",
104
+ " prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
105
+ " inputs = processor(\n",
106
+ " text=[prompt_full],\n",
107
+ " images=[image],\n",
108
+ " return_tensors=\"pt\",\n",
109
+ " padding=True,\n",
110
+ " truncation=False,\n",
111
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
112
+ " ).to(\"cuda\")\n",
113
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
114
+ " generation_kwargs = {**inputs, \"streamer\": streamer, \"max_new_tokens\": max_new_tokens}\n",
115
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
116
+ " thread.start()\n",
117
+ " buffer = \"\"\n",
118
+ " for new_text in streamer:\n",
119
+ " buffer += new_text\n",
120
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
121
+ " time.sleep(0.01)\n",
122
+ " yield buffer\n",
123
+ "\n",
124
+ "@spaces.GPU\n",
125
+ "def generate_video(text: str, video_path: str,\n",
126
+ " max_new_tokens: int = 1024,\n",
127
+ " temperature: float = 0.6,\n",
128
+ " top_p: float = 0.9,\n",
129
+ " top_k: int = 50,\n",
130
+ " repetition_penalty: float = 1.2):\n",
131
+ "\n",
132
+ " if video_path is None:\n",
133
+ " yield \"Please upload a video.\"\n",
134
+ " return\n",
135
+ "\n",
136
+ " frames = downsample_video(video_path)\n",
137
+ " messages = [\n",
138
+ " {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]},\n",
139
+ " {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": text}]}\n",
140
+ " ]\n",
141
+ " # Append each frame with its timestamp.\n",
142
+ " for frame in frames:\n",
143
+ " image, timestamp = frame\n",
144
+ " messages[1][\"content\"].append({\"type\": \"text\", \"text\": f\"Frame {timestamp}:\"})\n",
145
+ " messages[1][\"content\"].append({\"type\": \"image\", \"image\": image})\n",
146
+ " inputs = processor.apply_chat_template(\n",
147
+ " messages,\n",
148
+ " tokenize=True,\n",
149
+ " add_generation_prompt=True,\n",
150
+ " return_dict=True,\n",
151
+ " return_tensors=\"pt\",\n",
152
+ " truncation=False,\n",
153
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
154
+ " ).to(\"cuda\")\n",
155
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
156
+ " generation_kwargs = {\n",
157
+ " **inputs,\n",
158
+ " \"streamer\": streamer,\n",
159
+ " \"max_new_tokens\": max_new_tokens,\n",
160
+ " \"do_sample\": True,\n",
161
+ " \"temperature\": temperature,\n",
162
+ " \"top_p\": top_p,\n",
163
+ " \"top_k\": top_k,\n",
164
+ " \"repetition_penalty\": repetition_penalty,\n",
165
+ " }\n",
166
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
167
+ " thread.start()\n",
168
+ " buffer = \"\"\n",
169
+ " for new_text in streamer:\n",
170
+ " buffer += new_text\n",
171
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
172
+ " time.sleep(0.01)\n",
173
+ " yield buffer\n",
174
+ "\n",
175
+ "css = \"\"\"\n",
176
+ ".submit-btn {\n",
177
+ " background-color: #2980b9 !important;\n",
178
+ " color: white !important;\n",
179
+ "}\n",
180
+ ".submit-btn:hover {\n",
181
+ " background-color: #3498db !important;\n",
182
+ "}\n",
183
+ "\"\"\"\n",
184
+ "\n",
185
+ "# Create the Gradio Interface\n",
186
+ "with gr.Blocks(css=css, theme=\"bethecloud/storj_theme\") as demo:\n",
187
+ " gr.Markdown(\"# **Qwen/Qwen2.5-VL-7B-Instruct**\")\n",
188
+ " with gr.Row():\n",
189
+ " with gr.Column():\n",
190
+ " with gr.Tabs():\n",
191
+ " with gr.TabItem(\"Image Inference\"):\n",
192
+ " image_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
193
+ " image_upload = gr.Image(type=\"pil\", label=\"Image\")\n",
194
+ " image_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
195
+ "\n",
196
+ " with gr.TabItem(\"Video Inference\"):\n",
197
+ " video_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
198
+ " video_upload = gr.Video(label=\"Video\")\n",
199
+ " video_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
200
+ "\n",
201
+ " with gr.Accordion(\"Advanced options\", open=False):\n",
202
+ " max_new_tokens = gr.Slider(label=\"Max new tokens\", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)\n",
203
+ " temperature = gr.Slider(label=\"Temperature\", minimum=0.1, maximum=4.0, step=0.1, value=0.6)\n",
204
+ " top_p = gr.Slider(label=\"Top-p (nucleus sampling)\", minimum=0.05, maximum=1.0, step=0.05, value=0.9)\n",
205
+ " top_k = gr.Slider(label=\"Top-k\", minimum=1, maximum=1000, step=1, value=50)\n",
206
+ " repetition_penalty = gr.Slider(label=\"Repetition penalty\", minimum=1.0, maximum=2.0, step=0.05, value=1.2)\n",
207
+ " with gr.Column():\n",
208
+ " output = gr.Textbox(label=\"Output\", interactive=False)\n",
209
+ "\n",
210
+ " image_submit.click(\n",
211
+ " fn=generate_image,\n",
212
+ " inputs=[image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
213
+ " outputs=output\n",
214
+ " )\n",
215
+ " video_submit.click(\n",
216
+ " fn=generate_video,\n",
217
+ " inputs=[video_query, video_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
218
+ " outputs=output\n",
219
+ " )\n",
220
+ "\n",
221
+ "if __name__ == \"__main__\":\n",
222
+ " demo.queue(max_size=30).launch(share=True, ssr_mode=False, show_error=True)"
223
+ ]
224
+ }
225
+ ],
226
+ "metadata": {
227
+ "accelerator": "GPU",
228
+ "colab": {
229
+ "gpuType": "T4",
230
+ "provenance": []
231
+ },
232
+ "kernelspec": {
233
+ "display_name": "Python 3",
234
+ "name": "python3"
235
+ },
236
+ "language_info": {
237
+ "name": "python"
238
+ }
239
+ },
240
+ "nbformat": 4,
241
+ "nbformat_minor": 0
242
+ }
RolmOCR-Qwen2.5-VL/reducto_RolmOCR_Qwen2_5VL_7B.ipynb ADDED
@@ -0,0 +1,242 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {
7
+ "id": "xL8y37Y6bORU"
8
+ },
9
+ "outputs": [],
10
+ "source": [
11
+ "%%capture\n",
12
+ "!pip install gradio spaces transformers accelerate numpy requests\n",
13
+ "!pip install torch torchvision qwen-vl-utils av hf_xet\n",
14
+ "!pip install pillow huggingface_hub opencv-python"
15
+ ]
16
+ },
17
+ {
18
+ "cell_type": "code",
19
+ "execution_count": null,
20
+ "metadata": {
21
+ "id": "Y-NTbL1tdL9X"
22
+ },
23
+ "outputs": [],
24
+ "source": [
25
+ "import os\n",
26
+ "import random\n",
27
+ "import uuid\n",
28
+ "import json\n",
29
+ "import time\n",
30
+ "import asyncio\n",
31
+ "from threading import Thread\n",
32
+ "\n",
33
+ "import gradio as gr\n",
34
+ "import spaces\n",
35
+ "import torch\n",
36
+ "import numpy as np\n",
37
+ "from PIL import Image\n",
38
+ "import cv2\n",
39
+ "\n",
40
+ "from transformers import (\n",
41
+ " Qwen2_5_VLForConditionalGeneration,\n",
42
+ " AutoProcessor,\n",
43
+ " TextIteratorStreamer,\n",
44
+ ")\n",
45
+ "from transformers.image_utils import load_image\n",
46
+ "\n",
47
+ "# Constants for text generation\n",
48
+ "MAX_MAX_NEW_TOKENS = 2048\n",
49
+ "DEFAULT_MAX_NEW_TOKENS = 1024\n",
50
+ "# Increase or disable input truncation to avoid token mismatches\n",
51
+ "MAX_INPUT_TOKEN_LENGTH = int(os.getenv(\"MAX_INPUT_TOKEN_LENGTH\", \"8192\"))\n",
52
+ "\n",
53
+ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
54
+ "\n",
55
+ "MODEL_ID = \"reducto/RolmOCR\"\n",
56
+ "processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)\n",
57
+ "model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(\n",
58
+ " MODEL_ID,\n",
59
+ " trust_remote_code=True,\n",
60
+ " torch_dtype=torch.float16\n",
61
+ ").to(\"cuda\").eval()\n",
62
+ "\n",
63
+ "def downsample_video(video_path):\n",
64
+ " \"\"\"\n",
65
+ " Downsamples the video to evenly spaced frames.\n",
66
+ " Each frame is returned as a PIL image along with its timestamp.\n",
67
+ " \"\"\"\n",
68
+ " vidcap = cv2.VideoCapture(video_path)\n",
69
+ " total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n",
70
+ " fps = vidcap.get(cv2.CAP_PROP_FPS)\n",
71
+ " frames = []\n",
72
+ " # Sample 10 evenly spaced frames.\n",
73
+ " frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)\n",
74
+ " for i in frame_indices:\n",
75
+ " vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)\n",
76
+ " success, image = vidcap.read()\n",
77
+ " if success:\n",
78
+ " image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Convert BGR to RGB\n",
79
+ " pil_image = Image.fromarray(image)\n",
80
+ " timestamp = round(i / fps, 2)\n",
81
+ " frames.append((pil_image, timestamp))\n",
82
+ " vidcap.release()\n",
83
+ " return frames\n",
84
+ "\n",
85
+ "@spaces.GPU\n",
86
+ "def generate_image(text: str, image: Image.Image,\n",
87
+ " max_new_tokens: int = 1024,\n",
88
+ " temperature: float = 0.6,\n",
89
+ " top_p: float = 0.9,\n",
90
+ " top_k: int = 50,\n",
91
+ " repetition_penalty: float = 1.2):\n",
92
+ "\n",
93
+ " if image is None:\n",
94
+ " yield \"Please upload an image.\"\n",
95
+ " return\n",
96
+ "\n",
97
+ " messages = [{\n",
98
+ " \"role\": \"user\",\n",
99
+ " \"content\": [\n",
100
+ " {\"type\": \"image\", \"image\": image},\n",
101
+ " {\"type\": \"text\", \"text\": text},\n",
102
+ " ]\n",
103
+ " }]\n",
104
+ " prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
105
+ " inputs = processor(\n",
106
+ " text=[prompt_full],\n",
107
+ " images=[image],\n",
108
+ " return_tensors=\"pt\",\n",
109
+ " padding=True,\n",
110
+ " truncation=False # Disable truncation to keep image tokens intact\n",
111
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
112
+ " ).to(\"cuda\")\n",
113
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
114
+ " generation_kwargs = {**inputs, \"streamer\": streamer, \"max_new_tokens\": max_new_tokens}\n",
115
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
116
+ " thread.start()\n",
117
+ " buffer = \"\"\n",
118
+ " for new_text in streamer:\n",
119
+ " buffer += new_text\n",
120
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
121
+ " time.sleep(0.01)\n",
122
+ " yield buffer\n",
123
+ "\n",
124
+ "@spaces.GPU\n",
125
+ "def generate_video(text: str, video_path: str,\n",
126
+ " max_new_tokens: int = 1024,\n",
127
+ " temperature: float = 0.6,\n",
128
+ " top_p: float = 0.9,\n",
129
+ " top_k: int = 50,\n",
130
+ " repetition_penalty: float = 1.2):\n",
131
+ "\n",
132
+ " if video_path is None:\n",
133
+ " yield \"Please upload a video.\"\n",
134
+ " return\n",
135
+ "\n",
136
+ " frames = downsample_video(video_path)\n",
137
+ " messages = [\n",
138
+ " {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]},\n",
139
+ " {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": text}]}\n",
140
+ " ]\n",
141
+ " # Append each frame with its timestamp.\n",
142
+ " for frame in frames:\n",
143
+ " image, timestamp = frame\n",
144
+ " messages[1][\"content\"].append({\"type\": \"text\", \"text\": f\"Frame {timestamp}:\"})\n",
145
+ " messages[1][\"content\"].append({\"type\": \"image\", \"image\": image})\n",
146
+ " inputs = processor.apply_chat_template(\n",
147
+ " messages,\n",
148
+ " tokenize=True,\n",
149
+ " add_generation_prompt=True,\n",
150
+ " return_dict=True,\n",
151
+ " return_tensors=\"pt\",\n",
152
+ " truncation=False,,\n",
153
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
154
+ " ).to(\"cuda\")\n",
155
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
156
+ " generation_kwargs = {\n",
157
+ " **inputs,\n",
158
+ " \"streamer\": streamer,\n",
159
+ " \"max_new_tokens\": max_new_tokens,\n",
160
+ " \"do_sample\": True,\n",
161
+ " \"temperature\": temperature,\n",
162
+ " \"top_p\": top_p,\n",
163
+ " \"top_k\": top_k,\n",
164
+ " \"repetition_penalty\": repetition_penalty,\n",
165
+ " }\n",
166
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
167
+ " thread.start()\n",
168
+ " buffer = \"\"\n",
169
+ " for new_text in streamer:\n",
170
+ " buffer += new_text\n",
171
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
172
+ " time.sleep(0.01)\n",
173
+ " yield buffer\n",
174
+ "\n",
175
+ "css = \"\"\"\n",
176
+ ".submit-btn {\n",
177
+ " background-color: #2980b9 !important;\n",
178
+ " color: white !important;\n",
179
+ "}\n",
180
+ ".submit-btn:hover {\n",
181
+ " background-color: #3498db !important;\n",
182
+ "}\n",
183
+ "\"\"\"\n",
184
+ "\n",
185
+ "# Create the Gradio Interface\n",
186
+ "with gr.Blocks(css=css, theme=\"bethecloud/storj_theme\") as demo:\n",
187
+ " gr.Markdown(\"# **reducto/RolmOCR**\")\n",
188
+ " with gr.Row():\n",
189
+ " with gr.Column():\n",
190
+ " with gr.Tabs():\n",
191
+ " with gr.TabItem(\"Image Inference\"):\n",
192
+ " image_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
193
+ " image_upload = gr.Image(type=\"pil\", label=\"Image\")\n",
194
+ " image_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
195
+ "\n",
196
+ " with gr.TabItem(\"Video Inference\"):\n",
197
+ " video_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
198
+ " video_upload = gr.Video(label=\"Video\")\n",
199
+ " video_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
200
+ "\n",
201
+ " with gr.Accordion(\"Advanced options\", open=False):\n",
202
+ " max_new_tokens = gr.Slider(label=\"Max new tokens\", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)\n",
203
+ " temperature = gr.Slider(label=\"Temperature\", minimum=0.1, maximum=4.0, step=0.1, value=0.6)\n",
204
+ " top_p = gr.Slider(label=\"Top-p (nucleus sampling)\", minimum=0.05, maximum=1.0, step=0.05, value=0.9)\n",
205
+ " top_k = gr.Slider(label=\"Top-k\", minimum=1, maximum=1000, step=1, value=50)\n",
206
+ " repetition_penalty = gr.Slider(label=\"Repetition penalty\", minimum=1.0, maximum=2.0, step=0.05, value=1.2)\n",
207
+ " with gr.Column():\n",
208
+ " output = gr.Textbox(label=\"Output\", interactive=False)\n",
209
+ "\n",
210
+ " image_submit.click(\n",
211
+ " fn=generate_image,\n",
212
+ " inputs=[image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
213
+ " outputs=output\n",
214
+ " )\n",
215
+ " video_submit.click(\n",
216
+ " fn=generate_video,\n",
217
+ " inputs=[video_query, video_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
218
+ " outputs=output\n",
219
+ " )\n",
220
+ "\n",
221
+ "if __name__ == \"__main__\":\n",
222
+ " demo.queue(max_size=30).launch(share=True, ssr_mode=False, show_error=True)"
223
+ ]
224
+ }
225
+ ],
226
+ "metadata": {
227
+ "accelerator": "GPU",
228
+ "colab": {
229
+ "gpuType": "T4",
230
+ "provenance": []
231
+ },
232
+ "kernelspec": {
233
+ "display_name": "Python 3",
234
+ "name": "python3"
235
+ },
236
+ "language_info": {
237
+ "name": "python"
238
+ }
239
+ },
240
+ "nbformat": 4,
241
+ "nbformat_minor": 0
242
+ }
olmOCR-Qwen2-VL/olmOCR_7B_0225.ipynb ADDED
@@ -0,0 +1,242 @@
1
+ {
2
+ "nbformat": 4,
3
+ "nbformat_minor": 0,
4
+ "metadata": {
5
+ "colab": {
6
+ "provenance": [],
7
+ "gpuType": "T4"
8
+ },
9
+ "kernelspec": {
10
+ "name": "python3",
11
+ "display_name": "Python 3"
12
+ },
13
+ "language_info": {
14
+ "name": "python"
15
+ },
16
+ "accelerator": "GPU"
17
+ },
18
+ "cells": [
19
+ {
20
+ "cell_type": "code",
21
+ "execution_count": null,
22
+ "metadata": {
23
+ "id": "xL8y37Y6bORU"
24
+ },
25
+ "outputs": [],
26
+ "source": [
27
+ "%%capture\n",
28
+ "!pip install gradio spaces transformers accelerate numpy requests\n",
29
+ "!pip install torch torchvision qwen-vl-utils av hf_xet\n",
30
+ "!pip install pillow huggingface_hub opencv-python"
31
+ ]
32
+ },
33
+ {
34
+ "cell_type": "code",
35
+ "source": [
36
+ "import os\n",
37
+ "import random\n",
38
+ "import uuid\n",
39
+ "import json\n",
40
+ "import time\n",
41
+ "import asyncio\n",
42
+ "from threading import Thread\n",
43
+ "\n",
44
+ "import gradio as gr\n",
45
+ "import spaces\n",
46
+ "import torch\n",
47
+ "import numpy as np\n",
48
+ "from PIL import Image\n",
49
+ "import cv2\n",
50
+ "\n",
51
+ "from transformers import (\n",
52
+ " Qwen2_5_VLForConditionalGeneration,\n",
53
+ " AutoProcessor,\n",
54
+ " TextIteratorStreamer,\n",
55
+ ")\n",
56
+ "from transformers.image_utils import load_image\n",
57
+ "\n",
58
+ "# Constants for text generation\n",
59
+ "MAX_MAX_NEW_TOKENS = 2048\n",
60
+ "DEFAULT_MAX_NEW_TOKENS = 1024\n",
61
+ "# Increase or disable input truncation to avoid token mismatches\n",
62
+ "MAX_INPUT_TOKEN_LENGTH = int(os.getenv(\"MAX_INPUT_TOKEN_LENGTH\", \"8192\"))\n",
63
+ "\n",
64
+ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
65
+ "\n",
66
+ "MODEL_ID = \"allenai/olmOCR-7B-0225-preview\"\n",
67
+ "processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)\n",
68
+ "model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(\n",
69
+ " MODEL_ID,\n",
70
+ " trust_remote_code=True,\n",
71
+ " torch_dtype=torch.float16\n",
72
+ ").to(\"cuda\").eval()\n",
73
+ "\n",
74
+ "def downsample_video(video_path):\n",
75
+ " \"\"\"\n",
76
+ " Downsamples the video to evenly spaced frames.\n",
77
+ " Each frame is returned as a PIL image along with its timestamp.\n",
78
+ " \"\"\"\n",
79
+ " vidcap = cv2.VideoCapture(video_path)\n",
80
+ " total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n",
81
+ " fps = vidcap.get(cv2.CAP_PROP_FPS)\n",
82
+ " frames = []\n",
83
+ " # Sample 10 evenly spaced frames.\n",
84
+ " frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)\n",
85
+ " for i in frame_indices:\n",
86
+ " vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)\n",
87
+ " success, image = vidcap.read()\n",
88
+ " if success:\n",
89
+ " image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Convert BGR to RGB\n",
90
+ " pil_image = Image.fromarray(image)\n",
91
+ " timestamp = round(i / fps, 2)\n",
92
+ " frames.append((pil_image, timestamp))\n",
93
+ " vidcap.release()\n",
94
+ " return frames\n",
95
+ "\n",
96
+ "@spaces.GPU\n",
97
+ "def generate_image(text: str, image: Image.Image,\n",
98
+ " max_new_tokens: int = 1024,\n",
99
+ " temperature: float = 0.6,\n",
100
+ " top_p: float = 0.9,\n",
101
+ " top_k: int = 50,\n",
102
+ " repetition_penalty: float = 1.2):\n",
103
+ "\n",
104
+ " if image is None:\n",
105
+ " yield \"Please upload an image.\"\n",
106
+ " return\n",
107
+ "\n",
108
+ " messages = [{\n",
109
+ " \"role\": \"user\",\n",
110
+ " \"content\": [\n",
111
+ " {\"type\": \"image\", \"image\": image},\n",
112
+ " {\"type\": \"text\", \"text\": text},\n",
113
+ " ]\n",
114
+ " }]\n",
115
+ " prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
116
+ " inputs = processor(\n",
117
+ " text=[prompt_full],\n",
118
+ " images=[image],\n",
119
+ " return_tensors=\"pt\",\n",
120
+ " padding=True,\n",
121
+ " truncation=False,\n",
122
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
123
+ " ).to(\"cuda\")\n",
124
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
125
+ " generation_kwargs = {**inputs, \"streamer\": streamer, \"max_new_tokens\": max_new_tokens}\n",
126
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
127
+ " thread.start()\n",
128
+ " buffer = \"\"\n",
129
+ " for new_text in streamer:\n",
130
+ " buffer += new_text\n",
131
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
132
+ " time.sleep(0.01)\n",
133
+ " yield buffer\n",
134
+ "\n",
135
+ "@spaces.GPU\n",
136
+ "def generate_video(text: str, video_path: str,\n",
137
+ " max_new_tokens: int = 1024,\n",
138
+ " temperature: float = 0.6,\n",
139
+ " top_p: float = 0.9,\n",
140
+ " top_k: int = 50,\n",
141
+ " repetition_penalty: float = 1.2):\n",
142
+ "\n",
143
+ " if video_path is None:\n",
144
+ " yield \"Please upload a video.\"\n",
145
+ " return\n",
146
+ "\n",
147
+ " frames = downsample_video(video_path)\n",
148
+ " messages = [\n",
149
+ " {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]},\n",
150
+ " {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": text}]}\n",
151
+ " ]\n",
152
+ " # Append each frame with its timestamp.\n",
153
+ " for frame in frames:\n",
154
+ " image, timestamp = frame\n",
155
+ " messages[1][\"content\"].append({\"type\": \"text\", \"text\": f\"Frame {timestamp}:\"})\n",
156
+ " messages[1][\"content\"].append({\"type\": \"image\", \"image\": image})\n",
157
+ " inputs = processor.apply_chat_template(\n",
158
+ " messages,\n",
159
+ " tokenize=True,\n",
160
+ " add_generation_prompt=True,\n",
161
+ " return_dict=True,\n",
162
+ " return_tensors=\"pt\",\n",
163
+ " truncation=False,\n",
164
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
165
+ " ).to(\"cuda\")\n",
166
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
167
+ " generation_kwargs = {\n",
168
+ " **inputs,\n",
169
+ " \"streamer\": streamer,\n",
170
+ " \"max_new_tokens\": max_new_tokens,\n",
171
+ " \"do_sample\": True,\n",
172
+ " \"temperature\": temperature,\n",
173
+ " \"top_p\": top_p,\n",
174
+ " \"top_k\": top_k,\n",
175
+ " \"repetition_penalty\": repetition_penalty,\n",
176
+ " }\n",
177
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
178
+ " thread.start()\n",
179
+ " buffer = \"\"\n",
180
+ " for new_text in streamer:\n",
181
+ " buffer += new_text\n",
182
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
183
+ " time.sleep(0.01)\n",
184
+ " yield buffer\n",
185
+ "\n",
186
+ "css = \"\"\"\n",
187
+ ".submit-btn {\n",
188
+ " background-color: #2980b9 !important;\n",
189
+ " color: white !important;\n",
190
+ "}\n",
191
+ ".submit-btn:hover {\n",
192
+ " background-color: #3498db !important;\n",
193
+ "}\n",
194
+ "\"\"\"\n",
195
+ "\n",
196
+ "# Create the Gradio Interface\n",
197
+ "with gr.Blocks(css=css, theme=\"bethecloud/storj_theme\") as demo:\n",
198
+ " gr.Markdown(\"# **allenai/olmOCR-7B-0225-preview**\")\n",
199
+ " with gr.Row():\n",
200
+ " with gr.Column():\n",
201
+ " with gr.Tabs():\n",
202
+ " with gr.TabItem(\"Image Inference\"):\n",
203
+ " image_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
204
+ " image_upload = gr.Image(type=\"pil\", label=\"Image\")\n",
205
+ " image_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
206
+ "\n",
207
+ " with gr.TabItem(\"Video Inference\"):\n",
208
+ " video_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
209
+ " video_upload = gr.Video(label=\"Video\")\n",
210
+ " video_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
211
+ "\n",
212
+ " with gr.Accordion(\"Advanced options\", open=False):\n",
213
+ " max_new_tokens = gr.Slider(label=\"Max new tokens\", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)\n",
214
+ " temperature = gr.Slider(label=\"Temperature\", minimum=0.1, maximum=4.0, step=0.1, value=0.6)\n",
215
+ " top_p = gr.Slider(label=\"Top-p (nucleus sampling)\", minimum=0.05, maximum=1.0, step=0.05, value=0.9)\n",
216
+ " top_k = gr.Slider(label=\"Top-k\", minimum=1, maximum=1000, step=1, value=50)\n",
217
+ " repetition_penalty = gr.Slider(label=\"Repetition penalty\", minimum=1.0, maximum=2.0, step=0.05, value=1.2)\n",
218
+ " with gr.Column():\n",
219
+ " output = gr.Textbox(label=\"Output\", interactive=False)\n",
220
+ "\n",
221
+ " image_submit.click(\n",
222
+ " fn=generate_image,\n",
223
+ " inputs=[image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
224
+ " outputs=output\n",
225
+ " )\n",
226
+ " video_submit.click(\n",
227
+ " fn=generate_video,\n",
228
+ " inputs=[video_query, video_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
229
+ " outputs=output\n",
230
+ " )\n",
231
+ "\n",
232
+ "if __name__ == \"__main__\":\n",
233
+ " demo.queue(max_size=30).launch(share=True, ssr_mode=False, show_error=True)"
234
+ ],
235
+ "metadata": {
236
+ "id": "Y-NTbL1tdL9X"
237
+ },
238
+ "execution_count": null,
239
+ "outputs": []
240
+ }
241
+ ]
242
+ }
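For quick local testing of the Aya Vision notebook above, here is a minimal non-streaming sketch, assuming the notebook's setup cells have already been run (so `aya_processor` and `aya_model` are loaded) and using `photo.jpg` as a hypothetical placeholder for a local image:

from PIL import Image

# Hypothetical local image; replace with any file on disk.
image = Image.open("photo.jpg").convert("RGB")
messages = [{
    "role": "user",
    "content": [
        {"type": "image", "image": image},
        {"type": "text", "text": "Describe this image in one sentence."},
    ],
}]
# Same chat-template call as the streaming path above, but decoded in one shot.
inputs = aya_processor.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_dict=True,
    return_tensors="pt",
).to(aya_model.device)
gen_ids = aya_model.generate(**inputs, max_new_tokens=256)
# Decode only the newly generated tokens, skipping the prompt.
print(aya_processor.tokenizer.decode(
    gen_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))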
typhoon-ocr-7b-Qwen2.5VL/typhoon_ocr_7b.ipynb ADDED
@@ -0,0 +1,242 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {
7
+ "id": "xL8y37Y6bORU"
8
+ },
9
+ "outputs": [],
10
+ "source": [
11
+ "%%capture\n",
12
+ "!pip install gradio spaces transformers accelerate numpy requests\n",
13
+ "!pip install torch torchvision qwen-vl-utils av hf_xet\n",
14
+ "!pip install pillow huggingface_hub opencv-python"
15
+ ]
16
+ },
17
+ {
18
+ "cell_type": "code",
19
+ "execution_count": null,
20
+ "metadata": {
21
+ "id": "Y-NTbL1tdL9X"
22
+ },
23
+ "outputs": [],
24
+ "source": [
25
+ "import os\n",
26
+ "import random\n",
27
+ "import uuid\n",
28
+ "import json\n",
29
+ "import time\n",
30
+ "import asyncio\n",
31
+ "from threading import Thread\n",
32
+ "\n",
33
+ "import gradio as gr\n",
34
+ "import spaces\n",
35
+ "import torch\n",
36
+ "import numpy as np\n",
37
+ "from PIL import Image\n",
38
+ "import cv2\n",
39
+ "\n",
40
+ "from transformers import (\n",
41
+ " Qwen2_5_VLForConditionalGeneration,\n",
42
+ " AutoProcessor,\n",
43
+ " TextIteratorStreamer,\n",
44
+ ")\n",
45
+ "from transformers.image_utils import load_image\n",
46
+ "\n",
47
+ "# Constants for text generation\n",
48
+ "MAX_MAX_NEW_TOKENS = 2048\n",
49
+ "DEFAULT_MAX_NEW_TOKENS = 1024\n",
50
+ "# Increase or disable input truncation to avoid token mismatches\n",
51
+ "MAX_INPUT_TOKEN_LENGTH = int(os.getenv(\"MAX_INPUT_TOKEN_LENGTH\", \"8192\"))\n",
52
+ "\n",
53
+ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
54
+ "\n",
55
+ "MODEL_ID = \"scb10x/typhoon-ocr-7b\"\n",
56
+ "processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)\n",
57
+ "model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(\n",
58
+ " MODEL_ID,\n",
59
+ " trust_remote_code=True,\n",
60
+ " torch_dtype=torch.float16\n",
61
+ ").to(\"cuda\").eval()\n",
62
+ "\n",
63
+ "def downsample_video(video_path):\n",
64
+ " \"\"\"\n",
65
+ " Downsamples the video to evenly spaced frames.\n",
66
+ " Each frame is returned as a PIL image along with its timestamp.\n",
67
+ " \"\"\"\n",
68
+ " vidcap = cv2.VideoCapture(video_path)\n",
69
+ " total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n",
70
+ " fps = vidcap.get(cv2.CAP_PROP_FPS)\n",
71
+ " frames = []\n",
72
+ " # Sample 10 evenly spaced frames.\n",
73
+ " frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)\n",
74
+ " for i in frame_indices:\n",
75
+ " vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)\n",
76
+ " success, image = vidcap.read()\n",
77
+ " if success:\n",
78
+ " image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Convert BGR to RGB\n",
79
+ " pil_image = Image.fromarray(image)\n",
80
+ " timestamp = round(i / fps, 2)\n",
81
+ " frames.append((pil_image, timestamp))\n",
82
+ " vidcap.release()\n",
83
+ " return frames\n",
84
+ "\n",
85
+ "@spaces.GPU\n",
86
+ "def generate_image(text: str, image: Image.Image,\n",
87
+ " max_new_tokens: int = 1024,\n",
88
+ " temperature: float = 0.6,\n",
89
+ " top_p: float = 0.9,\n",
90
+ " top_k: int = 50,\n",
91
+ " repetition_penalty: float = 1.2):\n",
92
+ "\n",
93
+ " if image is None:\n",
94
+ " yield \"Please upload an image.\"\n",
95
+ " return\n",
96
+ "\n",
97
+ " messages = [{\n",
98
+ " \"role\": \"user\",\n",
99
+ " \"content\": [\n",
100
+ " {\"type\": \"image\", \"image\": image},\n",
101
+ " {\"type\": \"text\", \"text\": text},\n",
102
+ " ]\n",
103
+ " }]\n",
104
+ " prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
105
+ " inputs = processor(\n",
106
+ " text=[prompt_full],\n",
107
+ " images=[image],\n",
108
+ " return_tensors=\"pt\",\n",
109
+ " padding=True,\n",
110
+ " truncation=False,\n",
111
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
112
+ " ).to(\"cuda\")\n",
113
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
114
+ " generation_kwargs = {**inputs, \"streamer\": streamer, \"max_new_tokens\": max_new_tokens}\n",
115
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
116
+ " thread.start()\n",
117
+ " buffer = \"\"\n",
118
+ " for new_text in streamer:\n",
119
+ " buffer += new_text\n",
120
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
121
+ " time.sleep(0.01)\n",
122
+ " yield buffer\n",
123
+ "\n",
124
+ "@spaces.GPU\n",
125
+ "def generate_video(text: str, video_path: str,\n",
126
+ " max_new_tokens: int = 1024,\n",
127
+ " temperature: float = 0.6,\n",
128
+ " top_p: float = 0.9,\n",
129
+ " top_k: int = 50,\n",
130
+ " repetition_penalty: float = 1.2):\n",
131
+ "\n",
132
+ " if video_path is None:\n",
133
+ " yield \"Please upload a video.\"\n",
134
+ " return\n",
135
+ "\n",
136
+ " frames = downsample_video(video_path)\n",
137
+ " messages = [\n",
138
+ " {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]},\n",
139
+ " {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": text}]}\n",
140
+ " ]\n",
141
+ " # Append each frame with its timestamp.\n",
142
+ " for frame in frames:\n",
143
+ " image, timestamp = frame\n",
144
+ " messages[1][\"content\"].append({\"type\": \"text\", \"text\": f\"Frame {timestamp}:\"})\n",
145
+ " messages[1][\"content\"].append({\"type\": \"image\", \"image\": image})\n",
146
+ " inputs = processor.apply_chat_template(\n",
147
+ " messages,\n",
148
+ " tokenize=True,\n",
149
+ " add_generation_prompt=True,\n",
150
+ " return_dict=True,\n",
151
+ " return_tensors=\"pt\",\n",
152
+ " truncation=False,\n",
153
+ " max_length=MAX_INPUT_TOKEN_LENGTH\n",
154
+ " ).to(\"cuda\")\n",
155
+ " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
156
+ " generation_kwargs = {\n",
157
+ " **inputs,\n",
158
+ " \"streamer\": streamer,\n",
159
+ " \"max_new_tokens\": max_new_tokens,\n",
160
+ " \"do_sample\": True,\n",
161
+ " \"temperature\": temperature,\n",
162
+ " \"top_p\": top_p,\n",
163
+ " \"top_k\": top_k,\n",
164
+ " \"repetition_penalty\": repetition_penalty,\n",
165
+ " }\n",
166
+ " thread = Thread(target=model_m.generate, kwargs=generation_kwargs)\n",
167
+ " thread.start()\n",
168
+ " buffer = \"\"\n",
169
+ " for new_text in streamer:\n",
170
+ " buffer += new_text\n",
171
+ " buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
172
+ " time.sleep(0.01)\n",
173
+ " yield buffer\n",
174
+ "\n",
175
+ "css = \"\"\"\n",
176
+ ".submit-btn {\n",
177
+ " background-color: #2980b9 !important;\n",
178
+ " color: white !important;\n",
179
+ "}\n",
180
+ ".submit-btn:hover {\n",
181
+ " background-color: #3498db !important;\n",
182
+ "}\n",
183
+ "\"\"\"\n",
184
+ "\n",
185
+ "# Create the Gradio Interface\n",
186
+ "with gr.Blocks(css=css, theme=\"bethecloud/storj_theme\") as demo:\n",
187
+ " gr.Markdown(\"# **typhoon-ocr-7b**\")\n",
188
+ " with gr.Row():\n",
189
+ " with gr.Column():\n",
190
+ " with gr.Tabs():\n",
191
+ " with gr.TabItem(\"Image Inference\"):\n",
192
+ " image_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
193
+ " image_upload = gr.Image(type=\"pil\", label=\"Image\")\n",
194
+ " image_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
195
+ "\n",
196
+ " with gr.TabItem(\"Video Inference\"):\n",
197
+ " video_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
198
+ " video_upload = gr.Video(label=\"Video\")\n",
199
+ " video_submit = gr.Button(\"Submit\", elem_classes=\"submit-btn\")\n",
200
+ "\n",
201
+ " with gr.Accordion(\"Advanced options\", open=False):\n",
202
+ " max_new_tokens = gr.Slider(label=\"Max new tokens\", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)\n",
203
+ " temperature = gr.Slider(label=\"Temperature\", minimum=0.1, maximum=4.0, step=0.1, value=0.6)\n",
204
+ " top_p = gr.Slider(label=\"Top-p (nucleus sampling)\", minimum=0.05, maximum=1.0, step=0.05, value=0.9)\n",
205
+ " top_k = gr.Slider(label=\"Top-k\", minimum=1, maximum=1000, step=1, value=50)\n",
206
+ " repetition_penalty = gr.Slider(label=\"Repetition penalty\", minimum=1.0, maximum=2.0, step=0.05, value=1.2)\n",
207
+ " with gr.Column():\n",
208
+ " output = gr.Textbox(label=\"Output\", interactive=False)\n",
209
+ "\n",
210
+ " image_submit.click(\n",
211
+ " fn=generate_image,\n",
212
+ " inputs=[image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
213
+ " outputs=output\n",
214
+ " )\n",
215
+ " video_submit.click(\n",
216
+ " fn=generate_video,\n",
217
+ " inputs=[video_query, video_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
218
+ " outputs=output\n",
219
+ " )\n",
220
+ "\n",
221
+ "if __name__ == \"__main__\":\n",
222
+ " demo.queue(max_size=30).launch(share=True, ssr_mode=False, show_error=True)"
223
+ ]
224
+ }
225
+ ],
226
+ "metadata": {
227
+ "accelerator": "GPU",
228
+ "colab": {
229
+ "gpuType": "T4",
230
+ "provenance": []
231
+ },
232
+ "kernelspec": {
233
+ "display_name": "Python 3",
234
+ "name": "python3"
235
+ },
236
+ "language_info": {
237
+ "name": "python"
238
+ }
239
+ },
240
+ "nbformat": 4,
241
+ "nbformat_minor": 0
242
+ }
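Similarly, a minimal non-streaming sketch of a single OCR call with the typhoon-ocr-7b setup above, assuming the notebook's setup cell has been run (so `processor` and `model_m` are available) and using `page.png` as a placeholder for a local document image:

from PIL import Image

# Placeholder document image; any scanned page or screenshot works.
image = Image.open("page.png").convert("RGB")
messages = [{
    "role": "user",
    "content": [
        {"type": "image", "image": image},
        {"type": "text", "text": "Extract all text from this page as markdown."},
    ],
}]
# Build the prompt and tensors the same way generate_image does, then decode once.
prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(model_m.device)
output_ids = model_m.generate(**inputs, max_new_tokens=1024)
# Keep only the tokens generated after the prompt before decoding.
trimmed = output_ids[:, inputs["input_ids"].shape[1]:]
print(processor.batch_decode(trimmed, skip_special_tokens=True)[0])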