{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "f2c16396",
   "metadata": {},
   "source": [
    "### Video Information Extracting with Qwen2.5-Omni\n",
    "\n",
    "This notebook demonstrates how to use Qwen2.5-Omni to obtain information from the video stream."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "638e9082-c1ef-4efd-9a10-e35507e25363",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-01-29T12:40:04.049566Z",
     "iopub.status.busy": "2025-01-29T12:40:04.049365Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "!pip install git+https://github.com/huggingface/transformers@f742a644ca32e65758c3adb36225aef1731bd2a8\n",
    "!pip install qwen-omni-utils\n",
    "!pip install openai\n",
    "!pip install flash-attn --no-build-isolation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9596c50d-80a8-433f-b846-1fbf61145ccc",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2025-01-29T12:40:16.511701Z",
     "iopub.status.busy": "2025-01-29T12:40:16.510916Z",
     "iopub.status.idle": "2025-01-29T12:40:16.878038Z",
     "shell.execute_reply": "2025-01-29T12:40:16.877543Z",
     "shell.execute_reply.started": "2025-01-29T12:40:16.511678Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/omni/lib/python3.10/site-packages/_distutils_hack/__init__.py:53: UserWarning: Reliance on distutils from stdlib is deprecated. Users must rely on setuptools to provide the distutils module. Avoid importing distutils or import setuptools first, and avoid setting SETUPTOOLS_USE_DISTUTILS=stdlib. Register concerns at https://github.com/pypa/setuptools/issues/new?template=distutils-deprecation.yml\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "from qwen_omni_utils import process_mm_info\n",
    "\n",
    "# @title inference function\n",
    "def inference(video_path, prompt, sys_prompt):\n",
    "    messages = [\n",
    "        {\"role\": \"system\", \"content\": sys_prompt},\n",
    "        {\"role\": \"user\", \"content\": [\n",
    "                {\"type\": \"text\", \"text\": prompt},\n",
    "                {\"type\": \"video\", \"video\": video_path},\n",
    "            ]\n",
    "        },\n",
    "    ]\n",
    "    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
    "    # image_inputs, video_inputs = process_vision_info([messages])\n",
    "    audios, images, videos = process_mm_info(messages, use_audio_in_video=False)\n",
    "    inputs = processor(text=text, audios=audios, images=images, videos=videos, return_tensors=\"pt\", padding=True, use_audio_in_video=False)\n",
    "    inputs = inputs.to(model.device).to(model.dtype)\n",
    "\n",
    "    output = model.generate(**inputs, use_audio_in_video=False, return_audio=False)\n",
    "\n",
    "    text = processor.batch_decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=False)\n",
    "    return text"
   ]
  },
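  {
   "cell_type": "markdown",
   "id": "api-inference-note",
   "metadata": {},
   "source": [
    "The setup cell also installs `openai`, although the cells below only run the local Hugging Face model. If you serve Qwen2.5-Omni behind an OpenAI-compatible endpoint (for example, with vLLM), a helper like the following sketch could be used instead. The base URL, API key, and `video_url` content schema are assumptions about such a deployment, not part of this notebook's recorded runs."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "api-inference-sketch",
   "metadata": {},
   "outputs": [],
   "source": [
    "# A minimal sketch of API-based inference, assuming an OpenAI-compatible\n",
    "# server (e.g. vLLM) is already serving Qwen2.5-Omni. The base_url,\n",
    "# api_key, and \"video_url\" content type are placeholders/assumptions\n",
    "# about that deployment.\n",
    "from openai import OpenAI\n",
    "\n",
    "client = OpenAI(base_url=\"http://localhost:8000/v1\", api_key=\"EMPTY\")\n",
    "\n",
    "def inference_api(video_url, prompt, sys_prompt):\n",
    "    completion = client.chat.completions.create(\n",
    "        model=\"Qwen/Qwen2.5-Omni-7B\",\n",
    "        messages=[\n",
    "            {\"role\": \"system\", \"content\": sys_prompt},\n",
    "            {\"role\": \"user\", \"content\": [\n",
    "                {\"type\": \"video_url\", \"video_url\": {\"url\": video_url}},\n",
    "                {\"type\": \"text\", \"text\": prompt},\n",
    "            ]},\n",
    "        ],\n",
    "    )\n",
    "    return completion.choices[0].message.content"
   ]
  },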
  {
   "cell_type": "markdown",
   "id": "386e4cd8",
   "metadata": {},
   "source": [
    "Load model and processors."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "e829b782-0be7-4bc6-a576-6b815323376e",
   "metadata": {
    "ExecutionIndicator": {
     "show": false
    },
    "execution": {
     "iopub.execute_input": "2025-01-29T12:40:18.337731Z",
     "iopub.status.busy": "2025-01-29T12:40:18.337470Z",
     "iopub.status.idle": "2025-01-29T12:40:47.760976Z",
     "shell.execute_reply": "2025-01-29T12:40:47.760220Z",
     "shell.execute_reply.started": "2025-01-29T12:40:18.337713Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/omni/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n",
      "2025-03-22 17:20:02.523530: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
      "2025-03-22 17:20:02.556178: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
      "2025-03-22 17:20:02.556202: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
      "2025-03-22 17:20:02.557034: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
      "2025-03-22 17:20:02.562397: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
      "To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
      "2025-03-22 17:20:03.318258: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
      "Loading checkpoint shards: 100%|██████████| 5/5 [00:05<00:00,  1.17s/it]\n",
      "/opt/conda/envs/omni/lib/python3.10/site-packages/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py:6129: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n",
      "  for key, value in torch.load(path).items():\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from transformers import Qwen2_5OmniModel, Qwen2_5OmniProcessor\n",
    "\n",
    "model_path = \"Qwen/Qwen2.5-Omni-7B\"\n",
    "model = Qwen2_5OmniModel.from_pretrained(\n",
    "    model_path,\n",
    "    torch_dtype=torch.bfloat16,\n",
    "    device_map=\"auto\",\n",
    "    attn_implementation=\"flash_attention_2\",\n",
    ")\n",
    "processor = Qwen2_5OmniProcessor.from_pretrained(model_path)"
   ]
  },
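  {
   "cell_type": "markdown",
   "id": "sdpa-fallback-note",
   "metadata": {},
   "source": [
    "If `flash-attn` fails to build or your GPU does not support FlashAttention 2, PyTorch's built-in scaled-dot-product attention is a portable fallback. The sketch below is the same load with `attn_implementation=\"sdpa\"`; run it instead of the cell above, at some cost in speed and memory efficiency."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "sdpa-fallback-sketch",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fallback load without FlashAttention 2 (run instead of the cell above).\n",
    "# \"sdpa\" selects PyTorch's built-in scaled-dot-product attention and\n",
    "# needs no extra packages.\n",
    "model = Qwen2_5OmniModel.from_pretrained(\n",
    "    model_path,\n",
    "    torch_dtype=torch.bfloat16,\n",
    "    device_map=\"auto\",\n",
    "    attn_implementation=\"sdpa\",\n",
    ")"
   ]
  },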
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "ed93fb82",
   "metadata": {},
   "outputs": [],
   "source": [
    "from IPython.display import Video"
   ]
  },
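  {
   "cell_type": "markdown",
   "id": "audio-in-video-note",
   "metadata": {},
   "source": [
    "The `inference` helper above passes `use_audio_in_video=False` throughout, so the model only sees the visual frames. Qwen2.5-Omni can also attend to a video's audio track. The variant below is a sketch with `use_audio_in_video=True`; it assumes the video file actually carries an audio stream, and the flag must be set consistently in `process_mm_info`, the processor call, and `generate`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "audio-in-video-sketch",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Variant of the inference helper that also feeds the video's audio\n",
    "# track to the model. The use_audio_in_video flag must match across\n",
    "# process_mm_info, the processor call, and generate.\n",
    "def inference_with_audio(video_path, prompt, sys_prompt):\n",
    "    messages = [\n",
    "        {\"role\": \"system\", \"content\": sys_prompt},\n",
    "        {\"role\": \"user\", \"content\": [\n",
    "            {\"type\": \"text\", \"text\": prompt},\n",
    "            {\"type\": \"video\", \"video\": video_path},\n",
    "        ]},\n",
    "    ]\n",
    "    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
    "    audios, images, videos = process_mm_info(messages, use_audio_in_video=True)\n",
    "    inputs = processor(text=text, audios=audios, images=images, videos=videos, return_tensors=\"pt\", padding=True, use_audio_in_video=True)\n",
    "    inputs = inputs.to(model.device).to(model.dtype)\n",
    "    output = model.generate(**inputs, use_audio_in_video=True, return_audio=False)\n",
    "    return processor.batch_decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=False)"
   ]
  },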
  {
   "cell_type": "markdown",
   "id": "6a47ad45",
   "metadata": {},
   "source": [
    "#### Question 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1935af5e",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2025-01-29T12:41:18.150397Z",
     "iopub.status.busy": "2025-01-29T12:41:18.149631Z",
     "iopub.status.idle": "2025-01-29T12:41:19.978329Z",
     "shell.execute_reply": "2025-01-29T12:41:19.977054Z",
     "shell.execute_reply.started": "2025-01-29T12:41:18.150371Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<video src=\"https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-Omni/shopping.mp4\" controls  width=\"640\"  height=\"360\">\n",
       "      Your browser does not support the <code>video</code> element.\n",
       "    </video>"
      ],
      "text/plain": [
       "<IPython.core.display.Video object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:root:System prompt modified, audio output may not work as expected. Audio output mode only works when using default system prompt 'You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.'\n",
      "qwen-vl-utils using torchvision to read video.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "system\n",
      "You are a helpful assistant.\n",
      "user\n",
      "How many kind of drinks can you see in the video?\n",
      "assistant\n",
      "There are five different kinds of drinks visible in the video.\n"
     ]
    }
   ],
   "source": [
    "video_path = \"https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-Omni/shopping.mp4\"\n",
    "prompt = \"How many kind of drinks can you see in the video?\"\n",
    "\n",
    "display(Video(video_path, width=640, height=360))\n",
    "\n",
    "## Use a local HuggingFace model to inference.\n",
    "response = inference(video_path, prompt=prompt, sys_prompt=\"You are a helpful assistant.\")\n",
    "print(response[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f9961aae",
   "metadata": {},
   "source": [
    "#### Question 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0894f5f1",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2025-01-29T12:44:01.387553Z",
     "iopub.status.busy": "2025-01-29T12:44:01.386725Z",
     "iopub.status.idle": "2025-01-29T12:44:09.671782Z",
     "shell.execute_reply": "2025-01-29T12:44:09.671200Z",
     "shell.execute_reply.started": "2025-01-29T12:44:01.387530Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<video src=\"https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-Omni/shopping.mp4\" controls  width=\"640\"  height=\"360\">\n",
       "      Your browser does not support the <code>video</code> element.\n",
       "    </video>"
      ],
      "text/plain": [
       "<IPython.core.display.Video object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:root:System prompt modified, audio output may not work as expected. Audio output mode only works when using default system prompt 'You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.'\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "system\n",
      "You are a helpful assistant.\n",
      "user\n",
      "How many bottles of drinks have I picked up?\n",
      "assistant\n",
      "You have picked up two bottles of drinks.\n"
     ]
    }
   ],
   "source": [
    "video_path = \"https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-Omni/shopping.mp4\"\n",
    "prompt = \"How many bottles of drinks have I picked up?\"\n",
    "\n",
    "display(Video(video_path, width=640, height=360))\n",
    "\n",
    "## Use a local HuggingFace model to inference.\n",
    "response = inference(video_path, prompt=prompt, sys_prompt=\"You are a helpful assistant.\")\n",
    "print(response[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4e9b7651",
   "metadata": {},
   "source": [
    "#### Question 3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "16aa3dc5",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<video src=\"https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-Omni/shopping.mp4\" controls  width=\"640\"  height=\"360\">\n",
       "      Your browser does not support the <code>video</code> element.\n",
       "    </video>"
      ],
      "text/plain": [
       "<IPython.core.display.Video object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:root:System prompt modified, audio output may not work as expected. Audio output mode only works when using default system prompt 'You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.'\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "system\n",
      "You are a helpful assistant.\n",
      "user\n",
      "How many milliliters are there in the bottle I picked up second time?\n",
      "assistant\n",
      "The bottle you picked up second time contains 500 milliliters of liquid.\n"
     ]
    }
   ],
   "source": [
    "video_path = \"https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-Omni/shopping.mp4\"\n",
    "prompt = \"How many milliliters are there in the bottle I picked up second time?\"\n",
    "\n",
    "display(Video(video_path, width=640, height=360))\n",
    "\n",
    "## Use a local HuggingFace model to inference.\n",
    "response = inference(video_path, prompt=prompt, sys_prompt=\"You are a helpful assistant.\")\n",
    "print(response[0])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "omni",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}