{ "cells": [ { "cell_type": "markdown", "id": "f2c16396", "metadata": {}, "source": [ "### Voice Chatting with Qwen2.5-Omni\n", "\n", "This notebook demonstrates how to chat with Qwen2.5-Omni by voice input and output." ] }, { "cell_type": "code", "execution_count": null, "id": "638e9082-c1ef-4efd-9a10-e35507e25363", "metadata": { "execution": { "iopub.execute_input": "2025-01-29T12:40:04.049566Z", "iopub.status.busy": "2025-01-29T12:40:04.049365Z" }, "tags": [] }, "outputs": [], "source": [ "!pip install git+https://github.com/huggingface/transformers@f742a644ca32e65758c3adb36225aef1731bd2a8\n", "!pip install qwen-omni-utils\n", "!pip install openai\n", "!pip install flash-attn --no-build-isolation" ] }, { "cell_type": "code", "execution_count": null, "id": "9596c50d-80a8-433f-b846-1fbf61145ccc", "metadata": { "ExecutionIndicator": { "show": true }, "execution": { "iopub.execute_input": "2025-01-29T12:40:16.511701Z", "iopub.status.busy": "2025-01-29T12:40:16.510916Z", "iopub.status.idle": "2025-01-29T12:40:16.878038Z", "shell.execute_reply": "2025-01-29T12:40:16.877543Z", "shell.execute_reply.started": "2025-01-29T12:40:16.511678Z" }, "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/opt/conda/envs/omni/lib/python3.10/site-packages/_distutils_hack/__init__.py:53: UserWarning: Reliance on distutils from stdlib is deprecated. Users must rely on setuptools to provide the distutils module. Avoid importing distutils or import setuptools first, and avoid setting SETUPTOOLS_USE_DISTUTILS=stdlib. Register concerns at https://github.com/pypa/setuptools/issues/new?template=distutils-deprecation.yml\n", " warnings.warn(\n" ] } ], "source": [ "from qwen_omni_utils import process_mm_info\n", "\n", "# @title inference function\n", "def inference(audio_path):\n", " messages = [\n", " {\"role\": \"system\", \"content\": \"You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.\"},\n", " {\"role\": \"user\", \"content\": [\n", " {\"type\": \"audio\", \"audio\": audio_path},\n", " ]\n", " },\n", " ]\n", " text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n", " audios, images, videos = process_mm_info(messages, use_audio_in_video=True)\n", " inputs = processor(text=text, audios=audios, images=images, videos=videos, return_tensors=\"pt\", padding=True, use_audio_in_video=True)\n", " inputs = inputs.to(model.device).to(model.dtype)\n", "\n", " output = model.generate(**inputs, use_audio_in_video=True, return_audio=True)\n", "\n", " text = processor.batch_decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)\n", " audio = output[1]\n", " return text, audio" ] }, { "cell_type": "markdown", "id": "386e4cd8", "metadata": {}, "source": [ "Load model and processors." ] }, { "cell_type": "code", "execution_count": 3, "id": "e829b782-0be7-4bc6-a576-6b815323376e", "metadata": { "ExecutionIndicator": { "show": false }, "execution": { "iopub.execute_input": "2025-01-29T12:40:18.337731Z", "iopub.status.busy": "2025-01-29T12:40:18.337470Z", "iopub.status.idle": "2025-01-29T12:40:47.760976Z", "shell.execute_reply": "2025-01-29T12:40:47.760220Z", "shell.execute_reply.started": "2025-01-29T12:40:18.337713Z" }, "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/opt/conda/envs/omni/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. 
{ "cell_type": "markdown", "id": "386e4cd8", "metadata": {}, "source": [ "Load the model and processor." ] },
{ "cell_type": "code", "execution_count": 3, "id": "e829b782-0be7-4bc6-a576-6b815323376e", "metadata": { "ExecutionIndicator": { "show": false }, "execution": { "iopub.execute_input": "2025-01-29T12:40:18.337731Z", "iopub.status.busy": "2025-01-29T12:40:18.337470Z", "iopub.status.idle": "2025-01-29T12:40:47.760976Z", "shell.execute_reply": "2025-01-29T12:40:47.760220Z", "shell.execute_reply.started": "2025-01-29T12:40:18.337713Z" }, "tags": [] }, "outputs": [], "source": [ "import torch\n", "from transformers import Qwen2_5OmniModel, Qwen2_5OmniProcessor\n", "\n", "model_path = \"Qwen/Qwen2.5-Omni-7B\"\n", "# bfloat16 plus FlashAttention 2 keeps the 7B checkpoint within a single modern GPU.\n", "model = Qwen2_5OmniModel.from_pretrained(\n", "    model_path,\n", "    torch_dtype=torch.bfloat16,\n", "    device_map=\"auto\",\n", "    attn_implementation=\"flash_attention_2\",\n", ")\n", "processor = Qwen2_5OmniProcessor.from_pretrained(model_path)" ] },
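{ "cell_type": "markdown", "id": "b2e1c3d4", "metadata": {}, "source": [ "Optionally, confirm where the checkpoint landed before running inference. The check below uses only standard PyTorch attributes (`model.device`, `model.dtype`), the same ones the `inference` function relies on." ] },
{ "cell_type": "code", "execution_count": null, "id": "b2e1c3d5", "metadata": {}, "outputs": [], "source": [ "# Sanity check: device placement, dtype, and parameter count of the loaded model.\n", "num_params = sum(p.numel() for p in model.parameters())\n", "print(f\"device={model.device}, dtype={model.dtype}, parameters={num_params / 1e9:.1f}B\")" ] },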
{ "cell_type": "code", "execution_count": 4, "id": "ed93fb82", "metadata": {}, "outputs": [], "source": [ "import librosa\n", "\n", "from io import BytesIO\n", "from urllib.request import urlopen\n", "\n", "from IPython.display import Audio" ] },
{ "cell_type": "markdown", "id": "6a47ad45", "metadata": {}, "source": [ "#### Voice Chat Example 1" ] },
{ "cell_type": "code", "execution_count": null, "id": "1935af5e", "metadata": { "ExecutionIndicator": { "show": true }, "execution": { "iopub.execute_input": "2025-01-29T12:41:18.150397Z", "iopub.status.busy": "2025-01-29T12:41:18.149631Z", "iopub.status.idle": "2025-01-29T12:41:19.978329Z", "shell.execute_reply": "2025-01-29T12:41:19.977054Z", "shell.execute_reply.started": "2025-01-29T12:41:18.150371Z" }, "tags": [] }, "outputs": [ { "data": { "text/plain": [ "<IPython.lib.display.Audio object>" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "system\n", "You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.\n", "user\n", "\n", "assistant\n", "Well, I can't really guess your age and gender just from your voice. I don't have that kind of superpower. But if you want to talk about something else, like your hobbies or what you like to do, that could be really interesting.\n" ] }, { "data": { "text/plain": [ "<IPython.lib.display.Audio object>" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "audio_path = \"https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/guess_age_gender.wav\"\n", "\n", "# Download the prompt audio and play it back at its 16 kHz sampling rate.\n", "audio = librosa.load(BytesIO(urlopen(audio_path).read()), sr=16000)[0]\n", "display(Audio(audio, rate=16000))\n", "\n", "## Use the local Hugging Face model for inference.\n", "response = inference(audio_path)\n", "print(response[0][0])\n", "display(Audio(response[1], rate=24000))" ] },
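{ "cell_type": "markdown", "id": "c3f2d4e5", "metadata": {}, "source": [ "The `openai` package installed at the top is useful when the model is served behind an OpenAI-compatible API instead of being loaded locally. The sketch below is hedged: the base URL, API key, served model name, and `input_audio` message support all depend on your deployment and are assumptions here, not part of this notebook's pinned setup." ] },
{ "cell_type": "code", "execution_count": null, "id": "c3f2d4e6", "metadata": {}, "outputs": [], "source": [ "import base64\n", "from openai import OpenAI\n", "\n", "# Hypothetical OpenAI-compatible endpoint; replace with your own deployment.\n", "client = OpenAI(base_url=\"http://localhost:8000/v1\", api_key=\"EMPTY\")\n", "\n", "# Send the same clip as a base64-encoded `input_audio` message part.\n", "audio_b64 = base64.b64encode(urlopen(audio_path).read()).decode(\"utf-8\")\n", "completion = client.chat.completions.create(\n", "    model=\"qwen2.5-omni-7b\",  # hypothetical served model name\n", "    messages=[{\"role\": \"user\", \"content\": [\n", "        {\"type\": \"input_audio\", \"input_audio\": {\"data\": audio_b64, \"format\": \"wav\"}},\n", "    ]}],\n", ")\n", "print(completion.choices[0].message.content)" ] },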
{ "cell_type": "markdown", "id": "f9961aae", "metadata": {}, "source": [ "#### Voice Chat Example 2" ] },
{ "cell_type": "code", "execution_count": null, "id": "0894f5f1", "metadata": { "ExecutionIndicator": { "show": true }, "execution": { "iopub.execute_input": "2025-01-29T12:44:01.387553Z", "iopub.status.busy": "2025-01-29T12:44:01.386725Z", "iopub.status.idle": "2025-01-29T12:44:09.671782Z", "shell.execute_reply": "2025-01-29T12:44:09.671200Z", "shell.execute_reply.started": "2025-01-29T12:44:01.387530Z" }, "tags": [] }, "outputs": [ { "data": { "text/plain": [ "<IPython.lib.display.Audio object>" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "system\n", "You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.\n", "user\n", "\n", "assistant\n", "每个人都想被欣赏,所以如果你欣赏某人,就别让它成为秘密。如果还有其他翻译相关的问题或者别的事,都可以跟我说哦。\n" ] }, { "data": { "text/plain": [ "<IPython.lib.display.Audio object>" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "audio_path = \"https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/translate_to_chinese.wav\"\n", "\n", "# Download the prompt audio and play it back at its 16 kHz sampling rate.\n", "audio = librosa.load(BytesIO(urlopen(audio_path).read()), sr=16000)[0]\n", "display(Audio(audio, rate=16000))\n", "\n", "## Use the local Hugging Face model for inference.\n", "response = inference(audio_path)\n", "print(response[0][0])\n", "display(Audio(response[1], rate=24000))" ] } ], "metadata": { "kernelspec": { "display_name": "omni", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.16" } }, "nbformat": 4, "nbformat_minor": 5 }