Commit c0d47a5d authored by weishb's avatar weishb
Browse files

update README.md

parent be69ba06
@@ -87,7 +87,14 @@ python test_model_12hz_base.py
VoiceDesign
```bash
## serve启动
VLLM_USE_V1=0 python -m vllm.entrypoints.openai.api_server \
--model Qwen3-TTS/Qwen3-TTS-12Hz-1.7B-VoiceDesign \
--served-model-name qwen3-tts \
--host 0.0.0.0 \
--port 8000 \
--trust-remote-code \
--dtype bfloat16 \
--disable-async-output-proc
## client访问
curl -sS http://127.0.0.1:8000/v1/audio/speech \
@@ -113,7 +120,14 @@ curl -sS http://127.0.0.1:8000/v1/audio/speech \
CustomVoice
```bash
## serve启动
VLLM_USE_V1=0 python -m vllm.entrypoints.openai.api_server \
--model Qwen3-TTS/Qwen3-TTS-12Hz-1.7B-CustomVoice \
--served-model-name qwen3-tts \
--host 0.0.0.0 \
--port 8000 \
--trust-remote-code \
--dtype bfloat16 \
--disable-async-output-proc
## client访问
curl -sS http://127.0.0.1:8000/v1/audio/speech \
@@ -140,7 +154,14 @@ curl -sS http://127.0.0.1:8000/v1/audio/speech \
Voice Clone
```bash
## serve启动
VLLM_USE_V1=0 python -m vllm.entrypoints.openai.api_server \
--model Qwen3-TTS/Qwen3-TTS-12Hz-1.7B-Base \
--served-model-name qwen3-tts \
--host 0.0.0.0 \
--port 8000 \
--trust-remote-code \
--dtype bfloat16 \
--disable-async-output-proc
## client访问
curl -sS http://127.0.0.1:8000/v1/audio/speech \
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment