Commit f2698399 authored by wangkx1's avatar wangkx1
Browse files

update new-moon

parent c0705977
Flask==2.0.1
Flask-SocketIO==5.1.1
eventlet==0.33.1
SQLAlchemy==1.4.23
Werkzeug==2.0.1
Jinja2==3.0.1
MarkupSafe==2.0.1
itsdangerous==2.0.1
click==8.0.1
python-engineio==4.2.1
python-socketio==5.4.0
greenlet==1.1.2
six==1.16.0
dnspython==2.2.1
\ No newline at end of file
#!/bin/bash
# Startup script for the model management tool: creates a virtualenv,
# installs dependencies and launches the Flask backend.

echo "========================================="
echo "Linux模型管理工具启动脚本"
echo "========================================="

# Verify python3 is available.
echo "检查Python版本..."
if ! command -v python3 &> /dev/null; then
    echo "错误: 未找到Python3,请先安装Python3"
    exit 1
fi
PYTHON_VERSION=$(python3 --version 2>&1 | awk '{print $2}')
echo "已安装Python版本: $PYTHON_VERSION"

# Create the virtual environment on first run.
if [ ! -d "venv" ]; then
    echo "创建Python虚拟环境..."
    if ! python3 -m venv venv; then
        echo "错误: 创建虚拟环境失败"
        exit 1
    fi
fi

# Activate the virtual environment.
# FIX: the original script only checked $? here but never ran `source`,
# so activation never happened and every pip install below went into the
# system environment instead of the venv.
echo "激活虚拟环境..."
# shellcheck disable=SC1091
if ! source venv/bin/activate; then
    echo "错误: 激活虚拟环境失败"
    exit 1
fi

# Upgrade pip inside the venv (plain `pip` now that the venv is active).
echo "升级pip..."
if ! pip install --upgrade pip -i https://pypi.tuna.tsinghua.edu.cn/simple; then
    echo "警告: 升级pip失败"
fi

# Install runtime dependencies.
echo "安装依赖包..."
if ! pip install flask flask-cors flask-socketio eventlet requests modelscope -i https://pypi.tuna.tsinghua.edu.cn/simple; then
    echo "警告: 安装依赖包失败,某些功能可能受限"
fi

# Ensure the directories the app expects are present.
echo "创建必要的目录..."
mkdir -p backend/static backend/templates

# Warn if the application port is already in use.
PORT=2026
if lsof -Pi :"$PORT" -sTCP:LISTEN -t >/dev/null 2>&1; then
    echo "警告: 端口 $PORT 已被占用,请先关闭占用该端口的进程"
    echo "您可以使用以下命令关闭占用端口的进程:"
    echo "sudo lsof -t -i:$PORT | xargs kill -9"
    read -p "是否继续启动?(y/N): " -n 1 -r
    echo
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
        exit 1
    fi
fi

# Launch the application.
echo "========================================="
echo "启动模型管理工具..."
echo "访问地址: http://localhost:$PORT"
echo "========================================="

# Run the Flask app from the backend directory.
cd backend || exit 1
python3 app.py
\ No newline at end of file
home = /usr/bin
include-system-site-packages = false
version = 3.10.12
-----BEGIN CERTIFICATE-----
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
-----END CERTIFICATE-----
# 模型下载管理器
一个基于Gradio的模型下载、上传和管理工具,支持ModelScope模型下载和CsgHub上传。
## 功能特点
### 1. 模型下载
- 支持通过模型ID下载ModelScope模型
- 任务优先级管理(0-10,数字越大优先级越高)
- 每次仅同时下载一个模型
- 预估远程模型大小
- 根据文件数量显示下载进度
- 支持暂停、恢复、删除下载任务
### 2. 模型上传
- 上传本地模型到CsgHub平台
- 显示上传进度(按文件数量)
- 已上传的模型自动变灰显示在页面底部
- 支持暂停、恢复、删除上传任务
### 3. 本地模型管理
- 显示本地所有已下载/已上传的模型
- 支持删除本地模型
- 已上传的模型有特殊标记
### 4. 自动操作
- 下载后自动上传(可配置)
- 上传后自动删除本地文件(可配置)
### 5. 配置管理
- 设置本地模型存储目录
- 配置最大同时下载/上传数
- 配置CsgHub连接信息
- 配置自动操作选项
## 安装与运行
### 前提条件
- Python 3.7+
- pip
### 安装步骤
1. 进入项目目录
```bash
cd /data/DataStore/models/exp-net/new-moon
```
2. 安装依赖
```bash
pip install -r requirements.txt
```
3. 启动应用
```bash
python start_manager.py
```
或者直接运行:
```bash
python model_download_manager.py
```
### 快速启动
```bash
cd /data/DataStore/models/exp-net/new-moon && python start_manager.py
```
## 使用说明
### 首次使用
1. 打开浏览器访问:`http://localhost:7860`
2. 在顶部配置面板中设置本地目录路径
3. 点击"设置目录"保存配置
### 下载模型
1. 切换到"📥 下载"标签页
2. 输入模型ID(例如:`Qwen/Qwen2.5-7B-Instruct`)
3. 设置优先级(可选,默认0)
4. 点击"添加下载任务"
5. 在任务列表中查看下载进度
### 上传模型
1. 切换到"📤 上传"标签页
2. 输入已下载的模型ID
3. 点击"添加上传任务"
4. 在任务列表中查看上传进度
### 管理本地模型
1. 切换到"💾 本地模型"标签页
2. 查看本地所有模型列表
3. 点击"上传"按钮上传未上传的模型
4. 点击"删除"按钮删除本地模型
### 配置选项
- **本地目录**: 模型存储路径
- **下载后自动上传**: 下载完成后自动开始上传
- **上传后自动删除**: 上传完成后自动删除本地文件
- **最大同时下载数**: 控制并发下载数量(默认1)
- **最大同时上传数**: 控制并发上传数量(默认1)
## 注意事项
1. **首次使用**: 必须设置本地目录才能正常使用
2. **模型ID格式**: 使用`组织/模型名`格式,如`Qwen/Qwen2.5-7B-Instruct`
3. **网络连接**: 确保可以访问ModelScope和CsgHub服务
4. **磁盘空间**: 确保有足够的磁盘空间存储模型
5. **权限**: 确保有读写本地目录的权限
## 故障排除
### 常见问题
1. **无法启动应用**
- 检查Python版本:`python --version`
- 检查依赖安装:`pip list | grep gradio`
2. **下载失败**
- 检查网络连接
- 检查模型ID是否正确
- 检查磁盘空间
3. **上传失败**
- 检查CsgHub服务是否可用
- 检查token配置是否正确
- 检查本地模型是否存在
4. **界面无法加载**
- 检查端口7860是否被占用
- 尝试重启应用
### 日志查看
应用运行日志会输出到控制台,包含:
- 任务状态更新
- 下载/上传进度
- 错误信息
## 配置说明
### 配置文件
- `model_manager_config.json`: 应用配置文件
- `model_manager_state.json`: 任务状态文件
### 默认配置
```json
{
"local_dir": "",
"max_concurrent_downloads": 1,
"max_concurrent_uploads": 1,
"auto_upload_after_download": false,
"auto_delete_after_upload": false,
"csghub_config": {
"base_url": "http://10.17.27.227:4997",
"token": "f5dad38a9426410aa861155cd184f84a",
"repo_type": "model",
"revision": "main"
}
}
```
## 开发说明
### 项目结构
```
new-moon/
├── model_download_manager.py # 主程序
├── start_manager.py # 启动脚本
├── requirements.txt # 依赖列表
├── model_manager_config.json # 配置文件
├── model_manager_state.json # 状态文件
└── README.md # 说明文档
```
### 核心模块
1. **GlobalState**: 全局状态管理
2. **DownloadTask/UploadTask**: 任务管理
3. **download_worker/upload_worker**: 工作线程
4. **create_interface()**: Gradio界面
### 扩展功能
如需扩展功能,可以修改以下部分:
- 添加新的模型源
- 支持更多上传平台
- 增加模型转换功能
- 添加批量操作功能
## 许可证
本项目基于现有代码库开发,遵循原有许可证。
## 支持
如有问题或建议,请联系项目维护者。
\ No newline at end of file
csghub:
base_url: http://10.17.27.227:4997
repo_type: model
revision: main
token: f5dad38a9426410aa861155cd184f84a
download:
max_concurrent: 1
max_retries: 10
retry_interval: 1
local:
config_db_path: config_db.json
default_model_path: /data/DataStore/models/exp-net/new-moon/models
models_db_path: models_db.json
upload:
create_repo_default: true
num_workers: 1
version: '3'
services:
model-download-manager:
image: python:3.9-slim
container_name: model_download_manager
volumes:
- /data/DataStore/models/exp-net/new-moon/models:/workspace/new-moon/models
ports:
- "7865:7865"
working_dir: /workspace/new-moon
command: bash -c "python3 model_download_manager.py"
restart: unless-stopped
environment:
- PYTHONUNBUFFERED=1
# 简单修复:只导入必要的模块,避免与huggingface_hub的冲突
import sys
import os
import json
import yaml
import time
import threading
import queue
import shutil
import multiprocessing
from datetime import datetime
from typing import Dict, List, Optional
# 移除了huggingface_hub的Mock替换,因为已经安装了真实的库
# 仅导入Gradio的基本组件,避免使用需要huggingface_hub的功能
import gradio as gr
from typing import Dict, List, Optional
# Optional dependency: ModelScope SDK, used for downloading models and
# querying remote model metadata. The flag gates all download features.
try:
    from modelscope.hub.snapshot_download import snapshot_download
    from modelscope.hub.api import HubApi
    HAS_MODELSCOPE = True
except ImportError:
    print("警告: modelscope未安装")
    HAS_MODELSCOPE = False
# Optional dependency: pycsghub, used for creating repos on and uploading
# folders to a CsgHub instance. The flag gates all upload features.
try:
    from pycsghub.csghub_api import CsgHubApi
    from pycsghub.upload_large_folder.main import upload_large_folder_internal, create_repo
    HAS_PYCSGHUB = True
except ImportError:
    print("警告: pycsghub未安装")
    HAS_PYCSGHUB = False
# YAML configuration file read at startup (see DEFAULT_CONFIG for schema).
CONFIG_FILE = "config.yaml"
# JSON file that persists download/upload task state across restarts.
STATE_FILE = "model_manager_state.json"
# Fallback configuration used when config.yaml is absent; loaded values are
# merged over these defaults one level deep (see GlobalState.load_config).
DEFAULT_CONFIG = {
    "local": {
        "default_model_path": "",
        "models_db_path": "models_db.json",
        "config_db_path": "config_db.json"
    },
    "csghub": {
        "base_url": "",
        "token": "",
        "repo_type": "model",
        "revision": "main"
    },
    "download": {
        "max_retries": 10,
        "retry_interval": 5,
        "max_concurrent": 1
    },
    "upload": {
        "create_repo_default": True,
        "num_workers": 1
    }
}
class GlobalState:
    """Process-wide singleton holding configuration, task state and queues.

    Construction is idempotent: repeated ``GlobalState()`` calls return the
    same instance and ``__init__`` runs its body only once.
    """

    _instance = None
    _lock = threading.RLock()

    def __new__(cls):
        # Double-checked locking: only the first concurrent caller creates
        # the instance; everyone else gets the cached one.
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
                    cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        if self._initialized:
            return
        self._initialized = True
        # BUG FIX: DEFAULT_CONFIG.copy() is a *shallow* copy, so the nested
        # dicts were shared with the module-level defaults and load_config()'s
        # `self.config[key].update(value)` mutated DEFAULT_CONFIG itself.
        # A JSON round-trip yields a deep copy without any extra imports
        # (DEFAULT_CONFIG is plain JSON-serializable data).
        self.config = json.loads(json.dumps(DEFAULT_CONFIG))
        self.state = {
            "download_tasks": {},
            "upload_tasks": {},
            "local_models": [],
            "remote_cache": {}
        }
        # Download tasks are ordered by (-priority, task_id); uploads are FIFO.
        self.download_queue = queue.PriorityQueue()
        self.upload_queue = queue.Queue()
        self.active_downloads = {}    # task_id -> model_id
        self.download_processes = {}  # task_id -> multiprocessing.Process
        self.active_uploads = {}      # task_id -> model_id
        self.operation_lock = threading.RLock()
        self.load_config()
        self.load_state()

    def load_config(self):
        """Merge config.yaml (if present) over the defaults, one level deep."""
        if os.path.exists(CONFIG_FILE):
            try:
                with open(CONFIG_FILE, 'r', encoding='utf-8') as f:
                    loaded = yaml.safe_load(f)
                    if loaded:
                        # Merge loaded config with default config.
                        for key, value in loaded.items():
                            if key in self.config:
                                if isinstance(value, dict) and isinstance(self.config[key], dict):
                                    self.config[key].update(value)
                                else:
                                    self.config[key] = value
            except Exception as e:
                print(f"加载配置失败: {e}")

    def save_config(self):
        """Persist the current configuration back to config.yaml."""
        try:
            with open(CONFIG_FILE, 'w', encoding='utf-8') as f:
                yaml.dump(self.config, f, default_flow_style=False, allow_unicode=True)
        except Exception as e:
            print(f"保存配置失败: {e}")

    def load_state(self):
        """Restore persisted task state; in-flight downloads resume as paused."""
        if os.path.exists(STATE_FILE):
            try:
                with open(STATE_FILE, 'r', encoding='utf-8') as f:
                    loaded = json.load(f)
                # Any task that was pending/downloading when the process died
                # is demoted to paused so the user must resume it explicitly.
                download_tasks = loaded.get("download_tasks", {})
                for task_id, task in download_tasks.items():
                    if task.get("status") in ["pending", "downloading"]:
                        task["status"] = "paused"
                        task["message"] = "任务已暂停"
                self.state["download_tasks"] = download_tasks
                self.state["upload_tasks"] = loaded.get("upload_tasks", {})
                self.state["remote_cache"] = loaded.get("remote_cache", {})
                # Persist the demotions immediately.
                self.save_state()
            except Exception as e:
                print(f"加载状态失败: {e}")

    def save_state(self):
        """Write the full task state to STATE_FILE as pretty JSON."""
        try:
            with open(STATE_FILE, 'w', encoding='utf-8') as f:
                json.dump(self.state, f, indent=2, ensure_ascii=False)
        except Exception as e:
            print(f"保存状态失败: {e}")
# Module-level singleton; all worker threads and UI callbacks share it.
global_state = GlobalState()
def format_size(size_bytes):
    """Render a byte count as a human-readable string (B/KB/MB/GB, 1 decimal)."""
    kb = 1024
    mb = 1024 ** 2
    gb = 1024 ** 3
    if size_bytes < kb:
        return f"{size_bytes}B"
    if size_bytes < mb:
        return f"{size_bytes / kb:.1f}KB"
    if size_bytes < gb:
        return f"{size_bytes / mb:.1f}MB"
    return f"{size_bytes / gb:.1f}GB"
def get_dir_size(path):
    """Return the total size in bytes of all files under `path`.

    Returns 0 when the path does not exist. Files that disappear or become
    unreadable during the walk are skipped.
    """
    total = 0
    if os.path.exists(path):
        for root, dirs, files in os.walk(path):
            for f in files:
                fp = os.path.join(root, f)
                try:
                    total += os.path.getsize(fp)
                except OSError:
                    # FIX: was a bare `except:` which also swallowed
                    # KeyboardInterrupt/SystemExit; only filesystem errors
                    # (file vanished, permission denied) should be skipped.
                    pass
    return total
def get_file_count(path):
    """Count regular files under `path`, recursively; 0 if the path is absent."""
    if not os.path.exists(path):
        return 0
    return sum(len(names) for _root, _dirs, names in os.walk(path))
def estimate_model_size(model_id):
    """Return the remote model's size as a human-readable string.

    Results are cached in global_state.state["remote_cache"] for one hour.
    Returns "未知" when modelscope is unavailable, the size is unreported,
    or the lookup fails.
    """
    cache_key = f"size_{model_id}"
    if cache_key in global_state.state["remote_cache"]:
        cached = global_state.state["remote_cache"][cache_key]
        if time.time() - cached.get("ts", 0) < 3600:
            return cached.get("size", "未知")
    if not HAS_MODELSCOPE:
        return "未知"
    try:
        api = HubApi()
        info = api.get_model(model_id)
        # StorageSize is reported in bytes.
        # NOTE(review): assumes the hub response is a mapping with a
        # "StorageSize" key — confirm against the ModelScope API.
        storage_size = info.get("StorageSize", 0)
        # CONSISTENCY FIX: reuse format_size() instead of a second,
        # slightly different hand-rolled if/elif formatting chain.
        size_str = format_size(storage_size) if storage_size > 0 else "未知"
        global_state.state["remote_cache"][cache_key] = {"size": size_str, "ts": time.time()}
        global_state.save_state()
        return size_str
    except Exception as e:
        print(f"预估大小失败: {e}")
        return "未知"
def get_remote_file_count(model_id):
    """Return the number of files in the remote ModelScope model.

    Results are cached in global_state.state["remote_cache"] for one hour.
    Returns 0 when modelscope is unavailable or the lookup fails.
    """
    cache_key = f"file_count_{model_id}"
    if cache_key in global_state.state["remote_cache"]:
        cached = global_state.state["remote_cache"][cache_key]
        if time.time() - cached.get("ts", 0) < 3600:
            return cached.get("count", 0)
    if not HAS_MODELSCOPE:
        return 0
    try:
        api = HubApi()
        info = api.get_model(model_id)
        # Count model files — mirrors the approach in ms-demo.py.
        # NOTE(review): assumes the response has a "ModelInfos" mapping of
        # library -> {"files": [...]} — confirm against the ModelScope API.
        file_count = 0
        if "ModelInfos" in info:
            # Prefer the safetensor file list when present.
            if "safetensor" in info["ModelInfos"] and "files" in info["ModelInfos"]["safetensor"]:
                file_count = len(info["ModelInfos"]["safetensor"]["files"])
            else:
                # Otherwise, sum the file lists of every reported library.
                for library in info["ModelInfos"].values():
                    if "files" in library and isinstance(library["files"], list):
                        file_count += len(library["files"])
        global_state.state["remote_cache"][cache_key] = {"count": file_count, "ts": time.time()}
        global_state.save_state()
        return file_count
    except Exception as e:
        print(f"获取文件数量失败: {e}")
        return 0
def scan_local_models():
    """Scan the configured model directory and describe each local model.

    Expects a two-level layout: <local_dir>/<org>/<model_name>. A directory
    counts as a model only if it directly contains at least one weight file
    (.safetensors/.bin/.pth/.pt/.ckpt). Each entry records id, path, size,
    file count and whether a completed upload task exists for it.
    """
    local_dir = global_state.config.get("local", {}).get("default_model_path", "")
    if not local_dir or not os.path.exists(local_dir):
        return []
    models = []
    try:
        for org in os.listdir(local_dir):
            org_path = os.path.join(local_dir, org)
            if not os.path.isdir(org_path):
                continue
            for model_name in os.listdir(org_path):
                model_path = os.path.join(org_path, model_name)
                if not os.path.isdir(model_path):
                    continue
                # Presence of a weight file in the top level marks this
                # directory as a model (nested weight files are not checked).
                has_model = False
                for f in os.listdir(model_path):
                    if f.endswith(('.safetensors', '.bin', '.pth', '.pt', '.ckpt')):
                        has_model = True
                        break
                if has_model:
                    model_id = f"{org}/{model_name}"
                    size = get_dir_size(model_path)
                    files = get_file_count(model_path)
                    # A model counts as uploaded if any completed upload
                    # task references its id.
                    uploaded = False
                    for tid, task in global_state.state["upload_tasks"].items():
                        if task.get("model_id") == model_id and task.get("status") == "completed":
                            uploaded = True
                            break
                    models.append({
                        "id": model_id,
                        "path": model_path,
                        "size": format_size(size),
                        "size_bytes": size,
                        "file_count": files,
                        "uploaded": uploaded,
                        "status": "已上传" if uploaded else "已下载"
                    })
    except Exception as e:
        print(f"扫描失败: {e}")
    return models
def create_download_task(model_id, priority=0):
    """Register a new download task and enqueue it.

    Args:
        model_id: "org/name" style ModelScope model id.
        priority: larger numbers are served first (stored negated because
            queue.PriorityQueue pops the smallest item).

    Returns:
        The generated task id.
    """
    task_id = f"dl_{int(time.time()*1000)}_{hash(model_id) % 10000}"
    with global_state.operation_lock:
        global_state.state["download_tasks"][task_id] = {
            "task_id": task_id,
            "model_id": model_id,
            "priority": priority,
            "status": "pending",
            "progress": 0,
            "total_files": 0,
            "completed_files": 0,
            "estimated_size": estimate_model_size(model_id),
            "message": "等待下载...",
            "retry_count": 0,
            "auto_upload": global_state.config.get("auto_upload_after_download", False),
            "auto_delete": global_state.config.get("auto_delete_after_upload", False),
            "start_time": None,
            "end_time": None
        }
        global_state.save_state()
    global_state.download_queue.put((-priority, task_id))
    # CLEANUP: removed a dead `with operation_lock:` block that only tested
    # queue/active counts and then `pass`ed — the worker threads pull tasks
    # straight off the queue, so no explicit kick-off is needed here.
    return task_id
def create_upload_task(model_id, local_path):
    """Register a new upload task for a local model and enqueue it.

    Args:
        model_id: "org/name" model identifier.
        local_path: directory containing the model files to upload.

    Returns:
        The generated task id.
    """
    task_id = f"ul_{int(time.time()*1000)}_{hash(model_id) % 10000}"
    with global_state.operation_lock:
        global_state.state["upload_tasks"][task_id] = {
            "task_id": task_id,
            "model_id": model_id,
            "local_path": local_path,
            "status": "pending",
            "progress": 0,
            "total_files": 0,
            "completed_files": 0,
            "message": "等待上传...",
            "auto_delete": global_state.config.get("auto_delete_after_upload", False),
            "start_time": None,
            "end_time": None
        }
        global_state.save_state()
    global_state.upload_queue.put(task_id)
    return task_id
def download_worker():
    """Daemon loop: pull download tasks off the priority queue and run them.

    Only tasks in "pending" state are started; paused tasks stay out of the
    queue until resumed, and a model already being downloaded by another
    task is requeued. NOTE(review): source indentation was lost; structure
    below is reconstructed from the control-flow statements — verify against
    the original file.
    """
    while True:
        try:
            neg_priority, task_id = global_state.download_queue.get(timeout=1)
            with global_state.operation_lock:
                if task_id not in global_state.state["download_tasks"]:
                    continue
                task = global_state.state["download_tasks"][task_id]
                model_id = task["model_id"]
                # Skip (and requeue) if another task is already downloading
                # the same model.
                model_in_progress = False
                for active_id, active_model in global_state.active_downloads.items():
                    if active_model == model_id:
                        model_in_progress = True
                        break
                if model_in_progress:
                    global_state.download_queue.put((neg_priority, task_id))
                    continue
                # Only pending tasks run - paused stays paused (not requeued),
                # terminal states (completed/failed) are ignored.
                if task.get("status") != "pending":
                    if task.get("status") == "paused":
                        continue
                    continue
                global_state.active_downloads[task_id] = model_id
                task["status"] = "downloading"
                task["start_time"] = time.time()
                task["message"] = "开始下载..."
                global_state.save_state()
            max_retries = 10
            retry = task.get("retry_count", 0)
            try:
                # Re-check the task state right before launching the download.
                with global_state.operation_lock:
                    if task_id not in global_state.state["download_tasks"]:
                        continue
                    task = global_state.state["download_tasks"][task_id]
                    if task.get("status") != "downloading":
                        # State changed underneath us; put the task back.
                        global_state.download_queue.put((neg_priority, task_id))
                        continue
                # Run the download (blocks until done/paused/deleted).
                success = download_model_impl(task_id, model_id)
                # False means the task was paused or deleted mid-download.
                if not success:
                    with global_state.operation_lock:
                        if task_id in global_state.state["download_tasks"]:
                            task = global_state.state["download_tasks"][task_id]
                            if task.get("status") == "paused":
                                print(f"任务 {task_id} 已暂停,不继续处理")
                                continue  # skip the success handling below
                # On completion, optionally chain an upload task.
                with global_state.operation_lock:
                    task = global_state.state["download_tasks"].get(task_id)
                    if task and task["status"] == "completed":
                        if task.get("auto_upload"):
                            local_path = os.path.join(
                                global_state.config.get("local", {}).get("default_model_path", ""),
                                model_id.replace("/", os.path.sep)
                            )
                            if os.path.exists(local_path):
                                create_upload_task(model_id, local_path)
            except Exception as e:
                print(f"下载线程异常: {e}")
                with global_state.operation_lock:
                    if task_id in global_state.state["download_tasks"]:
                        task = global_state.state["download_tasks"][task_id]
                        # A paused task skips the retry bookkeeping entirely.
                        if task.get("status") == "paused":
                            print(f"任务 {task_id} 已暂停,跳过重试")
                            continue
                        retry += 1
                        task["retry_count"] = retry
                        task["message"] = f"下载失败 (尝试 {retry}/{max_retries}): {str(e)[:50]}"
                        global_state.save_state()
            finally:
                # Always release the active-download slot, whatever happened.
                with global_state.operation_lock:
                    if task_id in global_state.active_downloads:
                        del global_state.active_downloads[task_id]
                    # A paused task is requeued so it can resume later.
                    if task_id in global_state.state["download_tasks"]:
                        task = global_state.state["download_tasks"][task_id]
                        if task.get("status") == "paused":
                            task["message"] = "任务已暂停"
                            global_state.save_state()
                            global_state.download_queue.put((neg_priority, task_id))
            # Out of retries: mark the task failed.
            if retry >= max_retries:
                with global_state.operation_lock:
                    if task_id in global_state.state["download_tasks"]:
                        task = global_state.state["download_tasks"][task_id]
                        task["status"] = "failed"
                        task["end_time"] = time.time()
                        task["message"] = f"下载失败: 已达到最大重试次数"
                        global_state.save_state()
                continue
            time.sleep(5)
            global_state.download_queue.task_done()
        except queue.Empty:
            continue
        except Exception as e:
            print(f"下载线程异常: {e}")
            time.sleep(1)
def download_process_impl(task_id, model_id, local_dir):
    """Entry point for the download child process.

    Runs snapshot_download into `local_dir` and returns a summary dict.
    NOTE(review): when run via multiprocessing.Process the return value is
    discarded — the parent only inspects the process exit code.
    """
    print(f"download process start: {model_id}")
    try:
        local_path = os.path.join(local_dir, model_id.replace("/", os.path.sep))
        os.makedirs(os.path.dirname(local_path), exist_ok=True)
        # Perform the actual download ("master" is the revision used here).
        snapshot_download(
            model_id=model_id,
            cache_dir=local_dir,
            revision="master"
        )
        print(f"download process success: {model_id} -> {local_dir}")
        if os.path.exists(local_path):
            file_count = get_file_count(local_path)
            size = get_dir_size(local_path)
            return {
                "success": True,
                "file_count": file_count,
                "size": size,
                "size_str": format_size(size)
            }
        else:
            raise FileNotFoundError(f"下载后未找到: {local_path}")
    except Exception as e:
        print(f"download process failed: {e}")
        return {
            "success": False,
            "error": str(e)
        }
def download_model_impl(task_id, model_id):
    """Run a download in a child process, polling for pause/delete.

    Returns True on success. Returns False when the task was paused or
    deleted (the child process is terminated, escalating to kill).
    Raises on configuration errors or a failed child process.
    """
    local_dir = global_state.config.get("local", {}).get("default_model_path", "")
    if not local_dir:
        raise ValueError("未设置本地目录")
    with global_state.operation_lock:
        if task_id in global_state.state["download_tasks"]:
            task = global_state.state["download_tasks"][task_id]
            task["message"] = "正在下载..."
            global_state.save_state()
    if not HAS_MODELSCOPE:
        raise ImportError("modelscope未安装")
    # A separate process (not a thread) so a pause/delete can terminate it.
    process = multiprocessing.Process(
        target=download_process_impl,
        args=(task_id, model_id, local_dir),
        name=f"download_process_{task_id}"
    )
    with global_state.operation_lock:
        global_state.download_processes[task_id] = process
    process.start()
    # Poll once per second until the process exits or the task is
    # paused/deleted.
    while process.is_alive():
        time.sleep(1)
        with global_state.operation_lock:
            if task_id not in global_state.state["download_tasks"]:
                # Task was deleted: terminate, escalate to kill if needed.
                process.terminate()
                process.join(timeout=5)
                if process.is_alive():
                    process.kill()
                return False
            task = global_state.state["download_tasks"][task_id]
            if task.get("status") == "paused":
                # Task was paused: same termination procedure.
                process.terminate()
                process.join(timeout=5)
                if process.is_alive():
                    process.kill()
                return False
    # Process finished: drop the bookkeeping entry and read the outcome.
    with global_state.operation_lock:
        if task_id in global_state.download_processes:
            del global_state.download_processes[task_id]
    if process.exitcode == 0:
        # Success path: verify the files landed, then record stats.
        local_path = os.path.join(local_dir, model_id.replace("/", os.path.sep))
        if os.path.exists(local_path):
            file_count = get_file_count(local_path)
            size = get_dir_size(local_path)
            with global_state.operation_lock:
                if task_id in global_state.state["download_tasks"]:
                    task = global_state.state["download_tasks"][task_id]
                    task["status"] = "completed"
                    task["progress"] = 100
                    task["total_files"] = file_count
                    task["completed_files"] = file_count
                    task["message"] = f"下载完成: {file_count}文件, {format_size(size)}"
                    task["end_time"] = time.time()
                    global_state.save_state()
            return True
        else:
            raise FileNotFoundError(f"下载后未找到: {local_path}")
    else:
        # Non-zero exit: the child failed (or was killed externally).
        raise RuntimeError(f"下载进程退出码: {process.exitcode}")
def upload_worker():
    """Daemon loop: pull upload tasks off the FIFO queue and run them.

    A model already being uploaded by another task is requeued; paused
    tasks are requeued without running. Bookkeeping (active_uploads slot,
    task_done) is released on every path.
    """
    while True:
        try:
            task_id = global_state.upload_queue.get(timeout=1)
            with global_state.operation_lock:
                if task_id not in global_state.state["upload_tasks"]:
                    continue
                task = global_state.state["upload_tasks"][task_id]
                model_id = task["model_id"]
                local_path = task["local_path"]
                # BUG FIX: the original used `continue` inside a for-loop
                # over active_uploads, which only advanced that inner loop
                # and then fell through to start a *duplicate* upload of the
                # same model. Use an explicit busy check instead (mirrors
                # download_worker's model_in_progress pattern).
                model_busy = any(
                    active_model == model_id
                    for active_model in global_state.active_uploads.values()
                )
                if model_busy:
                    global_state.upload_queue.put(task_id)
                    continue
                global_state.active_uploads[task_id] = model_id
                task["status"] = "uploading"
                task["start_time"] = time.time()
                task["message"] = "开始上传..."
                global_state.save_state()
            try:
                # Re-check for a pause requested between dequeue and start.
                with global_state.operation_lock:
                    if task_id not in global_state.state["upload_tasks"]:
                        continue
                    task = global_state.state["upload_tasks"][task_id]
                    if task.get("status") == "paused":
                        task["message"] = "任务已暂停"
                        global_state.save_state()
                        # Requeue so the task can resume later.
                        global_state.upload_queue.put(task_id)
                        continue
                upload_model_impl(task_id, model_id, local_path)
                # Optionally delete the local copy after a successful upload.
                with global_state.operation_lock:
                    task = global_state.state["upload_tasks"].get(task_id)
                    if task and task["status"] == "completed" and task.get("auto_delete"):
                        try:
                            if os.path.exists(local_path):
                                shutil.rmtree(local_path)
                                task["message"] += " (已自动删除本地)"
                        except Exception as e:
                            print(f"自动删除失败: {e}")
            except Exception as e:
                with global_state.operation_lock:
                    if task_id in global_state.state["upload_tasks"]:
                        task = global_state.state["upload_tasks"][task_id]
                        task["status"] = "failed"
                        task["end_time"] = time.time()
                        task["message"] = f"上传失败: {str(e)[:50]}"
                        global_state.save_state()
            finally:
                # FIX: run cleanup on *every* exit path (the original skipped
                # it when the paused branch `continue`d, leaking the
                # active_uploads slot and the queue's task_done accounting).
                with global_state.operation_lock:
                    if task_id in global_state.active_uploads:
                        del global_state.active_uploads[task_id]
                global_state.upload_queue.task_done()
        except queue.Empty:
            continue
        except Exception as e:
            print(f"上传线程异常: {e}")
            time.sleep(1)
def upload_model_impl(task_id, model_id, local_path):
    """Create the CsgHub repo and upload `local_path` as its contents.

    Raises ImportError/ValueError on missing dependency or configuration;
    any exception from the hub client propagates to the caller
    (upload_worker marks the task failed).
    """
    if not HAS_PYCSGHUB:
        raise ImportError("pycsghub未安装")
    csghub_config = global_state.config.get("csghub", {})
    if not csghub_config.get("base_url") or not csghub_config.get("token"):
        raise ValueError("CSGHUB配置不完整")
    with global_state.operation_lock:
        if task_id in global_state.state["upload_tasks"]:
            task = global_state.state["upload_tasks"][task_id]
            task["message"] = "正在连接..."
            global_state.save_state()
    csg_api = CsgHubApi()
    # NOTE(review): uploads always land under the "root" namespace, keeping
    # only the model name — confirm this is the intended target org.
    repo_id = f"root/{model_id.split('/')[1]}"
    create_repo(
        api=csg_api,
        repo_id=repo_id,
        repo_type=csghub_config["repo_type"],
        revision=csghub_config["revision"],
        endpoint=csghub_config["base_url"],
        token=csghub_config["token"]
    )
    file_count = get_file_count(local_path)
    with global_state.operation_lock:
        if task_id in global_state.state["upload_tasks"]:
            task = global_state.state["upload_tasks"][task_id]
            task["total_files"] = file_count
            task["message"] = f"上传中: 0/{file_count}"
            global_state.save_state()
    # FIX: num_workers was hardcoded to 1, silently ignoring the
    # upload.num_workers setting that the config file exposes.
    num_workers = global_state.config.get("upload", {}).get("num_workers", 1)
    upload_large_folder_internal(
        repo_id=repo_id,
        local_path=local_path,
        repo_type=csghub_config["repo_type"],
        revision=csghub_config["revision"],
        endpoint=csghub_config["base_url"],
        token=csghub_config["token"],
        num_workers=num_workers,
        print_report=False,
        allow_patterns=None,
        ignore_patterns=None,
        print_report_every=1,
    )
    with global_state.operation_lock:
        if task_id in global_state.state["upload_tasks"]:
            task = global_state.state["upload_tasks"][task_id]
            task["status"] = "completed"
            task["progress"] = 100
            task["completed_files"] = file_count
            task["message"] = f"上传完成: {file_count}文件"
            task["end_time"] = time.time()
            global_state.save_state()
def get_downloads_data():
    """Build table rows for the download-task grid.

    Rows are ordered by priority (desc), then active downloads first,
    then start time.
    """
    with global_state.operation_lock:
        snapshot = list(global_state.state["download_tasks"].values())
    if not snapshot:
        return []

    def order_key(t):
        return (
            -t.get("priority", 0),
            0 if t["status"] == "downloading" else 1,
            t.get("start_time") or 0,
        )

    rows = []
    for t in sorted(snapshot, key=order_key):
        total = t.get("total_files", 0)
        done = t.get("completed_files", 0)
        files_info = f"{done}/{total}" if total > 0 else "0/0"
        rows.append([
            t.get("task_id", ""),
            t.get("model_id", ""),
            t.get("status", ""),
            t.get("progress", 0),
            t.get("estimated_size", "未知"),
            files_info,
            t.get("message", ""),
        ])
    return rows
def get_uploads_data():
    """Build table rows for the upload-task grid (insertion order)."""
    with global_state.operation_lock:
        snapshot = list(global_state.state["upload_tasks"].values())
    return [
        [
            t.get("model_id", ""),
            t.get("status", ""),
            t.get("progress", 0),
            t.get("completed_files", 0),
            t.get("total_files", 0),
            t.get("message", ""),
        ]
        for t in snapshot
    ]
def get_local_models_data():
    """Build table rows for local models: uploaded first, then by size desc."""
    found = scan_local_models()
    if not found:
        return []
    ordered = sorted(found, key=lambda m: (0 if m["uploaded"] else 1, -m["size_bytes"]))
    return [
        [
            m.get("id", ""),
            m.get("status", ""),
            m.get("size", ""),
            m.get("file_count", 0),
            "是" if m.get("uploaded") else "否",
        ]
        for m in ordered
    ]
# Build the combined task-table data (downloads + uploads + local models).
def get_combined_tasks_data():
    """Merge download tasks, upload tasks and local models into one table.

    Returns rows of [model_id, type, status, progress, size, file_count,
    message], where type is "download", "upload" or "local".
    """
    # Gather all three data sources.
    downloads = get_downloads_data()
    uploads = get_uploads_data()
    local_models = get_local_models_data()
    # Normalize each source into a common dict shape.
    combined = []
    # Download rows: [task_id, model_id, status, progress, size, files, msg].
    for task in downloads:
        combined.append({
            "type": "download",
            "id": task[0],  # task id
            "model_id": task[1],
            "status": task[2],
            "progress": task[3],
            "size": task[4],
            # files column is "done/total"; keep only the total here.
            "file_count": task[5].split("/")[-1] if task[5] else "0",
            "message": task[6]
        })
    # Upload rows: [model_id, status, progress, done, total, msg].
    for task in uploads:
        combined.append({
            "type": "upload",
            "id": task[0],  # model id
            "model_id": task[0],
            "status": task[1],
            "progress": task[2],
            "size": "",
            "file_count": f"{task[3]}/{task[4]}",
            "message": task[5]
        })
    # Local model rows: [model_id, status, size, files, uploaded("是"/"否")].
    for model in local_models:
        combined.append({
            "type": "local",
            "id": model[0],  # model id
            "model_id": model[0],
            "status": model[1],
            "progress": "",
            "size": model[2],
            "file_count": model[3],
            "message": "已上传" if model[4] == "是" else "未上传"
        })
    # Flatten into the Dataframe's row format.
    data = []
    for item in combined:
        data.append([
            item["model_id"],
            item["type"],  # task type
            item["status"],
            item["progress"],
            item["size"],
            item["file_count"],
            item["message"]
        ])
    return data
def refresh_all():
    """Recompute the values backing the download table and the combined table."""
    download_rows = get_downloads_data()
    combined_rows = get_combined_tasks_data()
    return (download_rows, combined_rows)
def start_workers():
    """Spawn daemon worker threads for downloads and uploads.

    Thread counts come from the download.max_concurrent and
    upload.num_workers configuration entries (both default to 1).
    """
    for i in range(global_state.config.get("download", {}).get("max_concurrent", 1)):
        t = threading.Thread(target=download_worker, daemon=True, name=f"dl_worker_{i}")
        t.start()
    for i in range(global_state.config.get("upload", {}).get("num_workers", 1)):
        t = threading.Thread(target=upload_worker, daemon=True, name=f"ul_worker_{i}")
        t.start()
def state_cleaner():
    """Background janitor: once a minute, drop finished tasks older than 5 min."""
    while True:
        time.sleep(60)
        with global_state.operation_lock:
            for task_type, tasks in [("download", global_state.state["download_tasks"]),
                                     ("upload", global_state.state["upload_tasks"])]:
                to_remove = []
                for tid, task in tasks.items():
                    # Only terminal states are eligible for cleanup.
                    if task.get("status") in ["completed", "failed", "cancelled"]:
                        if time.time() - task.get("end_time", 0) > 300:
                            to_remove.append(tid)
                # Delete outside the iteration to avoid mutating mid-loop.
                for tid in to_remove:
                    del tasks[tid]
            global_state.save_state()
# The cleaner starts at import time as a daemon thread.
t = threading.Thread(target=state_cleaner, daemon=True, name="state_cleaner")
t.start()
def create_interface():
# 创建主题切换器
with gr.Blocks(title="模型下载管理器", theme=gr.themes.Base()) as app:
gr.Markdown("# 模型下载管理器")
# 创建左侧按钮组和主内容区域
with gr.Row():
# 左侧按钮列
with gr.Column(scale=1, min_width=150):
config_btn = gr.Button("配置", variant="primary", size="lg")
download_btn = gr.Button("下载", variant="secondary", size="lg")
manage_btn = gr.Button("管理", variant="secondary", size="lg")
# 右侧主内容区域
with gr.Column(scale=4):
# 创建状态变量来跟踪当前选中的标签
current_tab = gr.State("config_tab")
# 根据当前标签显示不同内容
with gr.Column(visible=True) as config_tab_content:
gr.Markdown("## 本地路径配置")
local_dir_input = gr.Textbox(
label="本地模型目录",
value=global_state.config.get("local", {}).get("default_model_path", ""),
placeholder="请输入本地模型存储目录"
)
set_dir_btn = gr.Button("保存目录", variant="primary")
config_result = gr.Textbox(label="配置结果", interactive=False)
gr.Markdown("## 高级配置")
auto_upload_cb = gr.Checkbox(
label="下载后自动上传",
value=global_state.config.get("auto_upload_after_download", False)
)
auto_delete_cb = gr.Checkbox(
label="上传后自动删除",
value=global_state.config.get("auto_delete_after_upload", False)
)
update_config_btn = gr.Button("更新高级配置")
with gr.Column(visible=False) as download_tab_content:
gr.Markdown("## 下载模型")
model_id_input = gr.Textbox(
label="模型ID",
placeholder="例如: Qwen/Qwen2.5-7B-Instruct"
)
add_dl_btn = gr.Button("添加下载任务", variant="primary")
gr.Markdown("## 下载任务列表")
selected_task = gr.Textbox(label="选中的任务ID", interactive=False)
task_result = gr.Textbox(label="操作结果", interactive=False)
with gr.Row():
refresh_btn = gr.Button("刷新任务列表")
pause_btn = gr.Button("暂停任务")
resume_btn = gr.Button("恢复任务")
delete_task_btn = gr.Button("删除任务")
with gr.Row():
move_top_btn = gr.Button("任务置顶")
move_up_btn = gr.Button("任务上移")
move_down_btn = gr.Button("任务下移")
downloads_table = gr.Dataframe(
headers=["任务ID", "模型ID", "状态", "进度", "预估大小", "文件数", "消息"],
datatype=["str", "str", "str", "number", "str", "number", "str"],
value=get_downloads_data(),
interactive=False
)
with gr.Column(visible=False) as manage_tab_content:
# 顶部扫描模型按钮
gr.Markdown("## 模型任务管理")
scan_btn = gr.Button("扫描模型", variant="primary", size="lg")
scan_result = gr.Textbox(label="扫描结果", interactive=False, container=False)
# 综合任务管理表格
combined_tasks_table = gr.Dataframe(
headers=["模型ID", "任务类型", "状态", "进度", "大小", "文件数", "消息"],
datatype=["str", "str", "str", "str", "str", "str", "str"],
value=get_combined_tasks_data(),
interactive=False
)
# 表格操作按钮
with gr.Row():
selected_model_id = gr.Textbox(label="选中的模型ID", interactive=False)
# 综合任务表格选择
def select_combined_task(evt: gr.SelectData):
try:
# 获取当前显示的所有任务数据
tasks = get_combined_tasks_data()
# 处理不同格式的索引
row_idx = None
if isinstance(evt.index, (list, tuple)) and len(evt.index) >= 1:
row_idx = evt.index[0]
elif isinstance(evt.index, int):
row_idx = evt.index
# 获取选中行的模型ID
if isinstance(row_idx, int) and 0 <= row_idx < len(tasks):
return tasks[row_idx][0] # 返回模型ID
except Exception as e:
print(f"选择任务时出错: {e}")
return ""
combined_tasks_table.select(select_combined_task, outputs=[selected_model_id])
with gr.Row():
upload_task_btn = gr.Button("添加上传任务", variant="primary")
delete_model_btn = gr.Button("删除模型", variant="secondary")
upload_result = gr.Textbox(label="操作结果", interactive=False, container=False)
# 批量操作按钮
with gr.Row():
delete_failed_tasks_btn = gr.Button("删除失败任务", variant="secondary")
delete_completed_tasks_btn = gr.Button("删除已完成任务", variant="secondary")
delete_tasks_result = gr.Textbox(label="批量操作结果", interactive=False, container=False)
# 配置保存
def save_config_dir(d):
    """Persist the chosen local model directory into the config."""
    if not d:
        return "目录无效"
    global_state.config.get("local", {}).update({"default_model_path": d})
    global_state.save_config()
    return "目录已设置"
# Save the local directory, then refresh both tables.
set_dir_btn.click(
    fn=save_config_dir,
    inputs=[local_dir_input],
    outputs=[config_result]
).then(fn=refresh_all, outputs=[downloads_table, combined_tasks_table])
# Advanced configuration: auto-upload / auto-delete toggles.
update_config_btn.click(
    fn=lambda au, ad: (global_state.config.update({
        "auto_upload_after_download": au,
        "auto_delete_after_upload": ad
    }), global_state.save_config(), "配置已更新"),
    inputs=[auto_upload_cb, auto_delete_cb],
    outputs=[config_result]
)
# Download-task creation handler follows.
def add_download(mid, au, ad):
    """Create a download task for model `mid`.

    `au`/`ad` mirror the auto-upload / auto-delete checkboxes and are
    persisted into the config before the task is created.
    Returns a human-readable status message.
    """
    local_dir = global_state.config.get("local", {}).get("default_model_path", "")
    if not local_dir:
        return "请先设置本地目录"
    if not mid:
        # Bug fix: previously fell through and implicitly returned None;
        # give explicit feedback instead.
        return "请先输入模型ID"
    global_state.config.update({
        "auto_upload_after_download": au,
        "auto_delete_after_upload": ad
    })
    global_state.save_config()
    # Query the hub for the remote file count so progress can be reported.
    remote_file_count = get_remote_file_count(mid)
    # New tasks start at default priority 0.
    task_id = create_download_task(mid, 0)
    # Record the total file count on the freshly created task.
    with global_state.operation_lock:
        if task_id in global_state.state["download_tasks"]:
            task = global_state.state["download_tasks"][task_id]
            task["total_files"] = remote_file_count
            global_state.save_state()
    return "任务已添加"
# Queue a new download, then refresh both tables.
add_dl_btn.click(
    fn=add_download,
    inputs=[model_id_input, auto_upload_cb, auto_delete_cb],
    outputs=None
).then(fn=refresh_all, outputs=[downloads_table, combined_tasks_table])
# Upload-task creation handler follows.
def add_upload(mid):
    """Queue an upload task for the model currently selected in the table."""
    base_dir = global_state.config.get("local", {}).get("default_model_path", "")
    if not base_dir:
        return "请先设置本地目录"
    if not mid:
        return "请先选择一个模型"
    # Model folders mirror the hub ID, with '/' mapped to the OS separator.
    model_path = os.path.join(base_dir, mid.replace("/", os.path.sep))
    create_upload_task(mid, model_path)
    return "任务已添加"
# Queue an upload for the selected model, then refresh both tables.
upload_task_btn.click(
    fn=add_upload,
    inputs=[selected_model_id],
    outputs=[upload_result]
).then(fn=refresh_all, outputs=[downloads_table, combined_tasks_table])
# Model deletion handler follows.
def delete_model(mid):
    """Remove the selected model's directory from the local store."""
    base_dir = global_state.config.get("local", {}).get("default_model_path", "")
    if not base_dir:
        return "请先设置本地目录"
    if not mid:
        return "请先选择一个模型"
    target = os.path.join(base_dir, mid.replace("/", os.path.sep))
    if not os.path.exists(target):
        return "模型不存在"
    shutil.rmtree(target)
    return "已删除"
# Delete the selected model from disk, then refresh both tables.
delete_model_btn.click(
    fn=delete_model,
    inputs=[selected_model_id],
    outputs=[upload_result]
).then(fn=refresh_all, outputs=[downloads_table, combined_tasks_table])
# Pause handler follows.
def pause_download(task_id):
    """Pause a downloading or pending task.

    Per the original comments, the download worker (download_model_impl)
    watches the persisted status and terminates its process when it sees
    "paused"; this function only flips state and lets the worker react.
    """
    if not task_id:
        return "请先选择一个任务"
    with global_state.operation_lock:
        if task_id in global_state.state["download_tasks"]:
            task = global_state.state["download_tasks"][task_id]
            current_status = task.get("status")
            if current_status == "downloading":
                # Mark the task as paused.
                task["status"] = "paused"
                task["message"] = "任务已暂停"
                # Remove it from the active-download set.
                if task_id in global_state.active_downloads:
                    del global_state.active_downloads[task_id]
                # Persist; download_model_impl detects the paused status and
                # terminates the process itself.
                global_state.save_state()
                return f"任务已暂停: {task_id}"
            elif current_status == "pending":
                # A queued task can simply be flagged paused directly.
                task["status"] = "paused"
                task["message"] = "任务已暂停"
                global_state.save_state()
                return f"任务已暂停: {task_id}"
            else:
                return f"任务当前状态: {current_status},无法暂停"
    return "未找到该任务或任务不在可暂停状态"
# Resume a paused task.
def resume_download(task_id):
    """Re-queue a paused download task.

    Consistency fix: mirrors pause_download by prompting when nothing is
    selected instead of reporting "task not found" for an empty task_id.
    """
    if not task_id:
        return "请先选择一个任务"
    with global_state.operation_lock:
        if task_id in global_state.state["download_tasks"]:
            task = global_state.state["download_tasks"][task_id]
            if task.get("status") == "paused":
                task["status"] = "pending"
                task["message"] = "等待下载..."
                # Re-enqueue; priority is negated so higher priority pops first.
                global_state.download_queue.put((-task.get("priority", 0), task_id))
                global_state.save_state()
                return f"任务已恢复: {task_id}"
    return "未找到该任务或任务不在暂停状态"
# Pause the selected task, then refresh both tables.
pause_btn.click(
    fn=pause_download,
    inputs=[selected_task],
    outputs=[task_result]
).then(fn=refresh_all, outputs=[downloads_table, combined_tasks_table])
# Resume button.
resume_btn.click(
    fn=resume_download,
    inputs=[selected_task],
    outputs=[task_result]
).then(fn=refresh_all, outputs=[downloads_table, combined_tasks_table])
# Row-click selection for the downloads table follows.
def select_task(evt: gr.SelectData):
    """Map a click on the downloads table to (task_id, status message)."""
    try:
        # Whole-row selection: evt.value is the row, column 0 is the task ID.
        if isinstance(evt.value, list) and len(evt.value) > 0:
            tid = evt.value[0]
            return tid, f"已选中任务: {tid}"
        # Cell selection: recover the row index from evt.index, which may be
        # a (row, col) pair or a bare index.
        if isinstance(evt.index, (tuple, list)):
            row = evt.index[0]
        else:
            row = evt.index
        rows = get_downloads_data()
        if 0 <= row < len(rows):
            record = rows[row]
            if isinstance(record, list) and len(record) > 0:
                tid = record[0]
                return tid, f"已选中任务: {tid}"
    except Exception as e:
        print(f"选择任务错误: {str(e)}")
    return "", "未选中任何任务"
# Clicking a row in the downloads table selects that task.
downloads_table.select(
    fn=select_task,
    inputs=[],
    outputs=[selected_task, task_result]
)
# Task deletion by task ID follows.
def delete_download_task(task_id):
    """Remove a download task and tear down any process still running it."""
    with global_state.operation_lock:
        if task_id in global_state.state["download_tasks"]:
            # If the task is actively downloading, drop it from the active set.
            if task_id in global_state.active_downloads:
                del global_state.active_downloads[task_id]
            # Terminate the worker process attached to this task, if any.
            if task_id in global_state.download_processes:
                process = global_state.download_processes[task_id]
                process.terminate()
                try:
                    process.join(timeout=3)
                    if process.is_alive():
                        process.kill()
                except Exception:
                    pass
                finally:
                    # Always forget the process handle, even if join/kill failed.
                    del global_state.download_processes[task_id]
            # Drop the task record itself and persist.
            del global_state.state["download_tasks"][task_id]
            global_state.save_state()
            return f"已删除任务: {task_id}"
    return "未找到该任务"
# Move a task to the top of the queue.
def move_task_top(task_id):
    """Give `task_id` the highest priority so it is scheduled next."""
    with global_state.operation_lock:
        if task_id not in global_state.state["download_tasks"]:
            return "未找到该任务"
        # Find the current maximum priority across all download tasks.
        max_priority = max((task.get("priority", 0) for task in global_state.state["download_tasks"].values()), default=0)
        # Bump this task above everything else.
        task = global_state.state["download_tasks"][task_id]
        old_priority = task.get("priority", 0)  # kept for reference; not otherwise used
        task["priority"] = max_priority + 1
        # If it is mid-download, pause it so the scheduler can restart it
        # under the new priority.
        if task.get("status") == "downloading":
            task["status"] = "paused"
            task["message"] = "任务已暂停"
            # Remove it from the active-download set.
            if task_id in global_state.active_downloads:
                del global_state.active_downloads[task_id]
            # Terminate the running download process.
            if task_id in global_state.download_processes:
                process = global_state.download_processes[task_id]
                process.terminate()
                try:
                    process.join(timeout=3)
                    if process.is_alive():
                        process.kill()
                except Exception:
                    pass
                finally:
                    del global_state.download_processes[task_id]
        global_state.save_state()
        return f"任务 {task_id} 已置顶"
# Move a task one position up in the queue.
def move_task_up(task_id):
    """Swap `task_id`'s priority with the task directly above it."""
    with global_state.operation_lock:
        if task_id not in global_state.state["download_tasks"]:
            return "未找到该任务"
        # Sort all tasks by priority (descending), ties broken by start time.
        # The sorted list holds the same dict objects, so mutating an entry
        # mutates the persisted state.
        tasks = sorted(global_state.state["download_tasks"].values(),
                       key=lambda x: (-x.get("priority", 0), x.get("start_time", 0)))
        # Locate this task in the ordered list.
        current_index = -1
        for i, task in enumerate(tasks):
            if task["task_id"] == task_id:
                current_index = i
                break
        if current_index <= 0:
            return "任务已经在最顶部"
        # Swap priorities with the task immediately above.
        current_task = tasks[current_index]
        prev_task = tasks[current_index - 1]
        current_priority = current_task.get("priority", 0)
        prev_priority = prev_task.get("priority", 0)
        current_task["priority"] = prev_priority
        prev_task["priority"] = current_priority
        # If this task is downloading while the one above is pending, pause
        # this one so the pending (now higher-priority) task can run first.
        if current_task.get("status") == "downloading" and prev_task.get("status") == "pending":
            current_task["status"] = "paused"
            current_task["message"] = "任务已暂停"
            # Remove from the active-download set.
            if task_id in global_state.active_downloads:
                del global_state.active_downloads[task_id]
            # Terminate the running download process.
            if task_id in global_state.download_processes:
                process = global_state.download_processes[task_id]
                process.terminate()
                try:
                    process.join(timeout=3)
                    if process.is_alive():
                        process.kill()
                except Exception:
                    pass
                finally:
                    del global_state.download_processes[task_id]
        global_state.save_state()
        return f"任务 {task_id} 已上移"
# Move a task one position down in the queue.
def move_task_down(task_id):
    """Swap `task_id`'s priority with the task directly below it."""
    with global_state.operation_lock:
        if task_id not in global_state.state["download_tasks"]:
            return "未找到该任务"
        # Sort all tasks by priority (descending), ties broken by start time.
        tasks = sorted(global_state.state["download_tasks"].values(),
                       key=lambda x: (-x.get("priority", 0), x.get("start_time", 0)))
        # Locate this task in the ordered list.
        current_index = -1
        for i, task in enumerate(tasks):
            if task["task_id"] == task_id:
                current_index = i
                break
        if current_index >= len(tasks) - 1:
            return "任务已经在最底部"
        # Swap priorities with the task immediately below.
        current_task = tasks[current_index]
        next_task = tasks[current_index + 1]
        current_priority = current_task.get("priority", 0)
        next_priority = next_task.get("priority", 0)
        current_task["priority"] = next_priority
        next_task["priority"] = current_priority
        # NOTE(review): unlike move_task_up, this never pauses a downloading
        # task when it is displaced — confirm whether that asymmetry is
        # intentional.
        global_state.save_state()
        return f"任务 {task_id} 已下移"
# Delete the selected task, then refresh both tables.
delete_task_btn.click(
    fn=delete_download_task,
    inputs=[selected_task],
    outputs=[task_result]
).then(fn=refresh_all, outputs=[downloads_table, combined_tasks_table])
# Reordering buttons: move to top / up / down.
move_top_btn.click(
    fn=move_task_top,
    inputs=[selected_task],
    outputs=[task_result]
).then(fn=refresh_all, outputs=[downloads_table, combined_tasks_table])
move_up_btn.click(
    fn=move_task_up,
    inputs=[selected_task],
    outputs=[task_result]
).then(fn=refresh_all, outputs=[downloads_table, combined_tasks_table])
move_down_btn.click(
    fn=move_task_down,
    inputs=[selected_task],
    outputs=[task_result]
).then(fn=refresh_all, outputs=[downloads_table, combined_tasks_table])
# Manual refresh of both tables.
refresh_btn.click(
    fn=refresh_all,
    outputs=[downloads_table, combined_tasks_table]
)
# Local model scan handler follows.
def scan_models():
    """Rescan the local model directory and refresh both tables.

    Returns (status message, downloads rows, combined-task rows).
    """
    local_dir = global_state.config.get("local", {}).get("default_model_path", "")
    if not local_dir:
        # Bug fix: previously returned empty lists here, which blanked both
        # tables on error; keep showing the current data instead.
        return "请先设置本地目录", get_downloads_data(), get_combined_tasks_data()
    models = scan_local_models()
    return f"已扫描到 {len(models)} 个模型", get_downloads_data(), get_combined_tasks_data()
# Scan local models, updating the result label and both tables.
scan_btn.click(
    fn=scan_models,
    outputs=[scan_result, downloads_table, combined_tasks_table]
)
# Bulk deletion of failed tasks follows.
def delete_failed_tasks():
    """Purge every failed download and upload task from persisted state."""
    with global_state.operation_lock:
        # Collect first, delete second — never mutate a dict while iterating.
        failed_dl = [tid for tid, t in global_state.state["download_tasks"].items()
                     if t.get("status") == "failed"]
        for tid in failed_dl:
            del global_state.state["download_tasks"][tid]
        failed_ul = [tid for tid, t in global_state.state["upload_tasks"].items()
                     if t.get("status") == "failed"]
        for tid in failed_ul:
            del global_state.state["upload_tasks"][tid]
        global_state.save_state()
        return f"已删除 {len(failed_dl)} 个失败的下载任务和 {len(failed_ul)} 个失败的上传任务"
# Remove all failed tasks, then refresh both tables.
delete_failed_tasks_btn.click(
    fn=delete_failed_tasks,
    outputs=[delete_tasks_result]
).then(fn=refresh_all, outputs=[downloads_table, combined_tasks_table])
# Bulk deletion of completed tasks follows.
def delete_completed_tasks():
    """Purge every completed download and upload task from persisted state."""
    with global_state.operation_lock:
        # Collect first, delete second — never mutate a dict while iterating.
        done_dl = [tid for tid, t in global_state.state["download_tasks"].items()
                   if t.get("status") == "completed"]
        for tid in done_dl:
            del global_state.state["download_tasks"][tid]
        done_ul = [tid for tid, t in global_state.state["upload_tasks"].items()
                   if t.get("status") == "completed"]
        for tid in done_ul:
            del global_state.state["upload_tasks"][tid]
        global_state.save_state()
        return f"已删除 {len(done_dl)} 个已完成的下载任务和 {len(done_ul)} 个已完成的上传任务"
# Remove all completed tasks, then refresh both tables.
delete_completed_tasks_btn.click(
    fn=delete_completed_tasks,
    outputs=[delete_tasks_result]
).then(fn=refresh_all, outputs=[downloads_table, combined_tasks_table])
# Tab-switching handlers follow.
def switch_to_config_tab():
    """Show the config panel and mark the config nav button as active."""
    # Order: tab name, 3 column visibilities, 3 button variants — the same
    # positional contract the click wiring expects.
    visibility = (True, False, False)       # config / download / manage columns
    variants = ("primary", "secondary", "secondary")  # config / download / manage buttons
    cols = tuple(gr.Column(visible=v) for v in visibility)
    btns = tuple(gr.Button(variant=s) for s in variants)
    return ("config_tab",) + cols + btns
def switch_to_download_tab():
    """Show the download panel and mark the download nav button as active."""
    visibility = (False, True, False)       # config / download / manage columns
    variants = ("secondary", "primary", "secondary")  # config / download / manage buttons
    cols = tuple(gr.Column(visible=v) for v in visibility)
    btns = tuple(gr.Button(variant=s) for s in variants)
    return ("download_tab",) + cols + btns
def switch_to_manage_tab():
    """Show the management panel and mark the manage nav button as active."""
    visibility = (False, False, True)       # config / download / manage columns
    variants = ("secondary", "secondary", "primary")  # config / download / manage buttons
    cols = tuple(gr.Column(visible=v) for v in visibility)
    btns = tuple(gr.Button(variant=s) for s in variants)
    return ("manage_tab",) + cols + btns
# Left-hand navigation button wiring: each click swaps column visibility
# and restyles the three nav buttons.
config_btn.click(
    fn=switch_to_config_tab,
    outputs=[current_tab, config_tab_content, download_tab_content, manage_tab_content, config_btn, download_btn, manage_btn]
)
download_btn.click(
    fn=switch_to_download_tab,
    outputs=[current_tab, config_tab_content, download_tab_content, manage_tab_content, config_btn, download_btn, manage_btn]
)
manage_btn.click(
    fn=switch_to_manage_tab,
    outputs=[current_tab, config_tab_content, download_tab_content, manage_tab_content, config_btn, download_btn, manage_btn]
)
# Theme switching was removed: per the original note, Gradio 6.0 does not
# support using a Blocks object as an output component.
# Initialization complete.
# Populate both tables when the app first loads.
app.load(fn=refresh_all, outputs=[downloads_table, combined_tasks_table])
return app
def main():
    """Print startup info, start the worker threads, and serve the Gradio UI."""
    banner = "=" * 50
    print(banner)
    print("模型下载管理器")
    print(banner)
    local_cfg = global_state.config.get('local', {})
    hub_cfg = global_state.config.get('csghub', {})
    print(f"本地目录: {local_cfg.get('default_model_path', '未设置')}")
    print(f"CSGHUB地址: {hub_cfg.get('base_url', '未设置')}")
    print(f"modelscope: {'已安装' if HAS_MODELSCOPE else '未安装'}")
    print(f"pycsghub: {'已安装' if HAS_PYCSGHUB else '未安装'}")
    start_workers()
    # Blocking call: serves the UI on all interfaces, port 7865.
    create_interface().launch(
        server_name="0.0.0.0",
        server_port=7865,
        share=False
    )

if __name__ == "__main__":
    main()
{
"local_dir": "/data/DataStore/models/exp-net/new-moon/models",
"max_concurrent_downloads": 1,
"max_concurrent_uploads": 1,
"auto_upload_after_download": false,
"auto_delete_after_upload": false,
"auto_delete_after_download": false
}
\ No newline at end of file
{
"download_tasks": {
"dl_1776058653752_62": {
"task_id": "dl_1776058653752_62",
"model_id": "ZhipuAI/GLM-5.1",
"priority": 11,
"status": "paused",
"progress": 0,
"total_files": 283,
"completed_files": 0,
"estimated_size": "1404.21 GB",
"message": "任务已暂停",
"retry_count": 0,
"auto_upload": false,
"auto_delete": false,
"start_time": 1776063865.4479935,
"end_time": null
},
"dl_1776060948371_3712": {
"task_id": "dl_1776060948371_3712",
"model_id": "MiniMax/MiniMax-M2.7",
"priority": 12,
"status": "paused",
"progress": 0,
"total_files": 126,
"completed_files": 0,
"estimated_size": "214.36 GB",
"message": "任务已暂停",
"retry_count": 0,
"auto_upload": false,
"auto_delete": false,
"start_time": 1776065919.5642004,
"end_time": null
},
"dl_1776066712087_6690": {
"task_id": "dl_1776066712087_6690",
"model_id": "FunAudioLLM/Fun-CineForge",
"priority": 0,
"status": "paused",
"progress": 0,
"total_files": 1,
"completed_files": 0,
"estimated_size": "12.68 GB",
"message": "任务已暂停",
"retry_count": 0,
"auto_upload": false,
"auto_delete": false,
"start_time": 1776066712.2827172,
"end_time": null
}
},
"upload_tasks": {},
"local_models": [],
"remote_cache": {
"file_count_Tencent-Hunyuan/HY-Embodied-0.5": {
"count": 3,
"ts": 1776001069.9385698
},
"size_Tencent-Hunyuan/HY-Embodied-0.5": {
"size": "7.06 GB",
"ts": 1776001070.119618
},
"file_count_ZhipuAI/GLM-5.1": {
"count": 283,
"ts": 1776056541.5302312
},
"size_ZhipuAI/GLM-5.1": {
"size": "1404.21 GB",
"ts": 1776056541.7415226
},
"file_count_ZhipuAI/GLM-5.1-FP8": {
"count": 143,
"ts": 1776043182.2655714
},
"size_ZhipuAI/GLM-5.1-FP8": {
"size": "704.27 GB",
"ts": 1776043185.9523427
},
"file_count_MiniMax/MiniMax-M2.7": {
"count": 126,
"ts": 1776060948.3704467
},
"size_MiniMax/MiniMax-M2.7": {
"size": "214.36 GB",
"ts": 1776060948.5605428
},
"file_count_FunAudioLLM/Fun-CineForge": {
"count": 1,
"ts": 1776066712.0869305
},
"size_FunAudioLLM/Fun-CineForge": {
"size": "12.68 GB",
"ts": 1776066712.2810178
},
"file_count_Qwen/Qwen3.5-0.8B": {
"count": 2,
"ts": 1776066996.7542646
},
"size_Qwen/Qwen3.5-0.8B": {
"size": "1.65 GB",
"ts": 1776067001.8658347
}
}
}
\ No newline at end of file
{
"AigcAttributes": "{}",
"AigcIsTop": 0,
"AigcType": "",
"AlreadyStar": False,
"ApplyMeta": "{}",
"ApprovalMode": 1,
"ApprovalNotifyEmail": "",
"Architectures": [],
"Avatar": "https://img.alicdn.com/imgextra/i1/O1CN01yhHrHg1Pdl3UKPhGc_!!6000000001864-2-tps-88-88.png",
"Backbone": [],
"BackendSupport": {
"architectures": None,
"backend_info": {
"deploy_task": None,
"lmdeploy": None,
"lmdeploy_turbomind": None,
"ollama": None,
"sglang": None,
"vllm": None
},
"model_id": "OpenBMB/VoxCPM2"
},
"BaseModel": [],
"BaseModelRelation": "",
"CardReady": 0,
"CardUnreadyReason": "",
"CertificationCreateBy": "",
"CertificationCreatedTime": -62135596800,
"ChineseName": "VoxCPM2",
"CoverImages": [],
"CreatedBy": "LabmemZhouyx",
"CreatedTime": 1775194576,
"DashSdkParameter": "",
"Datasets": {},
"DemoAvailable": 0,
"DemoUnavailableReason": "",
"Description": "",
"Domain": [],
"Downloads": 5067,
"ExampleCodeAvailable": 0,
"ExampleCodeUnavailableReason": "",
"ForbiddenVisibilityUpdate": False,
"Frameworks": [],
"FromSite": "maas",
"Id": 1125292,
"Integrating": 0,
"IntegrationFailureLog": "",
"IntegrationFailureReason": "",
"IsAccessible": 1,
"IsCertification": 4,
"IsHot": 0,
"IsNewModel": True,
"IsOnline": 1,
"IsPreTrain": 0,
"IsPublished": 1,
"IsTop": 41,
"Language": [
"zh",
"en",
"ar",
"my",
"da",
"nl",
"fi",
"fr",
"de",
"el",
"he",
"hi",
"id",
"it",
"ja",
"km",
"ko",
"lo",
"ms",
"no",
"pl",
"pt",
"ru",
"es",
"sw",
"sv",
"tl",
"th",
"tr",
"vi"
],
"LastUpdatedTime": 1775645023,
"Libraries": [
"safetensors"
],
"License": "apache-2.0",
"Meta": "",
"Metrics": [],
"ModelDetail": {},
"ModelInfos": {
"safetensor": {
"chat_template": "{% for message in messages %}{{"<|im_start|>" + message["role"] + "\n" + message["content"] + "<|im_end|>" + "\n"}}{% endfor %}{% if add_generation_prompt %}{{ "<|im_start|>assistant\n" }}{% endif %}",
"files": [
{
"name": "model.safetensors",
"sha256": "f7f964cfa9da23653baec6e6f7750719977ad944ed9f95fe52fe3a620506891d",
"size": 4580080592
}
],
"model_size": 2290004544,
"tensor_type": [
"BF16"
]
}
},
"ModelRevisions": None,
"ModelSource": "USER_UPLOAD",
"ModelTools": "",
"ModelType": [],
"MuseInfo": None,
"NEXA": {
"Catalogues": None,
"ModelCover": "",
"ScientificField": "",
"Source": "",
"SubScientificField": None
},
"Name": "VoxCPM2",
"NewVersion": "",
"NickName": "",
"OfficialTags": None,
"OpenAiSwingDeployInfo": {
"Order": 0,
"Recommend": None,
"lmdeploy": {
"eas": {
"Script": "",
"requirements": ""
},
"ens": {
"Script": "",
"requirements": ""
},
"fc": {
"Script": "",
"requirements": ""
},
"image_tag": ""
},
"ollama": {
"eas": {
"Script": "",
"requirements": ""
},
"ens": {
"Script": "",
"requirements": ""
},
"fc": {
"Script": "",
"requirements": ""
},
"image_tag": ""
},
"pipeline": {
"eas": {
"Script": "",
"requirements": ""
},
"ens": {
"Script": "",
"requirements": ""
},
"fc": {
"Script": "",
"requirements": ""
},
"image_tag": ""
},
"vllm": {
"eas": {
"Script": "",
"requirements": ""
},
"ens": {
"Script": "",
"requirements": ""
},
"fc": {
"Script": "",
"requirements": ""
},
"image_tag": ""
}
},
"Organization": {
"ApplyFailureReason": "",
"ApplyReason": "",
"Avatar": "https://resouces.modelscope.cn/avatar/e23b1834-049d-464e-8ffc-4b10093114d0.png",
"CreateCompetition": False,
"CreatedBy": "hicicada",
"Description": "["root",{},["p",{},["span",{"data-type":"text"},["span",{"color":"rgb(101,
109,
118)","data-type":"leaf"},"OpenBMB (Open Lab for Big Model Base) aims to build foundation models and systems towards AGI."]]]]",
"DisplayUrl": "",
"Email": "",
"FromSite": "",
"FullName": "OpenBMB",
"GithubAddress": "https://github.com/OpenBMB",
"GmtCreated": "2023-03-21T07:55:04Z",
"GmtModified": "2025-01-07T09:42:14Z",
"Id": 63,
"InitAdminMembers": "",
"IsApply": False,
"IsCertification": "",
"Mobile": "",
"Name": "OpenBMB",
"Path": "",
"Roles": None,
"StarCnt": 0,
"Status": 0,
"SubscribeVo": None,
"Type": 2
},
"PaiModelGalleryUrl": None,
"PaiSdkParameter": None,
"Path": "OpenBMB",
"ProtectedMode": 2,
"ReadMeContent": "\n# VoxCPM2\n\n**VoxCPM2** is a tokenizer-free, diffusion autoregressive Text-to-Speech model — **2B parameters**, **30 languages**, **48kHz** audio output, trained on over **2 million hours** of multilingual speech data.\n\n[![GitHub](https://img.shields.io/badge/GitHub-VoxCPM-blue?logo=github)](https://github.com/OpenBMB/VoxCPM)\n[![Docs](https://img.shields.io/badge/Docs-ReadTheDocs-8CA1AF)](https://voxcpm.readthedocs.io/en/latest/)\n[![Demo](https://img.shields.io/badge/Live%20Playground-Demo-orange)](https://huggingface.co/spaces/OpenBMB/VoxCPM-Demo)\n[![Audio Samples](https://img.shields.io/badge/Audio%20Samples-Demo%20Page-green)](https://openbmb.github.io/voxcpm2-demopage)\n[![Discord](https://img.shields.io/badge/Discord-VoxCPM-5865F2?logo=discord&logoColor=white)](https://discord.gg/KZUx7tVNwz)\n\n## Highlights\n\n- 🌍 **30-Language Multilingual** — No language tag needed; input text in any supported language directly\n- 🎨 **Voice Design** — Generate a novel voice from a natural-language description alone (gender, age, tone, emotion, pace…); no reference audio required\n- 🎛️ **Controllable Cloning** — Clone any voice from a short clip, with optional style guidance to steer emotion, pace, and expression while preserving timbre\n- 🎙️ **Ultimate Cloning** — Provide reference audio + its transcript for audio-continuation cloning; every vocal nuance faithfully reproduced\n- 🔊 **48kHz Studio-Quality Output** — Accepts 16kHz reference; outputs 48kHz via AudioVAE V2\"s built-in super-resolution, no external upsampler needed\n- 🧠 **Context-Aware Synthesis** — Automatically infers appropriate prosody and expressiveness from text content\n- ⚡ **Real-Time Streaming** — RTF as low as ~0.3 on NVIDIA RTX 4090, and ~0.13 accelerated by [Nano-VLLM](https://github.com/a710128/nanovllm-voxcpm)\n- 📜 **Fully Open-Source & Commercial-Ready** — Apache-2.0 license, free for commercial use\n\n国内用户欢迎访问官网体验:https://voxcpm.modelbest.cn/\n\n<summary><b>Supported 
Languages (30)</b></summary>\n\nArabic, Burmese, Chinese, Danish, Dutch, English, Finnish, French, German, Greek, Hebrew, Hindi, Indonesian, Italian, Japanese, Khmer, Korean, Lao, Malay, Norwegian, Polish, Portuguese, Russian, Spanish, Swahili, Swedish, Tagalog, Thai, Turkish, Vietnamese\n\nChinese Dialects: 四川话, 粤语, 吴语, 东北话, 河南话, 陕西话, 山东话, 天津话, 闽南话\n\n\n## Quick Start\n\n### Installation\n\n```bash\npip install voxcpm\n```\n\n**Requirements:** Python ≥ 3.10, PyTorch ≥ 2.5.0, CUDA ≥ 12.0 · [Full Quick Start →](https://voxcpm.readthedocs.io/en/latest/quickstart.html)\n\n### Text-to-Speech\n\n```python\nfrom voxcpm import VoxCPM\nimport soundfile as sf\n\nmodel = VoxCPM.from_pretrained("openbmb/VoxCPM2", load_denoiser=False)\n\nwav = model.generate(\n text="VoxCPM2 brings multilingual support, creative voice design, and controllable voice cloning.",\n cfg_value=2.0,\n inference_timesteps=10,\n)\nsf.write("output.wav", wav, model.tts_model.sample_rate)\n```\n\nIf you prefer downloading from ModelScope first, you can use:\n\n```bash\npip install modelscope\n```\n\n```python\nfrom modelscope import snapshot_download\nsnapshot_download("OpenBMB/VoxCPM2", local_dir=\"./pretrained_models/VoxCPM2\") # specify the local directory to save the model\n\nfrom voxcpm import VoxCPM\nimport soundfile as sf\nmodel = VoxCPM.from_pretrained("./pretrained_models/VoxCPM2", load_denoiser=False)\n\nwav = model.generate(\n text="VoxCPM2 is the current recommended release for realistic multilingual speech synthesis.",\n cfg_value=2.0,\n inference_timesteps=10,\n)\nsf.write("demo.wav", wav, model.tts_model.sample_rate)\n```\n\n### Voice Design\n\nPut the voice description in parentheses at the start of `text`, followed by the content to synthesize:\n\n```python\nwav = model.generate(\n text="(A young woman, gentle and sweet voice)Hello, welcome to VoxCPM2!",\n cfg_value=2.0,\n inference_timesteps=10,\n)\nsf.write("voice_design.wav", wav, model.tts_model.sample_rate)\n```\n\n### Controllable 
Voice Cloning\n\n```python\n# Basic cloning\nwav = model.generate(\n text="This is a cloned voice generated by VoxCPM2.",\n reference_wav_path="speaker.wav",\n)\nsf.write("clone.wav", wav, model.tts_model.sample_rate)\n\n# Cloning with style control\nwav = model.generate(\n text="(slightly faster, cheerful tone)This is a cloned voice with style control.",\n reference_wav_path="speaker.wav",\n cfg_value=2.0,\n inference_timesteps=10,\n)\nsf.write("controllable_clone.wav", wav, model.tts_model.sample_rate)\n```\n\n### Ultimate Cloning\n\nProvide both the reference audio and its exact transcript for maximum fidelity. Pass the same clip to both `reference_wav_path` and `prompt_wav_path` for highest similarity:\n\n```python\nwav = model.generate(\n text="This is an ultimate cloning demonstration using VoxCPM2.",\n prompt_wav_path="speaker_reference.wav",\n prompt_text="The transcript of the reference audio.",\n reference_wav_path="speaker_reference.wav",\n)\nsf.write("hifi_clone.wav", wav, model.tts_model.sample_rate)\n```\n\n### Streaming\n\n```python\nimport numpy as np\n\nchunks = []\nfor chunk in model.generate_streaming(text="Streaming is easy with VoxCPM!"):\n chunks.append(chunk)\nwav = np.concatenate(chunks)\nsf.write("streaming.wav", wav, model.tts_model.sample_rate)\n```\n\n## Model Details\n\n| Property | Value |\n|---|---|\n| Architecture | Tokenizer-free Diffusion Autoregressive (LocEnc → TSLM → RALM → LocDiT) |\n| Backbone | Based on MiniCPM-4, totally 2B parameters |\n| Audio VAE | AudioVAE V2 (asymmetric encode/decode, 16kHz in → 48kHz out) |\n| Training Data | 2M+ hours multilingual speech |\n| LM Token Rate | 6.25 Hz |\n| Max Sequence Length | 8192 tokens |\n| dtype | bfloat16 |\n| VRAM | ~8 GB |\n| RTF (RTX 4090) | ~0.30 (standard) / ~0.13 (Nano-vLLM) |\n\n## Performance\n\nVoxCPM2 achieves state-of-the-art or competitive results on major zero-shot and controllable TTS benchmarks.\n\nSee the [GitHub 
repo](https://github.com/OpenBMB/VoxCPM#-performance) for full benchmark tables (Seed-TTS-eval, CV3-eval, InstructTTSEval, MiniMax Multilingual Test).\n\n## Fine-tuning\n\nVoxCPM2 supports both full SFT and LoRA fine-tuning with as little as 5–10 minutes of audio:\n\n```bash\n# LoRA fine-tuning (recommended)\npython scripts/train_voxcpm_finetune.py \\\n --config_path conf/voxcpm_v2/voxcpm_finetune_lora.yaml\n\n# Full fine-tuning\npython scripts/train_voxcpm_finetune.py \\\n --config_path conf/voxcpm_v2/voxcpm_finetune_all.yaml\n```\n\nSee the [Fine-tuning Guide](https://voxcpm.readthedocs.io/en/latest/finetuning/finetune.html) for full instructions.\n\n## Limitations\n\n- Voice Design and Style Control results may vary between runs; generating 1–3 times is recommended to obtain the desired output.\n- Performance varies across languages depending on training data availability.\n- Occasional instability may occur with very long or highly expressive inputs.\n- **Strictly forbidden** to use for impersonation, fraud, or disinformation. AI-generated content should be clearly labeled.\n\n## Citation\n\n```bibtex\n@article{voxcpm2_2026,\n title = {VoxCPM2: Tokenizer-Free TTS for Multilingual Speech Generation, Creative Voice Design, and True-to-Life Cloning},\n author = {VoxCPM Team},\n journal = {GitHub},\n year = {2026},\n}\n\n@article{voxcpm2025,\n title = {VoxCPM: Tokenizer-Free TTS for Context-Aware Speech Generation and True-to-Life Voice Cloning},\n author = {Zhou, Yixuan and Zeng, Guoyang and Liu, Xin and Li, Xiang and\n Yu, Renjie and Wang, Ziyang and Ye, Runchuan and Sun, Weiyue and\n Gui, Jiancheng and Li, Kehan and Wu, Zhiyong and Liu, Zhiyuan},\n journal = {arXiv preprint arXiv:2509.24650},\n year = {2025},\n}\n```\n\n## License\n\nReleased under the [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) license, free for commercial use. For production deployments, we recommend thorough testing and safety evaluation tailored to your use case.\n\n",
"ReadMeTips": None,
"RelatedArxivId": [
"2509.24650"
],
"RelatedPaper": [
192183
],
"Revision": "master",
"Stars": 56,
"StorageSize": 4960729800,
"Studios": [],
"SubVisionFoundation": "",
"SupportApiInference": False,
"SupportDashDeployment": 0,
"SupportDashInference": 0,
"SupportDashTraining": 0,
"SupportDeployment": 0,
"SupportExperience": 0,
"SupportFinetuning": 0,
"SupportFlexTrain": 0,
"SupportInference": "",
"SupportPaiModelGallery": None,
"SupportPaiSdk": 0,
"SwingDeployInfo": None,
"Tags": [
"text-to-speech",
"tts",
"multilingual",
"voice-cloning",
"voice-design",
"diffusion",
"audio"
],
"Tasks": [
{
"ChineseName": "语音合成",
"Description": "",
"DomainName": "audio",
"Id": 32,
"IsExhibition": True,
"IsHot": 0,
"IsLeaf": True,
"IsLoginRequired": True,
"IsRetrieval": True,
"Level": 1,
"Name": "text-to-speech",
"ParentId": -1,
"ParentTask": None,
"Sorting": 0,
"SupportWidgets": True,
"TypicalModel": "",
"WidgetConfig": "{"task": "text-to-speech", "inputs": [{"type": "text", "validator": {"max_words": 300}, "displayType": "OnlyTextArea"}], "output": {"displayType": "AudioPlayer", "transformOutputs": [{"fileType": "pcm", "outputKey": "output_pcm"}], "displayOutputMapping": "output_pcm"}, "examples": []}",
"WidgetValidator": ""
}
],
"Tools": [],
"TriggerWords": None,
"Visibility": 5,
"VisionFoundation": "",
"_": None,
"widgets": []
}
\ No newline at end of file
--- /usr/local/lib/python3.10/dist-packages/pycsghub/csghub_api.py 2026-04-12 00:00:00.000000000 +0000
+++ /usr/local/lib/python3.10/dist-packages/pycsghub/csghub_api.py 2026-04-12 00:00:00.000000000 +0000
@@ -178,7 +178,7 @@
"name": name,
"nickname": name,
"default_branch": DEFAULT_REVISION,
- "private": True,
+ "private": False,
"license": DEFAULT_LICENCE,
}
\ No newline at end of file
from modelscope.hub.api import HubApi

# Create an API client instance.
api = HubApi()
# Fetch model info; replace the argument with your model ID,
# e.g. model_info = api.get_model('DAMO-NLP/bert-base-uncased')
model_info = api.get_model('deepseek-ai/DeepSeek-V3.2')
# Count the model's files from the returned metadata.
# NOTE: the exact structure returned by get_model may vary between
# modelscope versions — adjust the key path below to match the actual
# response if this raises a KeyError.
file_count = len(model_info['ModelInfos']["safetensor"]['files'])
# StorageSize is reported in bytes; convert to GiB for display.
StorageSize = model_info['StorageSize'] / 1024 / 1024 / 1024
print(f"模型文件总数: {file_count}, 仓库大小: {StorageSize:.2f} GB")
\ No newline at end of file
from modelscope.hub.snapshot_download import snapshot_download

# Download the full model snapshot into the shared local model store.
snapshot_download(
    model_id="Tencent-Hunyuan/HY-Embodied-0.5",
    cache_dir="/data/DataStore/models/exp-net/new-moon/models",
    revision="master"
)
\ No newline at end of file
# Apply the patch that flips newly created repos from private to public
# in the installed pycsghub package.
patch /usr/local/lib/python3.10/dist-packages/pycsghub/csghub_api.py < change_private.patch
# Print the patched region to verify the change took effect.
sed -n '178,182p' /usr/local/lib/python3.10/dist-packages/pycsghub/csghub_api.py
\ No newline at end of file
gradio>=4.44.1
modelscope>=1.11.0
requests>=2.28.0
tqdm>=4.65.0
python-multipart>=0.0.6
aiofiles>=23.0.0
pyyaml>=6.0
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment