i18n.py 11.5 KB
Newer Older
litzh's avatar
litzh committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
"""国际化支持模块"""

import os

# 默认语言
DEFAULT_LANG = os.getenv("GRADIO_LANG", "zh")

# Translation tables: language code -> {message key -> localized string}.
# Keys must stay in sync between the "zh" and "en" sub-dicts; t() returns
# the key itself when a translation is missing.
TRANSLATIONS = {
    "zh": {
        "title": "🎬 LightX2V 图片/视频生成器",
        "model_config": "🗂️ 模型配置",
        "model_config_hint": "💡 **提示**:请确保以下每个模型选项至少有一个已下载✅的模型可用,否则可能无法正常生成视频。",
        "fp8_not_supported": "⚠️ **您的设备不支持fp8推理**,已自动隐藏包含fp8的模型选项。",
        "model_type": "模型类型",
        "model_type_info": "Wan2.2 需要分别指定高噪模型和低噪模型; Qwen-Image-Edit-2511 用于图片编辑(i2i); Qwen-Image-2512 用于文本生成图片(t2i); Z-Image-Turbo 用于文本生成图片(t2i)",
        "qwen3_encoder": "📝 Qwen3 编码器",
        "scheduler": "⏱️ 调度器",
        "qwen25vl_encoder": "📝 Qwen25-VL 编码器",
        "task_type": "任务类型",
        "task_type_info": "I2V: 图生视频, T2V: 文生视频, T2I: 文生图, I2I: 图片编辑",
        "download_source": "📥 下载源",
        "download_source_info": "选择模型下载源",
        "diffusion_model": "🎨 Diffusion模型",
        "high_noise_model": "🔊 高噪模型",
        "low_noise_model": "🔇 低噪模型",
        "text_encoder": "📝 文本编码器",
        "text_encoder_tokenizer": "📝 文本编码器 Tokenizer",
        "image_encoder": "🖼️ 图像编码器",
        "image_encoder_tokenizer": "🖼️ 图像编码器 Tokenizer",
        "vae": "🎞️ VAE编码/解码器",
        "attention_operator": "⚡ 注意力算子",
        "attention_operator_info": "使用适当的注意力算子加速推理",
        "quant_operator": "⚡矩阵乘法算子",
        "quant_operator_info": "选择低精度矩阵乘法算子以加速推理",
        "input_params": "📥 输入参数",
        "input_image": "输入图像(可拖入多张图片)",
        "image_preview": "已上传的图片预览",
        "image_path": "图片路径",
        "prompt": "提示词",
        "prompt_placeholder": "描述视频/图片内容...",
        "negative_prompt": "负向提示词",
        "negative_prompt_placeholder": "不希望出现在视频/图片中的内容...",
        "max_resolution": "最大分辨率",
        "max_resolution_info": "如果显存不足,可调低分辨率",
        "random_seed": "随机种子",
        "infer_steps": "推理步数",
        "infer_steps_distill": "蒸馏模型推理步数默认为4。",
        "infer_steps_info": "视频生成的推理步数。增加步数可能提高质量但降低速度。",
        "sample_shift": "分布偏移",
        "sample_shift_info": "控制样本分布偏移的程度。值越大表示偏移越明显。",
        "cfg_scale": "CFG缩放因子",
        "cfg_scale_info": "控制提示词的影响强度。值越高,提示词的影响越大。当值为1时,自动禁用CFG。",
        "enable_cfg": "启用无分类器引导",
        "fps": "每秒帧数(FPS)",
        "fps_info": "视频的每秒帧数。较高的FPS会产生更流畅的视频。",
        "num_frames": "总帧数",
        "num_frames_info": "视频中的总帧数。更多帧数会产生更长的视频。",
        "video_duration": "视频时长(秒)",
        "video_duration_info": "视频的时长(秒)。实际帧数 = 时长 × FPS。",
        "output_path": "输出视频路径",
        "output_path_info": "必须包含.mp4扩展名。如果留空或使用默认值,将自动生成唯一文件名。",
        "output_image_path": "输出图片路径",
        "output_image_path_info": "必须包含.png扩展名。如果留空或使用默认值,将自动生成唯一文件名。",
        "output_result": "📤 生成的结果",
        "output_image": "输出图片",
        "generate_video": "🎬 生成视频",
        "generate_image": "🖼️ 生成图片",
        "infer_steps_image_info": "图片编辑的推理步数,默认为8。",
        "aspect_ratio": "宽高比",
        "aspect_ratio_info": "选择生成图片的宽高比",
        "model_config_hint_image": "💡 **提示**:请确保以下每个模型选项至少有一个已下载✅的模型可用,否则可能无法正常生成图片。",
        "download": "📥 下载",
        "downloaded": "✅ 已下载",
        "not_downloaded": "❌ 未下载",
        "download_complete": "✅ {model_name} 下载完成",
        "download_start": "开始从 {source} 下载 {model_name}...",
        "please_select_model": "请先选择模型",
        "loading_models": "正在加载 Hugging Face 模型列表缓存...",
        "models_loaded": "模型列表缓存加载完成",
        "use_lora": "使用 LoRA",
        "lora": "🎨 LoRA",
        "lora_info": "选择要使用的 LoRA 模型",
        "lora_strength": "LoRA 强度",
        "lora_strength_info": "控制 LoRA 的影响强度,范围 0-10",
        "high_noise_lora": "🔊 高噪模型 LoRA",
        "high_noise_lora_info": "选择高噪模型使用的 LoRA",
        "high_noise_lora_strength": "高噪模型 LoRA 强度",
        "high_noise_lora_strength_info": "控制高噪模型 LoRA 的影响强度,范围 0-10",
        "low_noise_lora": "🔇 低噪模型 LoRA",
        "low_noise_lora_info": "选择低噪模型使用的 LoRA",
        "low_noise_lora_strength": "低噪模型 LoRA 强度",
        "low_noise_lora_strength_info": "控制低噪模型 LoRA 的影响强度,范围 0-10",
    },
    "en": {
        "title": "🎬 LightX2V Image/Video Generator",
        "model_config": "🗂️ Model Configuration",
        "model_config_hint": "💡 **Tip**: Please ensure at least one downloaded ✅ model is available for each model option below, otherwise video generation may fail.",
        "fp8_not_supported": "⚠️ **Your device does not support fp8 inference**, fp8 model options have been automatically hidden.",
        "model_type": "Model Type",
        "model_type_info": "Wan2.2 requires separate high-noise and low-noise models; Qwen-Image-Edit-2511 is for image editing (i2i); Qwen-Image-2512 is for text-to-image (t2i); Z-Image-Turbo is for text-to-image (t2i)",
        "qwen3_encoder": "📝 Qwen3 Encoder",
        "scheduler": "⏱️ Scheduler",
        "qwen25vl_encoder": "📝 Qwen25-VL Encoder",
        "task_type": "Task Type",
        "task_type_info": "I2V: Image-to-Video, T2V: Text-to-Video, T2I: Text-to-Image, I2I: Image Editing",
        "download_source": "📥 Download Source",
        "download_source_info": "Select model download source",
        "diffusion_model": "🎨 Diffusion Model",
        "high_noise_model": "🔊 High Noise Model",
        "low_noise_model": "🔇 Low Noise Model",
        "text_encoder": "📝 Text Encoder",
        "text_encoder_tokenizer": "📝 Text Encoder Tokenizer",
        "image_encoder": "🖼️ Image Encoder",
        "image_encoder_tokenizer": "🖼️ Image Encoder Tokenizer",
        "vae": "🎞️ VAE Encoder/Decoder",
        "attention_operator": "⚡ Attention Operator",
        "attention_operator_info": "Use appropriate attention operator to accelerate inference",
        "quant_operator": "⚡ Matrix Multiplication Operator",
        "quant_operator_info": "Select low-precision matrix multiplication operator to accelerate inference",
        "input_params": "📥 Input Parameters",
        "input_image": "Input Image (drag multiple images)",
        "image_preview": "Uploaded Image Preview",
        "image_path": "Image Path",
        "prompt": "Prompt",
        "prompt_placeholder": "Describe video/image content...",
        "negative_prompt": "Negative Prompt",
        "negative_prompt_placeholder": "Content you don't want in the video/image...",
        "max_resolution": "Max Resolution",
        "max_resolution_info": "Reduce resolution if VRAM is insufficient",
        "random_seed": "Random Seed",
        "infer_steps": "Inference Steps",
        "infer_steps_distill": "Distill model inference steps default to 4.",
        "infer_steps_info": "Number of inference steps for video generation. More steps may improve quality but reduce speed.",
        "sample_shift": "Sample Shift",
        "sample_shift_info": "Control the degree of sample distribution shift. Higher values indicate more obvious shift.",
        "cfg_scale": "CFG Scale",
        "cfg_scale_info": "Control the influence strength of prompts. Higher values mean stronger prompt influence. When value is 1, CFG is automatically disabled.",
        "enable_cfg": "Enable Classifier-Free Guidance",
        "fps": "Frames Per Second (FPS)",
        "fps_info": "Frames per second of the video. Higher FPS produces smoother videos.",
        "num_frames": "Total Frames",
        "num_frames_info": "Total number of frames in the video. More frames produce longer videos.",
        "video_duration": "Video Duration (seconds)",
        "video_duration_info": "Duration of the video in seconds. Actual frames = duration × FPS.",
        "output_path": "Output Video Path",
        "output_path_info": "Must include .mp4 extension. If left empty or using default value, a unique filename will be automatically generated.",
        "output_image_path": "Output Image Path",
        "output_image_path_info": "Must include .png extension. If left empty or using default value, a unique filename will be automatically generated.",
        "output_result": "📤 Generated Result",
        "output_image": "Output Image",
        "generate_video": "🎬 Generate Video",
        "generate_image": "🖼️ Generate Image",
        "infer_steps_image_info": "Number of inference steps for image editing, default is 8.",
        "aspect_ratio": "Aspect Ratio",
        "aspect_ratio_info": "Select the aspect ratio for generated images",
        "model_config_hint_image": "💡 **Tip**: Please ensure at least one downloaded ✅ model is available for each model option below, otherwise image generation may fail.",
        "download": "📥 Download",
        "downloaded": "✅ Downloaded",
        "not_downloaded": "❌ Not Downloaded",
        "download_complete": "✅ {model_name} download complete",
        "download_start": "Starting to download {model_name} from {source}...",
        "please_select_model": "Please select a model first",
        "loading_models": "Loading Hugging Face model list cache...",
        "models_loaded": "Model list cache loaded",
        "use_lora": "Use LoRA",
        "lora": "🎨 LoRA",
        "lora_info": "Select LoRA model to use",
        "lora_strength": "LoRA Strength",
        "lora_strength_info": "Control LoRA influence strength, range 0-10",
        "high_noise_lora": "🔊 High Noise Model LoRA",
        "high_noise_lora_info": "Select high noise model LoRA to use",
        "high_noise_lora_strength": "High Noise Model LoRA Strength",
        "high_noise_lora_strength_info": "Control high noise model LoRA influence strength, range 0-10",
        "low_noise_lora": "🔇 Low Noise Model LoRA",
        "low_noise_lora_info": "Select low noise model LoRA to use",
        "low_noise_lora_strength": "Low Noise Model LoRA Strength",
        "low_noise_lora_strength_info": "Control low noise model LoRA influence strength, range 0-10",
    },
}


def t(key: str, lang: str = None) -> str:
    """Return the localized string for *key*.

    When *lang* is None the module-wide DEFAULT_LANG is used. An
    unrecognized language code falls back to Chinese ("zh"), and an
    unknown key is returned unchanged so missing translations degrade
    gracefully instead of raising.
    """
    chosen = DEFAULT_LANG if lang is None else lang
    # Unknown language codes silently resolve to the Chinese table.
    table = TRANSLATIONS[chosen] if chosen in TRANSLATIONS else TRANSLATIONS["zh"]
    return table.get(key, key)


def set_language(lang: str):
    """Switch the module-wide default language.

    Only languages present in TRANSLATIONS are accepted; any other
    value is silently ignored. The accepted choice is mirrored into
    the GRADIO_LANG environment variable so child processes inherit it.
    """
    global DEFAULT_LANG
    if lang not in TRANSLATIONS:
        return  # unsupported language: keep the current default
    DEFAULT_LANG = lang
    os.environ["GRADIO_LANG"] = lang