"""Internationalization (i18n) support module."""

import os

# Default UI language, overridable via the GRADIO_LANG environment variable
DEFAULT_LANG = os.getenv("GRADIO_LANG", "zh")

# Translation dictionary: language code -> {key: translated string}
TRANSLATIONS = {
| 9 | +TRANSLATIONS = { |
| 10 | + "zh": { |
| 11 | + "title": "🎬 LightX2V 图片/视频生成器", |
| 12 | + "model_config": "🗂️ 模型配置", |
| 13 | + "model_config_hint": "💡 **提示**:请确保以下每个模型选项至少有一个已下载✅的模型可用,否则可能无法正常生成视频。", |
| 14 | + "fp8_not_supported": "⚠️ **您的设备不支持fp8推理**,已自动隐藏包含fp8的模型选项。", |
| 15 | + "model_type": "模型类型", |
| 16 | + "model_type_info": "Wan2.2 需要分别指定高噪模型和低噪模型; Qwen-Image-Edit-2511 用于图片编辑(i2i); Qwen-Image-2512 用于文本生成图片(t2i); Z-Image-Turbo 用于文本生成图片(t2i)", |
| 17 | + "qwen3_encoder": "📝 Qwen3 编码器", |
| 18 | + "scheduler": "⏱️ 调度器", |
        "qwen25vl_encoder": "📝 Qwen2.5-VL 编码器",
        "task_type": "任务类型",
        "task_type_info": "I2V: 图生视频, T2V: 文生视频, T2I: 文生图, I2I: 图片编辑",
        "download_source": "📥 下载源",
        "download_source_info": "选择模型下载源",
        "diffusion_model": "🎨 Diffusion模型",
        "high_noise_model": "🔊 高噪模型",
        "low_noise_model": "🔇 低噪模型",
        "text_encoder": "📝 文本编码器",
        "text_encoder_tokenizer": "📝 文本编码器 Tokenizer",
        "image_encoder": "🖼️ 图像编码器",
        "image_encoder_tokenizer": "🖼️ 图像编码器 Tokenizer",
        "vae": "🎞️ VAE编码/解码器",
        "attention_operator": "⚡ 注意力算子",
        "attention_operator_info": "使用适当的注意力算子加速推理",
        "quant_operator": "⚡ 矩阵乘法算子",
        "quant_operator_info": "选择低精度矩阵乘法算子以加速推理",
        "input_params": "📥 输入参数",
        "input_image": "输入图像(可拖入多张图片)",
        "image_preview": "已上传的图片预览",
        "image_path": "图片路径",
        "prompt": "提示词",
        "prompt_placeholder": "描述视频/图片内容...",
        "negative_prompt": "负向提示词",
        "negative_prompt_placeholder": "不希望出现在视频/图片中的内容...",
        "max_resolution": "最大分辨率",
        "max_resolution_info": "如果显存不足,可调低分辨率",
        "random_seed": "随机种子",
        "infer_steps": "推理步数",
        "infer_steps_distill": "蒸馏模型推理步数默认为4。",
        "infer_steps_info": "视频生成的推理步数。增加步数可能提高质量但降低速度。",
        "sample_shift": "分布偏移",
        "sample_shift_info": "控制样本分布偏移的程度。值越大表示偏移越明显。",
        "cfg_scale": "CFG缩放因子",
        "cfg_scale_info": "控制提示词的影响强度。值越高,提示词的影响越大。当值为1时,自动禁用CFG。",
        "enable_cfg": "启用无分类器引导",
        "fps": "每秒帧数(FPS)",
        "fps_info": "视频的每秒帧数。较高的FPS会产生更流畅的视频。",
        "num_frames": "总帧数",
        "num_frames_info": "视频中的总帧数。更多帧数会产生更长的视频。",
        "video_duration": "视频时长(秒)",
        "video_duration_info": "视频的时长(秒)。实际帧数 = 时长 × FPS。",
        "output_path": "输出视频路径",
        "output_path_info": "必须包含.mp4扩展名。如果留空或使用默认值,将自动生成唯一文件名。",
        "output_image_path": "输出图片路径",
        "output_image_path_info": "必须包含.png扩展名。如果留空或使用默认值,将自动生成唯一文件名。",
        "output_result": "📤 生成的结果",
        "output_image": "输出图片",
        "generate_video": "🎬 生成视频",
        "generate_image": "🖼️ 生成图片",
        "infer_steps_image_info": "图片编辑的推理步数,默认为8。",
        "aspect_ratio": "宽高比",
        "aspect_ratio_info": "选择生成图片的宽高比",
        "model_config_hint_image": "💡 **提示**:请确保以下每个模型选项至少有一个已下载✅的模型可用,否则可能无法正常生成图片。",
        "download": "📥 下载",
        "downloaded": "✅ 已下载",
        "not_downloaded": "❌ 未下载",
        "download_complete": "✅ {model_name} 下载完成",
        "download_start": "开始从 {source} 下载 {model_name}...",
        "please_select_model": "请先选择模型",
        "loading_models": "正在加载 Hugging Face 模型列表缓存...",
        "models_loaded": "模型列表缓存加载完成",
        "use_lora": "使用 LoRA",
        "lora": "🎨 LoRA",
        "lora_info": "选择要使用的 LoRA 模型",
        "lora_strength": "LoRA 强度",
        "lora_strength_info": "控制 LoRA 的影响强度,范围 0-10",
        "high_noise_lora": "🔊 高噪模型 LoRA",
        "high_noise_lora_info": "选择高噪模型使用的 LoRA",
        "high_noise_lora_strength": "高噪模型 LoRA 强度",
        "high_noise_lora_strength_info": "控制高噪模型 LoRA 的影响强度,范围 0-10",
        "low_noise_lora": "🔇 低噪模型 LoRA",
        "low_noise_lora_info": "选择低噪模型使用的 LoRA",
        "low_noise_lora_strength": "低噪模型 LoRA 强度",
        "low_noise_lora_strength_info": "控制低噪模型 LoRA 的影响强度,范围 0-10",
    },
    "en": {
        "title": "🎬 LightX2V Image/Video Generator",
        "model_config": "🗂️ Model Configuration",
        "model_config_hint": "💡 **Tip**: Please ensure at least one downloaded ✅ model is available for each model option below, otherwise video generation may fail.",
        "fp8_not_supported": "⚠️ **Your device does not support fp8 inference**, fp8 model options have been automatically hidden.",
        "model_type": "Model Type",
        "model_type_info": "Wan2.2 requires separate high-noise and low-noise models; Qwen-Image-Edit-2511 is for image editing (i2i); Qwen-Image-2512 is for text-to-image (t2i); Z-Image-Turbo is for text-to-image (t2i)",
        "qwen3_encoder": "📝 Qwen3 Encoder",
        "scheduler": "⏱️ Scheduler",
        "qwen25vl_encoder": "📝 Qwen2.5-VL Encoder",
        "task_type": "Task Type",
        "task_type_info": "I2V: Image-to-Video, T2V: Text-to-Video, T2I: Text-to-Image, I2I: Image Editing",
        "download_source": "📥 Download Source",
        "download_source_info": "Select the model download source",
        "diffusion_model": "🎨 Diffusion Model",
        "high_noise_model": "🔊 High Noise Model",
        "low_noise_model": "🔇 Low Noise Model",
        "text_encoder": "📝 Text Encoder",
        "text_encoder_tokenizer": "📝 Text Encoder Tokenizer",
        "image_encoder": "🖼️ Image Encoder",
        "image_encoder_tokenizer": "🖼️ Image Encoder Tokenizer",
        "vae": "🎞️ VAE Encoder/Decoder",
        "attention_operator": "⚡ Attention Operator",
        "attention_operator_info": "Use an appropriate attention operator to accelerate inference",
        "quant_operator": "⚡ Matrix Multiplication Operator",
        "quant_operator_info": "Select a low-precision matrix multiplication operator to accelerate inference",
        "input_params": "📥 Input Parameters",
        "input_image": "Input Image (drag in multiple images)",
        "image_preview": "Uploaded Image Preview",
        "image_path": "Image Path",
        "prompt": "Prompt",
        "prompt_placeholder": "Describe the video/image content...",
        "negative_prompt": "Negative Prompt",
        "negative_prompt_placeholder": "Content you don't want in the video/image...",
        "max_resolution": "Max Resolution",
        "max_resolution_info": "Reduce the resolution if VRAM is insufficient",
        "random_seed": "Random Seed",
        "infer_steps": "Inference Steps",
        "infer_steps_distill": "Distilled models default to 4 inference steps.",
        "infer_steps_info": "Number of inference steps for video generation. More steps may improve quality but reduce speed.",
        "sample_shift": "Sample Shift",
        "sample_shift_info": "Controls the degree of sample distribution shift. Higher values produce a more pronounced shift.",
        "cfg_scale": "CFG Scale",
        "cfg_scale_info": "Controls how strongly the prompt influences the output. Higher values mean stronger prompt influence. A value of 1 automatically disables CFG.",
        "enable_cfg": "Enable Classifier-Free Guidance",
        "fps": "Frames Per Second (FPS)",
        "fps_info": "Frames per second of the video. Higher FPS produces smoother video.",
        "num_frames": "Total Frames",
        "num_frames_info": "Total number of frames in the video. More frames produce a longer video.",
        "video_duration": "Video Duration (seconds)",
        "video_duration_info": "Duration of the video in seconds. Actual frames = duration × FPS.",
        "output_path": "Output Video Path",
        "output_path_info": "Must include the .mp4 extension. If left empty or set to the default, a unique filename is generated automatically.",
        "output_image_path": "Output Image Path",
        "output_image_path_info": "Must include the .png extension. If left empty or set to the default, a unique filename is generated automatically.",
        "output_result": "📤 Generated Result",
        "output_image": "Output Image",
        "generate_video": "🎬 Generate Video",
        "generate_image": "🖼️ Generate Image",
        "infer_steps_image_info": "Number of inference steps for image editing; the default is 8.",
        "aspect_ratio": "Aspect Ratio",
        "aspect_ratio_info": "Select the aspect ratio for generated images",
        "model_config_hint_image": "💡 **Tip**: Please ensure at least one downloaded ✅ model is available for each model option below, otherwise image generation may fail.",
        "download": "📥 Download",
        "downloaded": "✅ Downloaded",
        "not_downloaded": "❌ Not Downloaded",
        "download_complete": "✅ {model_name} download complete",
        "download_start": "Starting to download {model_name} from {source}...",
        "please_select_model": "Please select a model first",
        "loading_models": "Loading Hugging Face model list cache...",
        "models_loaded": "Model list cache loaded",
        "use_lora": "Use LoRA",
        "lora": "🎨 LoRA",
        "lora_info": "Select the LoRA model to use",
        "lora_strength": "LoRA Strength",
        "lora_strength_info": "Controls the LoRA influence strength, range 0-10",
        "high_noise_lora": "🔊 High Noise Model LoRA",
        "high_noise_lora_info": "Select the LoRA for the high noise model",
        "high_noise_lora_strength": "High Noise Model LoRA Strength",
        "high_noise_lora_strength_info": "Controls the high noise model LoRA influence strength, range 0-10",
        "low_noise_lora": "🔇 Low Noise Model LoRA",
        "low_noise_lora_info": "Select the LoRA for the low noise model",
        "low_noise_lora_strength": "Low Noise Model LoRA Strength",
        "low_noise_lora_strength_info": "Controls the low noise model LoRA influence strength, range 0-10",
    },
}
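# Note: values containing placeholders such as "{model_name}" and "{source}"
# are format templates; callers presumably fill them via str.format, e.g.
# t("download_start").format(model_name="Wan2.2", source="Hugging Face").
# (This usage is inferred from the placeholder syntax, not defined here.)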


def t(key: str, lang: str | None = None) -> str:
    """Return the translation for ``key`` in ``lang``.

    Falls back to DEFAULT_LANG when ``lang`` is None, to "zh" when the
    language is unknown, and to the key itself when the key has no entry.
    """
    if lang is None:
        lang = DEFAULT_LANG

    if lang not in TRANSLATIONS:
        lang = "zh"

    return TRANSLATIONS[lang].get(key, key)


def set_language(lang: str) -> None:
    """Set the default UI language; unknown language codes are ignored."""
    global DEFAULT_LANG
    if lang in TRANSLATIONS:
        DEFAULT_LANG = lang
        os.environ["GRADIO_LANG"] = lang
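

# Minimal usage sketch (illustrative only; the keys are real TRANSLATIONS
# entries, but the values passed to .format() are hypothetical examples):
if __name__ == "__main__":
    print(t("title"))  # Chinese by default (unless GRADIO_LANG is set)
    set_language("en")
    print(t("title"))  # English after switching the default language
    print(t("download_start").format(model_name="Wan2.2", source="Hugging Face"))
    print(t("missing_key"))  # unknown keys fall back to the key itself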