From c2105f02021b6dd7e72bac28702859bd79b09852 Mon Sep 17 00:00:00 2001
From: buptcws
Date: Tue, 23 Dec 2025 16:05:54 +0800
Subject: [PATCH] feat: Add LLM connection test and model discovery
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add a "Test Connection" button for LLM configuration that validates API
credentials and fetches available models from the provider.

Changes:
- Add connection test fields to llm_presets.py (supports_connection_test, models_url, etc.)
- Create llm_connection.py utility with test_connection() and fetch_models()
- Update settings.py UI with Test Connection button and model dropdown
- Remove the ComfyUI API key input from the settings UI and i18n
- Add i18n translations for new UI elements (en_US and zh_CN)

Features:
- Test Connection validates the API key and base URL before saving
- After a successful test, model selection shows a dropdown instead of a text input
- Supported providers: OpenAI, Qwen, DeepSeek, Moonshot, Ollama, and custom OpenAI-compatible endpoints
- Claude/Anthropic is excluded (its native API is not OpenAI-compatible)
- Ollama allows an empty API key (local server)
- OpenAI models are filtered to show only chat-capable models

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5
---
 pixelle_video/llm_presets.py          |  92 +++++++-
 pixelle_video/utils/llm_connection.py | 316 ++++++++++++++++++++++++++
 web/components/settings.py            | 199 +++++++++++-----
 web/i18n/locales/en_US.json           |  13 +-
 web/i18n/locales/zh_CN.json           |  13 +-
 5 files changed, 574 insertions(+), 59 deletions(-)
 create mode 100644 pixelle_video/utils/llm_connection.py

diff --git a/pixelle_video/llm_presets.py b/pixelle_video/llm_presets.py
index 685143a..75358b5 100644
--- a/pixelle_video/llm_presets.py
+++ b/pixelle_video/llm_presets.py
@@ -25,37 +25,70 @@
         "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
         "model": "qwen-max",
         "api_key_url": "https://bailian.console.aliyun.com/?tab=model#/api-key",
+        # Connection test and model discovery fields
+        "supports_connection_test": True,
+        "models_url": "https://dashscope.aliyuncs.com/compatible-mode/v1/models",
+        "models_response_path": "data",
+        "model_id_field": "id",
+        "requires_api_key": True,
     },
     {
         "name": "OpenAI",
         "base_url": "https://api.openai.com/v1",
         "model": "gpt-4o",
         "api_key_url": "https://platform.openai.com/api-keys",
+        # Connection test and model discovery fields
+        "supports_connection_test": True,
+        "models_url": "https://api.openai.com/v1/models",
+        "models_response_path": "data",
+        "model_id_field": "id",
+        "requires_api_key": True,
     },
     {
         "name": "Claude",
         "base_url": "https://api.anthropic.com/v1/",
         "model": "claude-sonnet-4-5",
         "api_key_url": "https://console.anthropic.com/settings/keys",
+        # Claude uses Anthropic's native API, which is not OpenAI-compatible;
+        # connection test and model discovery are NOT supported
+        "supports_connection_test": False,
+        "requires_api_key": True,
     },
     {
         "name": "DeepSeek",
         "base_url": "https://api.deepseek.com",
         "model": "deepseek-chat",
         "api_key_url": "https://platform.deepseek.com/api_keys",
+        # Connection test and model discovery fields
+        "supports_connection_test": True,
+        "models_url": "https://api.deepseek.com/models",
+        "models_response_path": "data",
+        "model_id_field": "id",
+        "requires_api_key": True,
     },
     {
         "name": "Ollama",
         "base_url": "http://localhost:11434/v1",
         "model": "llama3.2",
         "api_key_url": "https://ollama.com/download",
-        "default_api_key": "ollama",  # Required by OpenAI SDK but ignored by Ollama
+        # Ollama uses its native API for model listing (not an OpenAI-compatible endpoint)
+        "supports_connection_test": True,
+        "models_url": "http://localhost:11434/api/tags",
+        "models_response_path": "models",
+        "model_id_field": "name",
+        "requires_api_key": False,  # Ollama doesn't need an API key
     },
     {
         "name": "Moonshot",
         "base_url": "https://api.moonshot.cn/v1",
         "model": "moonshot-v1-8k",
         "api_key_url": "https://platform.moonshot.cn/console/api-keys",
+        # Connection test and model discovery fields
+        "supports_connection_test": True,
+        "models_url": "https://api.moonshot.cn/v1/models",
+        "models_response_path": "data",
+        "model_id_field": "id",
+        "requires_api_key": True,
     },
 ]
 
@@ -76,7 +109,7 @@ def get_preset(name: str) -> Dict[str, Any]:
 def find_preset_by_base_url_and_model(base_url: str, model: str) -> str | None:
     """
     Find preset name by base_url and model
-    
+
     Returns:
         Preset name if found, None otherwise
     """
@@ -85,3 +118,58 @@ def find_preset_by_base_url_and_model(base_url: str, model: str) -> str | None:
             return preset["name"]
 
     return None
+
+
+def get_models_config(preset_name: str) -> Dict[str, Any]:
+    """
+    Get the model-listing endpoint configuration for a preset.
+
+    Args:
+        preset_name: Name of the LLM preset
+
+    Returns:
+        Dict with keys: models_url, models_response_path, model_id_field.
+        Falls back to the standard OpenAI format (with models_url=None) for
+        unknown presets or presets without connection-test support.
+    """
+    preset = get_preset(preset_name)
+    if preset and preset.get("supports_connection_test"):
+        return {
+            "models_url": preset.get("models_url"),
+            "models_response_path": preset.get("models_response_path", "data"),
+            "model_id_field": preset.get("model_id_field", "id"),
+        }
+    # Default OpenAI-compatible format for unknown presets
+    return {
+        "models_url": None,
+        "models_response_path": "data",
+        "model_id_field": "id",
+    }
+
+
+def supports_connection_test(preset_name: str) -> bool:
+    """
+    Check if a preset supports connection testing.
+
+    Args:
+        preset_name: Name of the LLM preset
+
+    Returns:
+        True if connection test is supported, False otherwise
+    """
+    preset = get_preset(preset_name)
+    return preset.get("supports_connection_test", False) if preset else False
+
+
+def requires_api_key(preset_name: str) -> bool:
+    """
+    Check if a preset requires an API key.
+
+    Args:
+        preset_name: Name of the LLM preset
+
+    Returns:
+        True if an API key is required, False otherwise (e.g., Ollama)
+    """
+    preset = get_preset(preset_name)
+    # Default to True if not specified (most providers need an API key)
+    return preset.get("requires_api_key", True) if preset else True
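+
+
+# Illustrative lookups against the presets above (not part of the public API,
+# just documentation of expected results):
+#   supports_connection_test("Claude")         -> False
+#   requires_api_key("Ollama")                 -> False
+#   get_models_config("Ollama")["models_url"]  -> "http://localhost:11434/api/tags"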
"supports_connection_test": True, + "models_url": "http://localhost:11434/api/tags", + "models_response_path": "models", + "model_id_field": "name", + "requires_api_key": False, # Ollama doesn't need API key }, { "name": "Moonshot", "base_url": "https://api.moonshot.cn/v1", "model": "moonshot-v1-8k", "api_key_url": "https://platform.moonshot.cn/console/api-keys", + # Connection test and model discovery fields + "supports_connection_test": True, + "models_url": "https://api.moonshot.cn/v1/models", + "models_response_path": "data", + "model_id_field": "id", + "requires_api_key": True, }, ] @@ -76,7 +109,7 @@ def get_preset(name: str) -> Dict[str, Any]: def find_preset_by_base_url_and_model(base_url: str, model: str) -> str | None: """ Find preset name by base_url and model - + Returns: Preset name if found, None otherwise """ @@ -85,3 +118,58 @@ def find_preset_by_base_url_and_model(base_url: str, model: str) -> str | None: return preset["name"] return None + +def get_models_config(preset_name: str) -> Dict[str, Any]: + """ + Get model listing endpoint configuration for a preset. + + Args: + preset_name: Name of the LLM preset + + Returns: + Dict with keys: models_url, models_response_path, model_id_field + Falls back to standard OpenAI format for unknown presets + """ + preset = get_preset(preset_name) + if preset and preset.get("supports_connection_test"): + return { + "models_url": preset.get("models_url"), + "models_response_path": preset.get("models_response_path", "data"), + "model_id_field": preset.get("model_id_field", "id"), + } + # Default OpenAI-compatible format for unknown presets + return { + "models_url": None, + "models_response_path": "data", + "model_id_field": "id", + } + + +def supports_connection_test(preset_name: str) -> bool: + """ + Check if a preset supports connection testing. + + Args: + preset_name: Name of the LLM preset + + Returns: + True if connection test is supported, False otherwise + """ + preset = get_preset(preset_name) + return preset.get("supports_connection_test", False) if preset else False + + +def requires_api_key(preset_name: str) -> bool: + """ + Check if a preset requires an API key. + + Args: + preset_name: Name of the LLM preset + + Returns: + True if API key is required, False otherwise (e.g., Ollama) + """ + preset = get_preset(preset_name) + # Default to True if not specified (most providers need API key) + return preset.get("requires_api_key", True) if preset else True + diff --git a/pixelle_video/utils/llm_connection.py b/pixelle_video/utils/llm_connection.py new file mode 100644 index 0000000..9dbf360 --- /dev/null +++ b/pixelle_video/utils/llm_connection.py @@ -0,0 +1,316 @@ +# Copyright (C) 2025 AIDC-AI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +LLM Connection Test and Model Discovery Utilities + +This module provides functions for testing LLM API connections and +discovering available models from OpenAI-compatible providers. 
+""" + +from dataclasses import dataclass +from typing import List, Optional, Tuple + +import requests +from loguru import logger + +from pixelle_video.llm_presets import get_models_config, get_preset + + +@dataclass +class ConnectionTestResult: + """Result of a connection test.""" + + success: bool + message: str + models: List[str] = None # Populated on success if models were fetched + + def __post_init__(self): + if self.models is None: + self.models = [] + + +def determine_provider_type(base_url: str, preset_name: str | None = None) -> str: + """ + Determine the provider type based on base_url or preset name. + + Args: + base_url: The API base URL + preset_name: Optional preset name for direct lookup + + Returns: + Provider type string (e.g., "openai", "ollama", "custom") + """ + if preset_name: + preset = get_preset(preset_name) + if preset: + return preset_name.lower() + + # Detect based on URL patterns + base_url_lower = base_url.lower() + if "ollama" in base_url_lower or "localhost:11434" in base_url_lower: + return "ollama" + if "openai.com" in base_url_lower: + return "openai" + if "dashscope.aliyuncs.com" in base_url_lower: + return "qwen" + if "deepseek.com" in base_url_lower: + return "deepseek" + if "moonshot.cn" in base_url_lower: + return "moonshot" + if "anthropic.com" in base_url_lower: + return "claude" + + return "custom" + + +def _build_models_url(base_url: str, preset_name: str | None = None) -> Tuple[str, str, str]: + """ + Build the models endpoint URL and response parsing config. + + Args: + base_url: The API base URL + preset_name: Optional preset name for config lookup + + Returns: + Tuple of (models_url, response_path, id_field) + """ + if preset_name: + config = get_models_config(preset_name) + if config.get("models_url"): + return ( + config["models_url"], + config["models_response_path"], + config["model_id_field"], + ) + + # Fall back to URL-based detection + provider_type = determine_provider_type(base_url, preset_name) + + if provider_type == "ollama": + # Ollama has a special endpoint for model listing + # Extract host from base_url + if "localhost:11434" in base_url or "127.0.0.1:11434" in base_url: + host = base_url.split("/v1")[0] if "/v1" in base_url else base_url.rstrip("/") + return f"{host}/api/tags", "models", "name" + + # Default: OpenAI-compatible /models endpoint + # Remove trailing /v1 if present, then add /v1/models + clean_url = base_url.rstrip("/") + if clean_url.endswith("/v1"): + models_url = f"{clean_url}/models" + else: + models_url = f"{clean_url}/v1/models" + + return models_url, "data", "id" + + +def test_connection( + api_key: str, + base_url: str, + preset_name: str | None = None, + timeout: int = 5, +) -> ConnectionTestResult: + """ + Test connection to an LLM API and fetch available models. 
+def test_connection(
+    api_key: str,
+    base_url: str,
+    preset_name: str | None = None,
+    timeout: int = 5,
+) -> ConnectionTestResult:
+    """
+    Test the connection to an LLM API and fetch available models.
+
+    Args:
+        api_key: The API key for authentication
+        base_url: The API base URL
+        preset_name: Optional preset name for endpoint config
+        timeout: Request timeout in seconds (default: 5)
+
+    Returns:
+        ConnectionTestResult with success status, message, and model list
+    """
+    models_url, response_path, id_field = _build_models_url(base_url, preset_name)
+
+    headers = {}
+
+    # Determine if an API key is required
+    provider_type = determine_provider_type(base_url, preset_name)
+    needs_api_key = provider_type != "ollama"
+
+    if needs_api_key:
+        if not api_key:
+            return ConnectionTestResult(
+                success=False,
+                message="API key is required for this provider",
+            )
+        headers["Authorization"] = f"Bearer {api_key}"
+
+    logger.debug(f"Testing LLM connection: {models_url}")
+
+    try:
+        response = requests.get(models_url, headers=headers, timeout=timeout)
+
+        if response.status_code == 401:
+            return ConnectionTestResult(
+                success=False,
+                message="Authentication failed: Invalid API key",
+            )
+
+        if response.status_code == 403:
+            return ConnectionTestResult(
+                success=False,
+                message="Access denied: Check API key permissions",
+            )
+
+        if response.status_code != 200:
+            return ConnectionTestResult(
+                success=False,
+                message=f"Request failed with status {response.status_code}",
+            )
+
+        # Parse models from the response
+        models = _parse_models_response(response.json(), response_path, id_field, provider_type)
+
+        if not models:
+            return ConnectionTestResult(
+                success=True,
+                message="Connection successful, but no models found",
+                models=[],
+            )
+
+        return ConnectionTestResult(
+            success=True,
+            message=f"Connection successful! Found {len(models)} models",
+            models=models,
+        )
+
+    except requests.exceptions.Timeout:
+        return ConnectionTestResult(
+            success=False,
+            message=f"Connection timed out after {timeout} seconds",
+        )
+    except requests.exceptions.ConnectionError as e:
+        # Provide a more helpful message for Ollama
+        if provider_type == "ollama":
+            return ConnectionTestResult(
+                success=False,
+                message="Cannot connect to Ollama. Is the server running?",
+            )
+        return ConnectionTestResult(
+            success=False,
+            message=f"Connection error: {str(e)}",
+        )
+    except requests.exceptions.RequestException as e:
+        return ConnectionTestResult(
+            success=False,
+            message=f"Request failed: {str(e)}",
+        )
+    except Exception as e:
+        logger.exception("Unexpected error during connection test")
+        return ConnectionTestResult(
+            success=False,
+            message=f"Unexpected error: {str(e)}",
+        )
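+
+
+# The parser below expects one of these response shapes (illustrative):
+#   OpenAI-compatible /models: {"data": [{"id": "gpt-4o", ...}, ...]}
+#   Ollama /api/tags:          {"models": [{"name": "llama3.2", ...}, ...]}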
+def _parse_models_response(
+    data: dict,
+    response_path: str,
+    id_field: str,
+    provider_type: str,
+) -> List[str]:
+    """
+    Parse the models list from an API response.
+
+    Args:
+        data: The JSON response data
+        response_path: The path to the models list (e.g., "data" or "models")
+        id_field: The field containing the model ID (e.g., "id" or "name")
+        provider_type: The provider type, used for filtering logic
+
+    Returns:
+        List of model names/IDs
+    """
+    models_list = data.get(response_path, [])
+    if not isinstance(models_list, list):
+        return []
+
+    models = []
+    for model in models_list:
+        if isinstance(model, dict):
+            model_id = model.get(id_field)
+            if model_id:
+                models.append(model_id)
+        elif isinstance(model, str):
+            models.append(model)
+
+    # Filter models based on provider type
+    models = _filter_chat_models(models, provider_type)
+
+    # Sort models for consistent display
+    models.sort()
+
+    return models
+
+
+def _filter_chat_models(models: List[str], provider_type: str) -> List[str]:
+    """
+    Filter models to show only chat-capable models where possible.
+
+    Args:
+        models: List of all model IDs
+        provider_type: The provider type
+
+    Returns:
+        Filtered list of chat-capable models
+    """
+    if provider_type == "openai":
+        # Filter to models that support chat completions;
+        # exclude embedding, whisper, dall-e, and other non-chat models
+        chat_patterns = ["gpt-", "o1-", "o3-", "chatgpt-"]
+        exclude_patterns = ["embedding", "whisper", "dall-e", "tts", "davinci", "babbage"]
+
+        filtered = []
+        for model in models:
+            model_lower = model.lower()
+            if any(p in model_lower for p in chat_patterns):
+                if not any(e in model_lower for e in exclude_patterns):
+                    filtered.append(model)
+        return filtered if filtered else models
+
+    # For other providers, return all models:
+    # Ollama models are all chat-capable, and other providers
+    # may expose model types we can't distinguish here
+    return models
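+
+
+# Illustrative filtering given the patterns above (documentation only):
+#   _filter_chat_models(["gpt-4o", "whisper-1", "text-embedding-3-small"], "openai") -> ["gpt-4o"]
+#   Providers other than "openai" are returned unchanged.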
+def fetch_models(
+    api_key: str,
+    base_url: str,
+    preset_name: str | None = None,
+    timeout: int = 5,
+) -> Tuple[List[str], Optional[str]]:
+    """
+    Fetch available models from an LLM provider.
+
+    This is a convenience function that wraps test_connection for
+    cases where you only need the model list.
+
+    Args:
+        api_key: The API key for authentication
+        base_url: The API base URL
+        preset_name: Optional preset name for endpoint config
+        timeout: Request timeout in seconds (default: 5)
+
+    Returns:
+        Tuple of (models_list, error_message)
+        On success: (["model1", "model2", ...], None)
+        On failure: ([], "error message")
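+
+    Example (illustrative values):
+        models, err = fetch_models("sk-...", "https://api.openai.com/v1", "OpenAI")
+        if err:
+            logger.warning(err)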
+    """
+    result = test_connection(api_key, base_url, preset_name, timeout)
+    if result.success:
+        return result.models, None
+    return [], result.message
diff --git a/web/components/settings.py b/web/components/settings.py
index 2d470e0..5b852f1 100644
--- a/web/components/settings.py
+++ b/web/components/settings.py
@@ -19,27 +19,44 @@
 from web.i18n import tr, get_language
 from web.utils.streamlit_helpers import safe_rerun
 from pixelle_video.config import config_manager
+from pixelle_video.utils.llm_connection import test_connection
 
 
 def render_advanced_settings():
     """Render system configuration (required) with 2-column layout"""
+    # Initialize session state for LLM connection test
+    if "available_llm_models" not in st.session_state:
+        st.session_state.available_llm_models = []
+    if "llm_connection_tested" not in st.session_state:
+        st.session_state.llm_connection_tested = False
+    if "llm_connection_status" not in st.session_state:
+        st.session_state.llm_connection_status = ""
+    if "llm_manual_model_input" not in st.session_state:
+        st.session_state.llm_manual_model_input = False
+
     # Check if system is configured
     is_configured = config_manager.validate()
-    
+
     # Expand if not configured, collapse if configured
    with st.expander(tr("settings.title"), expanded=not is_configured):
         # 2-column layout: LLM | ComfyUI
         llm_col, comfyui_col = st.columns(2)
-    
+
         # ====================================================================
         # Column 1: LLM Settings
         # ====================================================================
         with llm_col:
             with st.container(border=True):
                 st.markdown(f"**{tr('settings.llm.title')}**")
-    
+
                 # Quick preset selection
-                from pixelle_video.llm_presets import get_preset_names, get_preset, find_preset_by_base_url_and_model
+                from pixelle_video.llm_presets import (
+                    get_preset_names,
+                    get_preset,
+                    find_preset_by_base_url_and_model,
+                    supports_connection_test,
+                    requires_api_key,
+                )
 
                 # Custom at the end
                 preset_names = get_preset_names() + ["Custom"]
@@ -66,23 +83,34 @@ def render_advanced_settings():
                     options=preset_names,
                     index=default_index,
                     help=tr("settings.llm.quick_select_help"),
-                    key="llm_preset_select"
+                    key="llm_preset_select",
                 )
-    
+
+                # Track preset changes to reset connection state
+                if "llm_last_preset" not in st.session_state:
+                    st.session_state.llm_last_preset = selected_preset
+                elif st.session_state.llm_last_preset != selected_preset:
+                    # Preset changed - reset connection state
+                    st.session_state.llm_last_preset = selected_preset
+                    st.session_state.available_llm_models = []
+                    st.session_state.llm_connection_tested = False
+                    st.session_state.llm_connection_status = ""
+                    st.session_state.llm_manual_model_input = False
+
                 # Auto-fill based on selected preset
                 if selected_preset != "Custom":
                     # Preset selected
                     preset_config = get_preset(selected_preset)
-    
+
                     # If user switched to a different preset (not current one), clear API key
                     # If it's the same as current config, keep API key
                     if selected_preset == current_preset:
                         # Same preset as saved config: keep API key
                         default_api_key = current_llm["api_key"]
                     else:
-                        # Different preset: use default_api_key if provided (e.g., Ollama), otherwise clear
-                        default_api_key = preset_config.get("default_api_key", "")
-
+                        # Different preset: clear API key
+                        default_api_key = ""
+
                     default_base_url = preset_config.get("base_url", "")
                     default_model = preset_config.get("model", "")
@@ -96,31 +124,108 @@ def render_advanced_settings():
                     default_model = current_llm["model"]
 
                 st.markdown("---")
-    
+
+                # Determine if an API key is required for this preset
+                api_key_required = requires_api_key(selected_preset)
+                api_key_label = (
+                    tr("settings.llm.api_key_optional")
+                    if not api_key_required
+                    else f"{tr('settings.llm.api_key')} *"
+                )
+
                 # API Key (use unique key to force refresh when switching preset)
                 llm_api_key = st.text_input(
-                    f"{tr('settings.llm.api_key')} *",
+                    api_key_label,
                     value=default_api_key,
                     type="password",
                     help=tr("settings.llm.api_key_help"),
-                    key=f"llm_api_key_input_{selected_preset}"
+                    key=f"llm_api_key_input_{selected_preset}",
                 )
-    
+
                 # Base URL (use unique key based on preset to force refresh)
                 llm_base_url = st.text_input(
                     f"{tr('settings.llm.base_url')} *",
                     value=default_base_url,
                     help=tr("settings.llm.base_url_help"),
-                    key=f"llm_base_url_input_{selected_preset}"
+                    key=f"llm_base_url_input_{selected_preset}",
                 )
-
-                # Model (use unique key based on preset to force refresh)
-                llm_model = st.text_input(
-                    f"{tr('settings.llm.model')} *",
-                    value=default_model,
-                    help=tr("settings.llm.model_help"),
-                    key=f"llm_model_input_{selected_preset}"
-                )
+
+                # Test Connection button for LLM
+                # Show for: known presets that support it, OR Custom (assume OpenAI-compatible)
+                show_test_button = supports_connection_test(selected_preset) or selected_preset == "Custom"
+                if show_test_button:
+                    if st.button(
+                        tr("btn.test_connection"),
+                        key="test_llm_connection",
+                        use_container_width=True,
+                    ):
+                        # Reset previous state
+                        st.session_state.available_llm_models = []
+                        st.session_state.llm_connection_tested = False
+                        st.session_state.llm_connection_status = ""
+                        st.session_state.llm_manual_model_input = False
+
+                        with st.spinner(tr("status.fetching_models")):
+                            result = test_connection(
+                                api_key=llm_api_key,
+                                base_url=llm_base_url,
+                                preset_name=selected_preset if selected_preset != "Custom" else None,
+                            )
+
+                        st.session_state.llm_connection_tested = True
+                        if result.success:
+                            st.session_state.available_llm_models = result.models
+                            st.session_state.llm_connection_status = "success"
+                            st.success(
+                                tr("status.llm_connection_success").format(count=len(result.models))
+                            )
+                        else:
+                            st.session_state.llm_connection_status = "failed"
+                            st.error(f"{tr('status.llm_connection_failed')}: {result.message}")
+                else:
+                    # Claude or other non-supported presets
+                    st.caption(f"ℹ️ {tr('settings.llm.test_not_supported')}")
+
+                # Model selection: dropdown if models available, text input otherwise
+                if (
+                    st.session_state.llm_connection_tested
+                    and st.session_state.llm_connection_status == "success"
+                    and st.session_state.available_llm_models
+                    and not st.session_state.llm_manual_model_input
+                ):
+                    # Show dropdown with fetched models
+                    available_models = st.session_state.available_llm_models
+
+                    # Determine default index: try to find the current model in the list
+                    try:
+                        default_idx = available_models.index(default_model)
+                    except ValueError:
+                        default_idx = 0
+
+                    llm_model = st.selectbox(
+                        f"{tr('settings.llm.select_model')} *",
+                        options=available_models,
+                        index=default_idx,
+                        help=tr("settings.llm.model_help"),
+                        key=f"llm_model_select_{selected_preset}",
+                    )
+
+                    # Option to switch to manual input
+                    if st.checkbox(
+                        tr("settings.llm.manual_model_input"),
+                        value=False,
+                        key="llm_switch_to_manual",
+                    ):
+                        st.session_state.llm_manual_model_input = True
+                        safe_rerun()
+                else:
+                    # Default text input for model
+                    llm_model = st.text_input(
+                        f"{tr('settings.llm.model')} *",
+                        value=default_model,
+                        help=tr("settings.llm.model_help"),
+                        key=f"llm_model_input_{selected_preset}",
+                    )
 
         # ====================================================================
         # Column 2: ComfyUI Settings
         # ====================================================================
@@ -134,22 +239,12 @@ def render_advanced_settings():
 
                 # Local/Self-hosted ComfyUI configuration
                 st.markdown(f"**{tr('settings.comfyui.local_title')}**")
-                url_col, key_col = st.columns(2)
-                with url_col:
-                    comfyui_url = st.text_input(
-                        tr("settings.comfyui.comfyui_url"),
-                        value=comfyui_config.get("comfyui_url", "http://127.0.0.1:8188"),
-                        help=tr("settings.comfyui.comfyui_url_help"),
-                        key="comfyui_url_input"
-                    )
-                with key_col:
-                    comfyui_api_key = st.text_input(
-                        tr("settings.comfyui.comfyui_api_key"),
-                        value=comfyui_config.get("comfyui_api_key", ""),
-                        type="password",
-                        help=tr("settings.comfyui.comfyui_api_key_help"),
-                        key="comfyui_api_key_input"
-                    )
+                comfyui_url = st.text_input(
+                    tr("settings.comfyui.comfyui_url"),
+                    value=comfyui_config.get("comfyui_url", "http://127.0.0.1:8188"),
+                    help=tr("settings.comfyui.comfyui_url_help"),
+                    key="comfyui_url_input"
+                )
 
                 # Test connection button
                 if st.button(tr("btn.test_connection"), key="test_comfyui", use_container_width=True):
@@ -189,24 +284,26 @@ def render_advanced_settings():
         with col1:
             if st.button(tr("btn.save_config"), use_container_width=True, key="save_config_btn"):
                 try:
-                    # Validate and save LLM configuration
-                    if not (llm_api_key and llm_base_url and llm_model):
-                        st.error(tr("status.llm_config_incomplete"))
-                    else:
-                        config_manager.set_llm_config(llm_api_key, llm_base_url, llm_model)
+                    # Save LLM configuration
+                    # API key is optional for some providers (e.g., Ollama)
+                    api_key_needed = requires_api_key(selected_preset)
+                    can_save_llm = llm_base_url and llm_model and (llm_api_key or not api_key_needed)
+                    if can_save_llm:
+                        # Use a placeholder key for providers that don't need an API key
+                        effective_api_key = llm_api_key if llm_api_key else "dummy-key"
+                        config_manager.set_llm_config(effective_api_key, llm_base_url, llm_model)
 
-                    # Save ComfyUI configuration (optional fields, always save what's provided)
+                    # Save ComfyUI configuration
                     config_manager.set_comfyui_config(
                         comfyui_url=comfyui_url if comfyui_url else None,
-                        comfyui_api_key=comfyui_api_key if comfyui_api_key else None,
                         runninghub_api_key=runninghub_api_key if runninghub_api_key else None
                     )
 
-                    # Only save to file if LLM config is valid
-                    if llm_api_key and llm_base_url and llm_model:
-                        config_manager.save()
-                        st.success(tr("status.config_saved"))
-                        safe_rerun()
+                    # Save to file
+                    config_manager.save()
+
+                    st.success(tr("status.config_saved"))
+                    safe_rerun()
                 except Exception as e:
                     st.error(f"{tr('status.save_failed')}: {str(e)}")
diff --git a/web/i18n/locales/en_US.json b/web/i18n/locales/en_US.json
index f0a0d10..75113e9 100644
--- a/web/i18n/locales/en_US.json
+++ b/web/i18n/locales/en_US.json
@@ -139,10 +139,9 @@
     "status.video_generated": "✅ Video generated: {path}",
     "status.video_not_found": "Video file not found: {path}",
     "status.config_saved": "✅ Configuration saved",
     "status.config_reset": "✅ Configuration reset to defaults",
-    "status.llm_config_incomplete": "⚠️ LLM configuration incomplete, please fill in API Key, Base URL and Model",
     "status.save_failed": "Save failed",
     "status.connection_success": "✅ Connection successful",
     "status.connection_failed": "❌ Connection failed",
     "progress.generating_title": "Generating title...",
     "progress.generating_narrations": "Generating narrations...",
@@ -184,13 +183,21 @@
     "settings.llm.base_url_help": "API service address",
     "settings.llm.model": "Model",
     "settings.llm.model_help": "Model name",
+    "settings.llm.select_model": "Select Model",
+    "settings.llm.manual_model_input": "Enter model name manually",
+    "settings.llm.api_key_optional": "API Key (optional)",
+    "settings.llm.test_not_supported": "Connection test not supported for this provider",
+    "status.llm_connection_success": "Connection successful! Found {count} models",
+    "status.llm_connection_failed": "Connection failed",
+    "status.llm_auth_failed": "Authentication failed: Invalid API key",
+    "status.llm_timeout": "Connection timed out",
+    "status.fetching_models": "Fetching available models...",
+    "status.models_found": "Found {count} available models",
     "settings.comfyui.title": "🔧 ComfyUI Configuration",
     "settings.comfyui.local_title": "Local/Self-hosted ComfyUI",
     "settings.comfyui.cloud_title": "RunningHub Cloud",
     "settings.comfyui.comfyui_url": "ComfyUI Server URL",
     "settings.comfyui.comfyui_url_help": "Local or remote ComfyUI server address",
-    "settings.comfyui.comfyui_api_key": "ComfyUI API Key",
-    "settings.comfyui.comfyui_api_key_help": "Optional, get from https://platform.comfy.org/profile/api-keys",
     "settings.comfyui.runninghub_api_key": "RunningHub API Key",
     "settings.comfyui.runninghub_api_key_help": "Visit https://runninghub.ai to register and get API Key",
     "settings.comfyui.runninghub_hint": "No local ComfyUI? Use RunningHub Cloud:",
diff --git a/web/i18n/locales/zh_CN.json b/web/i18n/locales/zh_CN.json
index 001e0a0..9a22064 100644
--- a/web/i18n/locales/zh_CN.json
+++ b/web/i18n/locales/zh_CN.json
@@ -140,8 +140,7 @@
     "status.video_not_found": "视频文件未找到:{path}",
     "status.config_saved": "✅ 配置已保存",
     "status.config_reset": "✅ 配置已重置为默认值",
-    "status.llm_config_incomplete": "⚠️ LLM 配置不完整,请填写 API Key、Base URL 和 Model",
     "status.save_failed": "保存失败",
     "status.connection_success": "✅ 连接成功",
     "status.connection_failed": "❌ 连接失败",
     "progress.generating_title": "生成标题...",
@@ -184,13 +183,21 @@
     "settings.llm.base_url_help": "API 服务地址",
     "settings.llm.model": "Model",
     "settings.llm.model_help": "模型名称",
+    "settings.llm.select_model": "选择模型",
+    "settings.llm.manual_model_input": "手动输入模型名称",
+    "settings.llm.api_key_optional": "API Key(可选)",
+    "settings.llm.test_not_supported": "该服务商不支持连接测试",
+    "status.llm_connection_success": "连接成功!发现 {count} 个模型",
+    "status.llm_connection_failed": "连接失败",
+    "status.llm_auth_failed": "认证失败:API Key 无效",
+    "status.llm_timeout": "连接超时",
+    "status.fetching_models": "正在获取可用模型...",
+    "status.models_found": "发现 {count} 个可用模型",
     "settings.comfyui.title": "🔧 ComfyUI 配置",
     "settings.comfyui.local_title": "本地/自建 ComfyUI",
     "settings.comfyui.cloud_title": "RunningHub 云端",
     "settings.comfyui.comfyui_url": "ComfyUI 服务器地址",
     "settings.comfyui.comfyui_url_help": "本地或远程 ComfyUI 服务器地址",
-    "settings.comfyui.comfyui_api_key": "ComfyUI API 密钥",
-    "settings.comfyui.comfyui_api_key_help": "可选,访问 https://platform.comfy.org/profile/api-keys 获取",
     "settings.comfyui.runninghub_api_key": "RunningHub API 密钥",
     "settings.comfyui.runninghub_api_key_help": "访问 https://runninghub.ai 注册并获取 API Key",
     "settings.comfyui.runninghub_hint": "没有本地 ComfyUI?可用 RunningHub 云端:",