Add LLM configuration and MCP server management UI and backend functionality

- Implemented a new SPA page for LLM Bridge and MCP Server settings in `llm-config.js`.
- Added functionality for managing LLM and MCP configurations, including toggling, saving settings, and testing connections.
- Created HTTP endpoints in `llm_utils.py` for handling LLM chat, status checks, and MCP server configuration.
- Integrated model fetching from LaRuche and Ollama backends.
- Enhanced error handling and logging for better debugging and user feedback.
This commit is contained in:
infinition
2026-03-16 20:33:22 +01:00
parent aac77a3e76
commit b759ab6d4b
41 changed files with 9991 additions and 397 deletions

View File

@@ -1249,5 +1249,66 @@
"loki.quick_placeholder": "Quick type text here...",
"loki.quick_send": "Type",
"loki.quick_sent": "Text sent to target",
"loki.quick_error": "Failed to send text"
"loki.quick_error": "Failed to send text",
"nav.llm_chat": "LLM Chat",
"nav.llm_config": "LLM & MCP",
"llm_chat.checking": "checking...",
"llm_chat.disabled": "LLM disabled",
"llm_chat.online": "online",
"llm_chat.unavailable": "unavailable",
"llm_chat.disabled_msg": "LLM is disabled. Enable it in",
"llm_chat.settings_link": "Settings → LLM Bridge",
"llm_chat.thinking": "Bjorn is thinking...",
"llm_chat.placeholder": "Ask Bjorn anything about the network...",
"llm_chat.send": "SEND",
"llm_chat.clear_history": "Clear history",
"llm_chat.orch_log": "Orch Log",
"llm_chat.orch_title": "View LLM Orchestrator reasoning log",
"llm_chat.back_chat": "← Back to chat",
"llm_chat.session_started": "Session started. Type a question or command.",
"llm_chat.history_cleared": "History cleared.",
"llm_chat.back_to_chat": "Back to chat. Type a question or command.",
"llm_chat.loading_log": "Loading LLM Orchestrator reasoning log…",
"llm_chat.no_log": "No reasoning log yet. Enable llm_orchestrator_log_reasoning in config and run a cycle.",
"llm_chat.log_header": "Orchestrator log",
"llm_chat.log_error": "Error loading reasoning log",
"llm_chat.error": "Error",
"llm_chat.net_error": "Network error",
"llm_cfg.enable_bridge": "Enable LLM Bridge",
"llm_cfg.epd_comments": "LLM comments on EPD display",
"llm_cfg.backend": "BACKEND PRIORITY",
"llm_cfg.laruche_discovery": "Auto-discover LaRuche nodes via mDNS",
"llm_cfg.laruche_url": "LARUCHE NODE URL (optional — overrides discovery)",
"llm_cfg.ollama_url": "OLLAMA URL",
"llm_cfg.ollama_model": "MODEL",
"llm_cfg.provider": "PROVIDER",
"llm_cfg.api_model": "MODEL",
"llm_cfg.api_key": "API KEY",
"llm_cfg.api_key_placeholder": "Leave empty to keep current",
"llm_cfg.base_url": "CUSTOM BASE URL (OpenRouter / proxy)",
"llm_cfg.timeout": "TIMEOUT (s)",
"llm_cfg.max_tokens_chat": "MAX TOKENS (chat)",
"llm_cfg.max_tokens_epd": "MAX TOKENS (EPD comment)",
"llm_cfg.api_key_set": "API key: set",
"llm_cfg.api_key_not_set": "API key: not set",
"llm_cfg.save_llm": "SAVE LLM CONFIG",
"llm_cfg.test_connection": "TEST CONNECTION",
"llm_cfg.enable_mcp": "Enable MCP Server",
"llm_cfg.transport": "TRANSPORT",
"llm_cfg.mcp_port": "PORT (HTTP SSE only)",
"llm_cfg.exposed_tools": "EXPOSED TOOLS",
"llm_cfg.mcp_running": "Server running on port",
"llm_cfg.mcp_stopped": "Server not running.",
"llm_cfg.save_mcp": "SAVE MCP CONFIG",
"llm_cfg.saved_llm": "LLM config saved.",
"llm_cfg.saved_mcp": "MCP config saved.",
"llm_cfg.mcp_enabled": "MCP server enabled.",
"llm_cfg.mcp_disabled": "MCP server disabled.",
"llm_cfg.testing": "Testing…",
"llm_cfg.test_failed": "Failed",
"llm_cfg.error": "Error",
"llm_cfg.save_error": "Save error"
}