Add LLM configuration and MCP server management UI and backend functionality

- Implemented a new SPA page for LLM Bridge and MCP Server settings in `llm-config.js`.
- Added functionality for managing LLM and MCP configurations, including toggling, saving settings, and testing connections.
- Created HTTP endpoints in `llm_utils.py` for handling LLM chat, status checks, and MCP server configuration.
- Integrated model fetching from LaRuche and Ollama backends.
- Enhanced error handling and logging for better debugging and user feedback.
This commit is contained in:
infinition
2026-03-16 20:33:22 +01:00
parent aac77a3e76
commit b759ab6d4b
41 changed files with 9991 additions and 397 deletions

View File

@@ -1249,5 +1249,66 @@
"loki.quick_placeholder": "Quick type text here...",
"loki.quick_send": "Type",
"loki.quick_sent": "Text sent to target",
"loki.quick_error": "Failed to send text",
"nav.llm_chat": "LLM Chat",
"nav.llm_config": "LLM & MCP",
"llm_chat.checking": "checking...",
"llm_chat.disabled": "LLM disabled",
"llm_chat.online": "online",
"llm_chat.unavailable": "unavailable",
"llm_chat.disabled_msg": "LLM is disabled. Enable it in",
"llm_chat.settings_link": "Settings → LLM Bridge",
"llm_chat.thinking": "Bjorn is thinking...",
"llm_chat.placeholder": "Ask Bjorn anything about the network...",
"llm_chat.send": "SEND",
"llm_chat.clear_history": "Clear history",
"llm_chat.orch_log": "Orch Log",
"llm_chat.orch_title": "View LLM Orchestrator reasoning log",
"llm_chat.back_chat": "← Back to chat",
"llm_chat.session_started": "Session started. Type a question or command.",
"llm_chat.history_cleared": "History cleared.",
"llm_chat.back_to_chat": "Back to chat. Type a question or command.",
"llm_chat.loading_log": "Loading LLM Orchestrator reasoning log…",
"llm_chat.no_log": "No reasoning log yet. Enable llm_orchestrator_log_reasoning in config and run a cycle.",
"llm_chat.log_header": "Orchestrator log",
"llm_chat.log_error": "Error loading reasoning log",
"llm_chat.error": "Error",
"llm_chat.net_error": "Network error",
"llm_cfg.enable_bridge": "Enable LLM Bridge",
"llm_cfg.epd_comments": "LLM comments on EPD display",
"llm_cfg.backend": "BACKEND PRIORITY",
"llm_cfg.laruche_discovery": "Auto-discover LaRuche nodes via mDNS",
"llm_cfg.laruche_url": "LARUCHE NODE URL (optional — overrides discovery)",
"llm_cfg.ollama_url": "OLLAMA URL",
"llm_cfg.ollama_model": "MODEL",
"llm_cfg.provider": "PROVIDER",
"llm_cfg.api_model": "MODEL",
"llm_cfg.api_key": "API KEY",
"llm_cfg.api_key_placeholder": "Leave empty to keep current",
"llm_cfg.base_url": "CUSTOM BASE URL (OpenRouter / proxy)",
"llm_cfg.timeout": "TIMEOUT (s)",
"llm_cfg.max_tokens_chat": "MAX TOKENS (chat)",
"llm_cfg.max_tokens_epd": "MAX TOKENS (EPD comment)",
"llm_cfg.api_key_set": "API key: set",
"llm_cfg.api_key_not_set": "API key: not set",
"llm_cfg.save_llm": "SAVE LLM CONFIG",
"llm_cfg.test_connection": "TEST CONNECTION",
"llm_cfg.enable_mcp": "Enable MCP Server",
"llm_cfg.transport": "TRANSPORT",
"llm_cfg.mcp_port": "PORT (HTTP SSE only)",
"llm_cfg.exposed_tools": "EXPOSED TOOLS",
"llm_cfg.mcp_running": "Server running on port",
"llm_cfg.mcp_stopped": "Server not running.",
"llm_cfg.save_mcp": "SAVE MCP CONFIG",
"llm_cfg.saved_llm": "LLM config saved.",
"llm_cfg.saved_mcp": "MCP config saved.",
"llm_cfg.mcp_enabled": "MCP server enabled.",
"llm_cfg.mcp_disabled": "MCP server disabled.",
"llm_cfg.testing": "Testing…",
"llm_cfg.test_failed": "Failed",
"llm_cfg.error": "Error",
"llm_cfg.save_error": "Save error"
}

View File

@@ -932,5 +932,66 @@
"loki.quick_placeholder": "Taper du texte ici...",
"loki.quick_send": "Taper",
"loki.quick_sent": "Texte envoyé à la cible",
"loki.quick_error": "Échec de l'envoi",
"nav.llm_chat": "Chat LLM",
"nav.llm_config": "LLM & MCP",
"llm_chat.checking": "vérification...",
"llm_chat.disabled": "LLM désactivé",
"llm_chat.online": "en ligne",
"llm_chat.unavailable": "indisponible",
"llm_chat.disabled_msg": "Le LLM est désactivé. Activez-le dans",
"llm_chat.settings_link": "Paramètres → LLM Bridge",
"llm_chat.thinking": "Bjorn réfléchit...",
"llm_chat.placeholder": "Demandez à Bjorn n'importe quoi sur le réseau...",
"llm_chat.send": "ENVOYER",
"llm_chat.clear_history": "Effacer l'historique",
"llm_chat.orch_log": "Log Orch",
"llm_chat.orch_title": "Voir le log de raisonnement de l'orchestrateur LLM",
"llm_chat.back_chat": "← Retour au chat",
"llm_chat.session_started": "Session démarrée. Posez une question ou une commande.",
"llm_chat.history_cleared": "Historique effacé.",
"llm_chat.back_to_chat": "Retour au chat. Posez une question ou une commande.",
"llm_chat.loading_log": "Chargement du log de raisonnement…",
"llm_chat.no_log": "Aucun log de raisonnement. Activez llm_orchestrator_log_reasoning dans la config.",
"llm_chat.log_header": "Log orchestrateur",
"llm_chat.log_error": "Erreur lors du chargement du log",
"llm_chat.error": "Erreur",
"llm_chat.net_error": "Erreur réseau",
"llm_cfg.enable_bridge": "Activer le LLM Bridge",
"llm_cfg.epd_comments": "Commentaires LLM sur l'écran EPD",
"llm_cfg.backend": "PRIORITÉ BACKEND",
"llm_cfg.laruche_discovery": "Découverte automatique LaRuche via mDNS",
"llm_cfg.laruche_url": "URL NŒUD LARUCHE (optionnel — override découverte)",
"llm_cfg.ollama_url": "URL OLLAMA",
"llm_cfg.ollama_model": "MODÈLE",
"llm_cfg.provider": "FOURNISSEUR",
"llm_cfg.api_model": "MODÈLE",
"llm_cfg.api_key": "CLÉ API",
"llm_cfg.api_key_placeholder": "Laisser vide pour conserver la clé actuelle",
"llm_cfg.base_url": "URL DE BASE PERSONNALISÉE (OpenRouter / proxy)",
"llm_cfg.timeout": "TIMEOUT (s)",
"llm_cfg.max_tokens_chat": "TOKENS MAX (chat)",
"llm_cfg.max_tokens_epd": "TOKENS MAX (commentaire EPD)",
"llm_cfg.api_key_set": "Clé API : définie",
"llm_cfg.api_key_not_set": "Clé API : non définie",
"llm_cfg.save_llm": "SAUVEGARDER CONFIG LLM",
"llm_cfg.test_connection": "TESTER LA CONNEXION",
"llm_cfg.enable_mcp": "Activer le serveur MCP",
"llm_cfg.transport": "TRANSPORT",
"llm_cfg.mcp_port": "PORT (HTTP SSE uniquement)",
"llm_cfg.exposed_tools": "OUTILS EXPOSÉS",
"llm_cfg.mcp_running": "Serveur actif sur le port",
"llm_cfg.mcp_stopped": "Serveur arrêté.",
"llm_cfg.save_mcp": "SAUVEGARDER CONFIG MCP",
"llm_cfg.saved_llm": "Configuration LLM sauvegardée.",
"llm_cfg.saved_mcp": "Configuration MCP sauvegardée.",
"llm_cfg.mcp_enabled": "Serveur MCP activé.",
"llm_cfg.mcp_disabled": "Serveur MCP désactivé.",
"llm_cfg.testing": "Test en cours…",
"llm_cfg.test_failed": "Échec",
"llm_cfg.error": "Erreur",
"llm_cfg.save_error": "Erreur de sauvegarde"
}