Add LLM configuration and MCP server management UI and backend functionality

- Implemented a new SPA page for LLM Bridge and MCP Server settings in `llm-config.js`.
- Added functionality for managing LLM and MCP configurations, including toggling, saving settings, and testing connections.
- Created HTTP endpoints in `llm_utils.py` for handling LLM chat, status checks, and MCP server configuration.
- Integrated model fetching from LaRuche and Ollama backends.
- Enhanced error handling and logging for better debugging and user feedback.
This commit is contained in:
infinition
2026-03-16 20:33:22 +01:00
parent aac77a3e76
commit b759ab6d4b
41 changed files with 9991 additions and 397 deletions

View File

@@ -932,5 +932,66 @@
"loki.quick_placeholder": "Taper du texte ici...",
"loki.quick_send": "Taper",
"loki.quick_sent": "Texte envoyé à la cible",
"loki.quick_error": "Échec de l'envoi",
"nav.llm_chat": "Chat LLM",
"nav.llm_config": "LLM & MCP",
"llm_chat.checking": "vérification...",
"llm_chat.disabled": "LLM désactivé",
"llm_chat.online": "en ligne",
"llm_chat.unavailable": "indisponible",
"llm_chat.disabled_msg": "Le LLM est désactivé. Activez-le dans",
"llm_chat.settings_link": "Paramètres → LLM Bridge",
"llm_chat.thinking": "Bjorn réfléchit...",
"llm_chat.placeholder": "Demandez à Bjorn n'importe quoi sur le réseau...",
"llm_chat.send": "ENVOYER",
"llm_chat.clear_history": "Effacer l'historique",
"llm_chat.orch_log": "Log Orch",
"llm_chat.orch_title": "Voir le log de raisonnement de l'orchestrateur LLM",
"llm_chat.back_chat": "← Retour au chat",
"llm_chat.session_started": "Session démarrée. Posez une question ou une commande.",
"llm_chat.history_cleared": "Historique effacé.",
"llm_chat.back_to_chat": "Retour au chat. Posez une question ou une commande.",
"llm_chat.loading_log": "Chargement du log de raisonnement…",
"llm_chat.no_log": "Aucun log de raisonnement. Activez llm_orchestrator_log_reasoning dans la config.",
"llm_chat.log_header": "Log orchestrateur",
"llm_chat.log_error": "Erreur lors du chargement du log",
"llm_chat.error": "Erreur",
"llm_chat.net_error": "Erreur réseau",
"llm_cfg.enable_bridge": "Activer le LLM Bridge",
"llm_cfg.epd_comments": "Commentaires LLM sur l'écran EPD",
"llm_cfg.backend": "PRIORITÉ BACKEND",
"llm_cfg.laruche_discovery": "Découverte automatique LaRuche via mDNS",
"llm_cfg.laruche_url": "URL NŒUD LARUCHE (optionnel — override découverte)",
"llm_cfg.ollama_url": "URL OLLAMA",
"llm_cfg.ollama_model": "MODÈLE",
"llm_cfg.provider": "FOURNISSEUR",
"llm_cfg.api_model": "MODÈLE",
"llm_cfg.api_key": "CLÉ API",
"llm_cfg.api_key_placeholder": "Laisser vide pour conserver la clé actuelle",
"llm_cfg.base_url": "URL DE BASE PERSONNALISÉE (OpenRouter / proxy)",
"llm_cfg.timeout": "TIMEOUT (s)",
"llm_cfg.max_tokens_chat": "TOKENS MAX (chat)",
"llm_cfg.max_tokens_epd": "TOKENS MAX (commentaire EPD)",
"llm_cfg.api_key_set": "Clé API : définie",
"llm_cfg.api_key_not_set": "Clé API : non définie",
"llm_cfg.save_llm": "SAUVEGARDER CONFIG LLM",
"llm_cfg.test_connection": "TESTER LA CONNEXION",
"llm_cfg.enable_mcp": "Activer le serveur MCP",
"llm_cfg.transport": "TRANSPORT",
"llm_cfg.mcp_port": "PORT (HTTP SSE uniquement)",
"llm_cfg.exposed_tools": "OUTILS EXPOSÉS",
"llm_cfg.mcp_running": "Serveur actif sur le port",
"llm_cfg.mcp_stopped": "Serveur arrêté.",
"llm_cfg.save_mcp": "SAUVEGARDER CONFIG MCP",
"llm_cfg.saved_llm": "Configuration LLM sauvegardée.",
"llm_cfg.saved_mcp": "Configuration MCP sauvegardée.",
"llm_cfg.mcp_enabled": "Serveur MCP activé.",
"llm_cfg.mcp_disabled": "Serveur MCP désactivé.",
"llm_cfg.testing": "Test en cours…",
"llm_cfg.test_failed": "Échec",
"llm_cfg.error": "Erreur",
"llm_cfg.save_error": "Erreur de sauvegarde"
}