diff --git a/Bjorn.py b/Bjorn.py index bac559e..236708f 100644 --- a/Bjorn.py +++ b/Bjorn.py @@ -586,6 +586,25 @@ if __name__ == "__main__": except Exception as e: logger.warning("Loki init skipped: %s", e) + # LLM Bridge — warm up singleton (starts LaRuche mDNS discovery if enabled) + try: + from llm_bridge import LLMBridge + LLMBridge() # Initialise singleton, kicks off background discovery + logger.info("LLM Bridge initialised") + except Exception as e: + logger.warning("LLM Bridge init skipped: %s", e) + + # MCP Server — start if enabled in config + try: + import mcp_server + if shared_data.config.get("mcp_enabled", False): + mcp_server.start() + logger.info("MCP server started") + else: + logger.info("MCP server loaded (disabled — enable via Settings)") + except Exception as e: + logger.warning("MCP server init skipped: %s", e) + # Signal Handlers exit_handler = lambda s, f: handle_exit( s, diff --git a/LLM_MCP_ARCHITECTURE.md b/LLM_MCP_ARCHITECTURE.md new file mode 100644 index 0000000..fb4acd7 --- /dev/null +++ b/LLM_MCP_ARCHITECTURE.md @@ -0,0 +1,916 @@ +# BJORN — LLM Bridge, MCP Server & LLM Orchestrator +## Complete architecture, operation, commands, fallbacks + +--- + +## Table of contents + +1. [Overview](#1-overview) +2. [Created / modified files](#2-created--modified-files) +3. [LLM Bridge (`llm_bridge.py`)](#3-llm-bridge-llm_bridgepy) +4. [MCP Server (`mcp_server.py`)](#4-mcp-server-mcp_serverpy) +5. [LLM Orchestrator (`llm_orchestrator.py`)](#5-llm-orchestrator-llm_orchestratorpy) +6. [Orchestrator & Scheduler integration](#6-orchestrator--scheduler-integration) +7. [Web Utils LLM (`web_utils/llm_utils.py`)](#7-web-utils-llm-web_utilsllm_utilspy) +8. [EPD comment integration (`comment.py`)](#8-epd-comment-integration-commentpy) +9. [Configuration (`shared.py`)](#9-configuration-sharedpy) +10. [HTTP Routes (`webapp.py`)](#10-http-routes-webapppy) +11. [Web interfaces](#11-web-interfaces) +12. [Startup (`Bjorn.py`)](#12-startup-bjornpy) +13. [LaRuche / LAND Protocol compatibility](#13-laruche--land-protocol-compatibility) +14. [Optional dependencies](#14-optional-dependencies) +15. [Quick activation & configuration](#15-quick-activation--configuration) +16. [Complete API endpoint reference](#16-complete-api-endpoint-reference) +17. [Queue priority system](#17-queue-priority-system) +18. [Fallbacks & graceful degradation](#18-fallbacks--graceful-degradation) +19. [Call sequences](#19-call-sequences) + +--- + +## 1. Overview + +``` +┌─────────────────────────────────────────────────────────────────────┐ +│ BJORN (RPi) │ +│ │ +│ ┌─────────────┐ ┌──────────────────┐ ┌─────────────────────┐ │ +│ │ Core BJORN │ │ MCP Server │ │ Web UI │ │ +│ │ (unchanged) │ │ (mcp_server.py) │ │ /chat.html │ │ +│ │ │ │ 7 exposed tools │ │ /mcp-config.html │ │ +│ │ comment.py │ │ HTTP SSE / stdio │ │ ↳ Orch Log button │ │ +│ │ ↕ LLM hook │ │ │ │ │ │ +│ └──────┬──────┘ └────────┬─────────┘ └──────────┬──────────┘ │ +│ └─────────────────────────────────────────────┘ │ +│ │ │ +│ ┌──────────────────────────▼─────────────────────────────────┐ │ +│ │ LLM Bridge (llm_bridge.py) │ │ +│ │ Singleton · Thread-safe │ │ +│ │ │ │ +│ │ Automatic cascade: │ │ +│ │ 1. LaRuche node (LAND/mDNS → HTTP POST /infer) │ │ +│ │ 2. Local Ollama (HTTP POST /api/chat) │ │ +│ │ 3. External API (Anthropic / OpenAI / OpenRouter) │ │ +│ │ 4. 
None (→ fallback templates in comment.py) │ │ +│ │ │ │ +│ │ Agentic tool-calling loop (stop_reason=tool_use, ≤6 turns) │ │ +│ │ _BJORN_TOOLS: 7 tools in Anthropic format │ │ +│ └─────────────────────────────────────────────────────────────┘ │ +│ │ │ +│ ┌──────────────────────────▼─────────────────────────────────┐ │ +│ │ LLM Orchestrator (llm_orchestrator.py) │ │ +│ │ │ │ +│ │ mode = none → LLM has no role in scheduling │ │ +│ │ mode = advisor → LLM suggests 1 action/cycle (prio 85) │ │ +│ │ mode = autonomous→ own thread, loop + tools (prio 82) │ │ +│ │ │ │ +│ │ Fingerprint (hosts↑, vulns↑, creds↑, queue_id↑) │ │ +│ │ → skip LLM if nothing new (token savings) │ │ +│ └─────────────────────────────────────────────────────────────┘ │ +│ │ │ +│ ┌──────────────────────────▼─────────────────────────────────┐ │ +│ │ Action Queue (SQLite) │ │ +│ │ scheduler=40 normal=50 MCP=80 autonomous=82 advisor=85│ │ +│ └─────────────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────────┘ + ↕ mDNS _ai-inference._tcp.local. (zeroconf) +┌──────────────────────────────────────────┐ +│ LaRuche Swarm (LAN) │ +│ Node A → Mistral 7B :8419 │ +│ Node B → DeepSeek Coder :8419 │ +│ Node C → Phi-3 Mini :8419 │ +└──────────────────────────────────────────┘ +``` + +**Design principles:** +- Everything is **disabled by default** — zero impact if not configured +- All dependencies are **optional** — silent import if missing +- **Systematic fallback** at every level — Bjorn never crashes because of the LLM +- The bridge is a **singleton** — one instance per process, thread-safe +- EPD comments preserve their **exact original behaviour** if LLM is disabled +- The LLM is the **brain** (decides what to do), the orchestrator is the **arms** (executes) + +--- + +## 2. Created / modified files + +### Created files + +| File | Approx. size | Role | +|------|-------------|------| +| `llm_bridge.py` | ~450 lines | LLM Singleton — backend cascade + agentic tool-calling loop | +| `mcp_server.py` | ~280 lines | FastMCP MCP Server — 7 Bjorn tools | +| `web_utils/llm_utils.py` | ~220 lines | LLM/MCP HTTP endpoints (web_utils pattern) | +| `llm_orchestrator.py` | ~410 lines | LLM Orchestrator — advisor & autonomous modes | +| `web/chat.html` | ~300 lines | Chat interface + Orch Log button | +| `web/mcp-config.html` | ~400 lines | LLM & MCP configuration page | + +### Modified files + +| File | What changed | +|------|-------------| +| `shared.py` | +45 config keys (LLM bridge, MCP, orchestrator) | +| `comment.py` | LLM hook in `get_comment()` — 12 lines added | +| `utils.py` | +1 entry in lazy WebUtils registry: `"llm_utils"` | +| `webapp.py` | +9 GET/POST routes in `_register_routes_once()` | +| `Bjorn.py` | LLM Bridge warm-up + conditional MCP server start | +| `orchestrator.py` | +`LLMOrchestrator` lifecycle + advisor call in background tasks | +| `action_scheduler.py` | +skip scheduler if LLM autonomous only (`llm_orchestrator_skip_scheduler`) | +| `requirements.txt` | +3 comment lines (optional dependencies documented) | + +--- + +## 3. LLM Bridge (`llm_bridge.py`) + +### Internal architecture + +``` +LLMBridge (Singleton) +├── __init__() Initialises singleton, launches LaRuche discovery +├── complete() Main API — cascades all backends +│ └── tools=None/[...] 
Optional param to enable tool-calling
├── generate_comment() Generates a short EPD comment (≤80 tokens)
├── chat() Stateful chat with per-session history
│ └── tools=_BJORN_TOOLS if llm_chat_tools_enabled=True
├── clear_history() Clears a session's history
├── status() Returns bridge state (for the UI)
│
├── _start_laruche_discovery() Starts mDNS thread in background
│ └── land_protocol.discover_node() Listens to _ai-inference._tcp.local. continuously
│
├── _call_laruche() Backend 1 — POST http://[node]:8419/infer
├── _call_ollama() Backend 2 — POST http://localhost:11434/api/chat
├── _call_anthropic() Backend 3a — POST api.anthropic.com + AGENTIC LOOP
│ └── loop ≤6 turns: send → tool_use → execute → feed result → repeat
├── _call_openai_compat() Backend 3b — POST [base_url]/v1/chat/completions
│
├── _execute_tool(name, inputs) Dispatches to mcp_server._impl_*
│ └── gate: checks mcp_allowed_tools before executing
│
└── _build_system_prompt() Builds system prompt with live Bjorn context

_BJORN_TOOLS : List[Dict] Anthropic-format definitions for the 7 MCP tools
```

### _BJORN_TOOLS — full list

```python
_BJORN_TOOLS = [
    {"name": "get_hosts", "description": "...", "input_schema": {...}},
    {"name": "get_vulnerabilities", ...},
    {"name": "get_credentials", ...},
    {"name": "get_action_history", ...},
    {"name": "get_status", ...},
    {"name": "run_action", ...}, # gated by mcp_allowed_tools
    {"name": "query_db", ...}, # SELECT only
]
```

### Backend cascade

```
llm_backend = "auto" → LaRuche → Ollama → API → None
llm_backend = "laruche" → LaRuche only
llm_backend = "ollama" → Ollama only
llm_backend = "api" → External API only
```

At each step, if a backend fails (timeout, network error, missing model), the next one is tried automatically; the failure is only logged (WARNING). If all fail, `complete()` returns `None`.

### Agentic tool-calling loop (`_call_anthropic`)

When `tools` is passed to `complete()`, the Anthropic backend enters agentic mode:

```
_call_anthropic(messages, max_tokens, timeout, system, api_key, tools)
 │
 ├─ POST /v1/messages {tools: [...]}
 │
 ├─ [stop_reason = "tool_use"]
 │ append assistant turn (its tool_use blocks)
 │ for each tool_use block:
 │ result = _execute_tool(name, inputs)
 │ append {role: "user", content: [{type: "tool_result", tool_use_id, content: result}, ...]}
 │ POST /v1/messages [messages + tool results] ← next turn
 │
 └─ [stop_reason = "end_turn"] → returns final text
 [6 rounds exhausted] → returns None (treated like any other backend failure)
```

`_execute_tool()` dispatches directly to `mcp_server._impl_*` (no network), checking `mcp_allowed_tools` for `run_action`.

### Tool-calling in chat (`chat()`)

If `llm_chat_tools_enabled = True`, the chat passes `tools=_BJORN_TOOLS` to the backend, letting the LLM answer with real-time data (hosts, vulns, creds…) rather than relying only on its training knowledge.

### Chat history

- Each session has its own history (key = `session_id`)
- Special session `"llm_orchestrator"`: contains the autonomous orchestrator's reasoning
- Max size configurable: `llm_chat_history_size` (default: 20 messages)
- History is **in-memory only** — not persisted across restarts
- Thread-safe via `_hist_lock`

---

## 4. MCP Server (`mcp_server.py`)

### What is MCP?

The **Model Context Protocol** (Anthropic) is an open-source protocol that lets AI agents (Claude Desktop, custom agents, etc.) use external tools via a standardised interface.

By enabling Bjorn's MCP server, **any MCP client can query and control Bjorn** — without knowing the internal DB structure.
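As orientation, here is a minimal sketch of how a tool is plausibly registered in `mcp_server.py` with FastMCP (the decorator API from the `mcp` Python SDK). Tool names match the table below; the exact wiring in the real file may differ:

```python
# Hypothetical sketch, not the actual mcp_server.py.
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("bjorn")

@mcp.tool()
def get_status() -> str:
    """Real-time state: mode, active action, counters."""
    return _impl_get_status()  # assumed _impl_* helper, shared with llm_bridge._execute_tool()
```

Keeping the logic in plain `_impl_*` functions is what lets `llm_bridge._execute_tool()` run the same code in-process, without a network hop.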
+ +### Exposed tools + +| Tool | Arguments | Description | +|------|-----------|-------------| +| `get_hosts` | `alive_only: bool = True` | Returns discovered hosts (IP, MAC, hostname, OS, ports) | +| `get_vulnerabilities` | `host_ip: str = ""`, `limit: int = 100` | Returns discovered CVE vulnerabilities | +| `get_credentials` | `service: str = ""`, `limit: int = 100` | Returns captured credentials (SSH, FTP, SMB…) | +| `get_action_history` | `limit: int = 50`, `action_name: str = ""` | History of executed actions | +| `get_status` | *(none)* | Real-time state: mode, active action, counters | +| `run_action` | `action_name: str`, `target_ip: str`, `target_mac: str = ""` | Queues a Bjorn action (MCP priority = 80) | +| `query_db` | `sql: str`, `params: str = "[]"` | Free SELECT against the SQLite DB (read-only) | + +**Security:** each tool checks `mcp_allowed_tools` — unlisted tools return a clean error. `query_db` rejects anything that is not a `SELECT`. + +### `_impl_run_action` — priority detail + +```python +_MCP_PRIORITY = 80 # > scheduler(40) > normal(50) + +sd.db.queue_action( + action_name=action_name, + mac=mac, # resolved from hosts WHERE ip=? if not supplied + ip=target_ip, + priority=_MCP_PRIORITY, + trigger="mcp", + metadata={"decision_method": "mcp", "decision_origin": "mcp"}, +) +sd.queue_event.set() # wakes the orchestrator immediately +``` + +### Available transports + +| Transport | Config | Usage | +|-----------|--------|-------| +| `http` (default) | `mcp_transport: "http"`, `mcp_port: 8765` | Accessible from any MCP client on LAN via SSE | +| `stdio` | `mcp_transport: "stdio"` | Claude Desktop, CLI agents | + +--- + +## 5. LLM Orchestrator (`llm_orchestrator.py`) + +The LLM Orchestrator transforms Bjorn from a scriptable tool into an autonomous agent. It is **completely optional and disableable** via `llm_orchestrator_mode = "none"`. 
+ +### Operating modes + +| Mode | Config value | Operation | +|------|-------------|-----------| +| Disabled | `"none"` (default) | LLM plays no role in planning | +| Advisor | `"advisor"` | LLM consulted periodically, suggests 1 action | +| Autonomous | `"autonomous"` | Own thread, LLM observes + plans with tools | + +### Internal architecture + +``` +LLMOrchestrator +├── start() Starts autonomous thread if mode=autonomous +├── stop() Stops thread (join 15s max) +├── restart_if_mode_changed() Called from orchestrator.run() each iteration +├── is_active() True if autonomous thread is alive +│ +├── [ADVISOR MODE] +│ advise() → called from orchestrator._process_background_tasks() +│ ├── _build_snapshot() → compact dict (hosts, vulns, creds, queue) +│ ├── LLMBridge().complete(prompt, system) +│ └── _apply_advisor_response(raw, allowed) +│ ├── parse JSON {"action": str, "target_ip": str, "reason": str} +│ ├── validate action ∈ allowed +│ └── db.queue_action(priority=85, trigger="llm_advisor") +│ +└── [AUTONOMOUS MODE] + _autonomous_loop() Thread "LLMOrchestrator" (daemon) + └── loop: + _compute_fingerprint() → (hosts, vulns, creds, max_queue_id) + _has_actionable_change() → skip if nothing increased + _run_autonomous_cycle() + ├── filter tools: read-only always + run_action if in allowed + ├── LLMBridge().complete(prompt, system, tools=[...]) + │ └── _call_anthropic() agentic loop + │ → LLM calls run_action via tools + │ → _execute_tool → _impl_run_action → queue + └── if llm_orchestrator_log_reasoning=True: + logger.info("[LLM_ORCH_REASONING]...") + _push_to_chat() → "llm_orchestrator" session in LLMBridge + sleep(llm_orchestrator_interval_s) +``` + +### Fingerprint and smart skip + +```python +def _compute_fingerprint(self) -> tuple: + # (host_count, vuln_count, cred_count, max_completed_queue_id) + return (hosts, vulns, creds, last_id) + +def _has_actionable_change(self, fp: tuple) -> bool: + if self._last_fingerprint is None: + return True # first cycle always runs + # Triggers ONLY if something INCREASED + # hosts going offline → not actionable + return any(fp[i] > self._last_fingerprint[i] for i in range(len(fp))) +``` + +**Token savings:** if `llm_orchestrator_skip_if_no_change = True` (default), the LLM cycle is skipped if no new hosts/vulns/creds and no action completed since the last cycle. + +### LLM priorities vs queue + +```python +_ADVISOR_PRIORITY = 85 # advisor > MCP(80) > normal(50) > scheduler(40) +_AUTONOMOUS_PRIORITY = 82 # autonomous slightly below advisor +``` + +### Autonomous system prompt — example + +``` +"You are Bjorn's autonomous orchestrator, running on a Raspberry Pi network security tool. +Current state: 12 hosts discovered, 3 vulnerabilities, 1 credentials. +Operation mode: ATTACK. Hard limit: at most 3 run_action calls per cycle. +Only these action names may be queued: NmapScan, SSHBruteforce, SMBScan. +Strategy: prioritise unexplored services, hosts with high port counts, and hosts with no recent scans. +Do not queue duplicate actions already pending or recently successful. +Use Norse references occasionally. Be terse and tactical." 
+``` + +### Advisor response format + +```json +// Action recommended: +{"action": "NmapScan", "target_ip": "192.168.1.42", "reason": "unexplored host, 0 open ports known"} + +// Nothing to do: +{"action": null} +``` + +### Reasoning log + +When `llm_orchestrator_log_reasoning = True`: +- Full reasoning is logged via `logger.info("[LLM_ORCH_REASONING]...")` +- It is also injected into the `"llm_orchestrator"` session in `LLMBridge._chat_histories` +- Viewable in real time in `chat.html` via the **Orch Log** button + +--- + +## 6. Orchestrator & Scheduler integration + +### `orchestrator.py` + +```python +# __init__ +self.llm_orchestrator = None +self._init_llm_orchestrator() + +# _init_llm_orchestrator() +if shared_data.config.get("llm_enabled") and shared_data.config.get("llm_orchestrator_mode") != "none": + from llm_orchestrator import LLMOrchestrator + self.llm_orchestrator = LLMOrchestrator(shared_data) + self.llm_orchestrator.start() + +# run() — each iteration +self._sync_llm_orchestrator() # starts/stops thread according to runtime config + +# _process_background_tasks() +if self.llm_orchestrator and mode == "advisor": + self.llm_orchestrator.advise() +``` + +### `action_scheduler.py` — skip option + +```python +# In run(), each iteration: +_llm_skip = bool( + shared_data.config.get("llm_orchestrator_skip_scheduler", False) + and shared_data.config.get("llm_orchestrator_mode") == "autonomous" + and shared_data.config.get("llm_enabled", False) +) + +if not _llm_skip: + self._publish_all_upcoming() # step 2: publish due actions + self._evaluate_global_actions() # step 3: global evaluation + self.evaluate_all_triggers() # step 4: per-host triggers +# Steps 1 (promote due) and 5 (cleanup/priorities) always run +``` + +When `llm_orchestrator_skip_scheduler = True` + `mode = autonomous` + `llm_enabled = True`: +- The scheduler no longer publishes automatic actions (no more `B_require`, `B_trigger`, etc.) +- The autonomous LLM becomes **sole master of the queue** +- Queue hygiene (promotions, cleanup) remains active + +--- + +## 7. Web Utils LLM (`web_utils/llm_utils.py`) + +Follows the exact **same pattern** as all other `web_utils` (constructor `__init__(self, shared_data)`, methods called by `webapp.py`). + +### Methods + +| Method | Type | Description | +|--------|------|-------------| +| `get_llm_status(handler)` | GET | LLM bridge state (active backend, LaRuche URL…) | +| `get_llm_config(handler)` | GET | Current LLM config (api_key masked) | +| `get_llm_reasoning(handler)` | GET | `llm_orchestrator` session history (reasoning log) | +| `handle_chat(data)` | POST | Sends a message, returns LLM response | +| `clear_chat_history(data)` | POST | Clears a session's history | +| `get_mcp_status(handler)` | GET | MCP server state (running, port, transport) | +| `toggle_mcp(data)` | POST | Enables/disables MCP server + saves config | +| `save_mcp_config(data)` | POST | Saves MCP config (tools, port, transport) | +| `save_llm_config(data)` | POST | Saves LLM config (all parameters) | + +--- + +## 8. 
EPD comment integration (`comment.py`) + +### Behaviour before modification + +``` +get_comment(status, lang, params) + └── if delay elapsed OR status changed + └── _pick_text(status, lang, params) ← SQLite DB + └── returns weighted text +``` + +### Behaviour after modification + +``` +get_comment(status, lang, params) + └── if delay elapsed OR status changed + │ + ├── [if llm_comments_enabled = True] + │ └── LLMBridge().generate_comment(status, params) + │ ├── success → LLM text (≤12 words, ~8s max) + │ └── failure/timeout → text = None + │ + └── [if text = None] ← SYSTEMATIC FALLBACK + └── _pick_text(status, lang, params) ← original behaviour + └── returns weighted DB text +``` + +**Original behaviour preserved 100% if LLM disabled or failing.** + +--- + +## 9. Configuration (`shared.py`) + +### LLM Bridge section (`__title_llm__`) + +| Key | Default | Type | Description | +|-----|---------|------|-------------| +| `llm_enabled` | `False` | bool | **Master toggle** — activates the entire bridge | +| `llm_comments_enabled` | `False` | bool | Use LLM for EPD comments | +| `llm_chat_enabled` | `True` | bool | Enable /chat.html interface | +| `llm_chat_tools_enabled` | `False` | bool | Enable tool-calling in web chat | +| `llm_backend` | `"auto"` | str | `auto` \| `laruche` \| `ollama` \| `api` | +| `llm_laruche_discovery` | `True` | bool | Auto-discover LaRuche nodes via mDNS | +| `llm_laruche_url` | `""` | str | Manual LaRuche URL (overrides discovery) | +| `llm_ollama_url` | `"http://127.0.0.1:11434"` | str | Local Ollama URL | +| `llm_ollama_model` | `"phi3:mini"` | str | Ollama model to use | +| `llm_api_provider` | `"anthropic"` | str | `anthropic` \| `openai` \| `openrouter` | +| `llm_api_key` | `""` | str | API key (masked in UI) | +| `llm_api_model` | `"claude-haiku-4-5-20251001"` | str | External API model | +| `llm_api_base_url` | `""` | str | Custom base URL (OpenRouter, proxy…) | +| `llm_timeout_s` | `30` | int | Global LLM call timeout (seconds) | +| `llm_max_tokens` | `500` | int | Max tokens for chat | +| `llm_comment_max_tokens` | `80` | int | Max tokens for EPD comments | +| `llm_chat_history_size` | `20` | int | Max messages per chat session | + +### MCP Server section (`__title_mcp__`) + +| Key | Default | Type | Description | +|-----|---------|------|-------------| +| `mcp_enabled` | `False` | bool | Enable MCP server | +| `mcp_transport` | `"http"` | str | `http` (SSE) \| `stdio` | +| `mcp_port` | `8765` | int | HTTP SSE port | +| `mcp_allowed_tools` | `[all]` | list | List of authorised MCP tools | + +### LLM Orchestrator section (`__title_llm_orch__`) + +| Key | Default | Type | Description | +|-----|---------|------|-------------| +| `llm_orchestrator_mode` | `"none"` | str | `none` \| `advisor` \| `autonomous` | +| `llm_orchestrator_interval_s` | `60` | int | Delay between autonomous cycles (min 30s) | +| `llm_orchestrator_max_actions` | `3` | int | Max actions per autonomous cycle | +| `llm_orchestrator_allowed_actions` | `[]` | list | Actions the LLM may queue (empty = mcp_allowed_tools) | +| `llm_orchestrator_skip_scheduler` | `False` | bool | Disable scheduler when autonomous is active | +| `llm_orchestrator_skip_if_no_change` | `True` | bool | Skip cycle if fingerprint unchanged | +| `llm_orchestrator_log_reasoning` | `False` | bool | Log full LLM reasoning | + +--- + +## 10. 
HTTP Routes (`webapp.py`) + +### GET routes + +| Route | Handler | Description | +|-------|---------|-------------| +| `GET /api/llm/status` | `llm_utils.get_llm_status` | LLM bridge state | +| `GET /api/llm/config` | `llm_utils.get_llm_config` | LLM config (api_key masked) | +| `GET /api/llm/reasoning` | `llm_utils.get_llm_reasoning` | Orchestrator reasoning log | +| `GET /api/mcp/status` | `llm_utils.get_mcp_status` | MCP server state | + +### POST routes (JSON data-only) + +| Route | Handler | Description | +|-------|---------|-------------| +| `POST /api/llm/chat` | `llm_utils.handle_chat` | Send a message to the LLM | +| `POST /api/llm/clear_history` | `llm_utils.clear_chat_history` | Clear a session's history | +| `POST /api/llm/config` | `llm_utils.save_llm_config` | Save LLM config | +| `POST /api/mcp/toggle` | `llm_utils.toggle_mcp` | Enable/disable MCP | +| `POST /api/mcp/config` | `llm_utils.save_mcp_config` | Save MCP config | + +All routes respect Bjorn's existing authentication (`webauth`). + +--- + +## 11. Web interfaces + +### `/chat.html` + +Terminal-style chat interface (black/red, consistent with Bjorn). + +**Features:** +- Auto-detects LLM state on load (`GET /api/llm/status`) +- Displays active backend (LaRuche URL, or mode) +- "Bjorn is thinking..." indicator during response +- Unique session ID per browser tab +- `Enter` = send, `Shift+Enter` = new line +- Textarea auto-resize +- **"Clear history"** button — clears server-side session +- **"Orch Log"** button — loads the autonomous orchestrator's reasoning + - Calls `GET /api/llm/reasoning` + - Renders each message (cycle prompt + LLM response) as chat bubbles + - "← Back to chat" to return to normal chat + - Helper message if log is empty (hint: enable `llm_orchestrator_log_reasoning`) + +**Access:** `http://[bjorn-ip]:8000/chat.html` + +### `/mcp-config.html` + +Full LLM & MCP configuration page. + +**LLM Bridge section:** +- Master enable/disable toggle +- EPD comments, chat, chat tool-calling toggles +- Backend selector (auto / laruche / ollama / api) +- LaRuche mDNS discovery toggle + manual URL +- Ollama configuration (URL + model) +- External API configuration (provider, key, model, custom URL) +- Timeout and token parameters +- "TEST CONNECTION" button + +**MCP Server section:** +- Enable toggle with live start/stop +- Transport selector (HTTP SSE / stdio) +- HTTP port +- Per-tool checkboxes +- "RUNNING" / "OFF" indicator + +**Access:** `http://[bjorn-ip]:8000/mcp-config.html` + +--- + +## 12. Startup (`Bjorn.py`) + +```python +# LLM Bridge — warm up singleton +try: + from llm_bridge import LLMBridge + LLMBridge() # Starts mDNS discovery if llm_laruche_discovery=True + logger.info("LLM Bridge initialised") +except Exception as e: + logger.warning("LLM Bridge init skipped: %s", e) + +# MCP Server +try: + import mcp_server + if shared_data.config.get("mcp_enabled", False): + mcp_server.start() # Daemon thread "MCPServer" + logger.info("MCP server started") + else: + logger.info("MCP server loaded (disabled)") +except Exception as e: + logger.warning("MCP server init skipped: %s", e) +``` + +The LLM Orchestrator is initialised inside `orchestrator.py` (not `Bjorn.py`), since it depends on the orchestrator loop cycle. + +--- + +## 13. 
LaRuche / LAND Protocol compatibility + +### LAND Protocol + +LAND (Local AI Network Discovery) is the LaRuche protocol: +- **Discovery:** mDNS service type `_ai-inference._tcp.local.` +- **Inference:** `POST http://[node]:8419/infer` + +### What Bjorn implements on the Python side + +```python +# mDNS listening (zeroconf) +from zeroconf import Zeroconf, ServiceBrowser +ServiceBrowser(zc, "_ai-inference._tcp.local.", listener) +# → Auto-detects LaRuche nodes + +# Inference call (urllib stdlib, zero dependency) +payload = {"prompt": "...", "capability": "llm", "max_tokens": 500} +urllib.request.urlopen(f"{url}/infer", data=json.dumps(payload)) +``` + +### Scenarios + +| Scenario | Behaviour | +|----------|-----------| +| LaRuche node detected on LAN | Used automatically as priority backend | +| Multiple LaRuche nodes | First discovered is used | +| Manual URL configured | Used directly, discovery ignored | +| LaRuche node absent | Cascades to Ollama or external API | +| `zeroconf` not installed | Discovery silently disabled, DEBUG log | + +--- + +## 14. Optional dependencies + +| Package | Min version | Feature unlocked | Install command | +|---------|------------|------------------|----------------| +| `mcp[cli]` | ≥ 1.0.0 | Full MCP server | `pip install "mcp[cli]"` | +| `zeroconf` | ≥ 0.131.0 | LaRuche mDNS discovery | `pip install zeroconf` | + +**No new dependencies** added for LLM backends: +- **LaRuche / Ollama**: uses `urllib.request` (Python stdlib) +- **Anthropic / OpenAI**: REST API via `urllib` — no SDK needed + +--- + +## 15. Quick activation & configuration + +### Basic LLM chat + +```bash +curl -X POST http://[bjorn-ip]:8000/api/llm/config \ + -H "Content-Type: application/json" \ + -d '{"llm_enabled": true, "llm_backend": "ollama", "llm_ollama_model": "phi3:mini"}' +# → http://[bjorn-ip]:8000/chat.html +``` + +### Chat with tool-calling (LLM accesses live network data) + +```bash +curl -X POST http://[bjorn-ip]:8000/api/llm/config \ + -d '{"llm_enabled": true, "llm_chat_tools_enabled": true}' +``` + +### LLM Orchestrator — advisor mode + +```bash +curl -X POST http://[bjorn-ip]:8000/api/llm/config \ + -d '{ + "llm_enabled": true, + "llm_orchestrator_mode": "advisor", + "llm_orchestrator_allowed_actions": ["NmapScan", "SSHBruteforce"] + }' +``` + +### LLM Orchestrator — autonomous mode (LLM as sole planner) + +```bash +curl -X POST http://[bjorn-ip]:8000/api/llm/config \ + -d '{ + "llm_enabled": true, + "llm_orchestrator_mode": "autonomous", + "llm_orchestrator_skip_scheduler": true, + "llm_orchestrator_max_actions": 5, + "llm_orchestrator_interval_s": 120, + "llm_orchestrator_allowed_actions": ["NmapScan", "SSHBruteforce", "SMBScan"], + "llm_orchestrator_log_reasoning": true + }' +# → View reasoning: http://[bjorn-ip]:8000/chat.html → Orch Log button +``` + +### With Anthropic API + +```bash +curl -X POST http://[bjorn-ip]:8000/api/llm/config \ + -d '{ + "llm_enabled": true, + "llm_backend": "api", + "llm_api_provider": "anthropic", + "llm_api_key": "sk-ant-...", + "llm_api_model": "claude-haiku-4-5-20251001" + }' +``` + +### With OpenRouter (access to all models) + +```bash +curl -X POST http://[bjorn-ip]:8000/api/llm/config \ + -d '{ + "llm_enabled": true, + "llm_backend": "api", + "llm_api_provider": "openrouter", + "llm_api_key": "sk-or-...", + "llm_api_model": "meta-llama/llama-3.2-3b-instruct", + "llm_api_base_url": "https://openrouter.ai/api" + }' +``` + +### Model recommendations by scenario + +| Scenario | Backend | Recommended model | Pi RAM | 
+|----------|---------|-------------------|--------| +| Autonomous orchestrator + LaRuche on LAN | laruche | Mistral/Phi on the node | 0 (remote inference) | +| Autonomous orchestrator offline | ollama | `qwen2.5:3b` | ~3 GB | +| Autonomous orchestrator cloud | api | `claude-haiku-4-5-20251001` | 0 | +| Chat + tools | ollama | `phi3:mini` | ~2 GB | +| EPD comments only | ollama | `smollm2:360m` | ~400 MB | + +--- + +## 16. Complete API endpoint reference + +### GET + +``` +GET /api/llm/status +→ {"enabled": bool, "backend": str, "laruche_url": str|null, + "laruche_discovery": bool, "ollama_url": str, "ollama_model": str, + "api_provider": str, "api_model": str, "api_key_set": bool} + +GET /api/llm/config +→ {all llm_* keys except api_key, + "llm_api_key_set": bool} + +GET /api/llm/reasoning +→ {"status": "ok", "messages": [{"role": str, "content": str}, ...], "count": int} +→ {"status": "error", "message": str, "messages": [], "count": 0} + +GET /api/mcp/status +→ {"enabled": bool, "running": bool, "transport": str, + "port": int, "allowed_tools": [str]} +``` + +### POST + +``` +POST /api/llm/chat +Body: {"message": str, "session_id": str?} +→ {"status": "ok", "response": str, "session_id": str} +→ {"status": "error", "message": str} + +POST /api/llm/clear_history +Body: {"session_id": str?} +→ {"status": "ok"} + +POST /api/llm/config +Body: {any subset of llm_* and llm_orchestrator_* keys} +→ {"status": "ok"} +→ {"status": "error", "message": str} + +POST /api/mcp/toggle +Body: {"enabled": bool} +→ {"status": "ok", "enabled": bool, "started": bool?} + +POST /api/mcp/config +Body: {"allowed_tools": [str]?, "port": int?, "transport": str?} +→ {"status": "ok", "config": {...}} +``` + +--- + +## 17. Queue priority system + +``` +Priority Source Trigger +────────────────────────────────────────────────────────────── + 85 LLM Advisor llm_orchestrator.advise() + 82 LLM Autonomous _run_autonomous_cycle() via run_action tool + 80 External MCP _impl_run_action() via MCP client or chat + 50 Normal / manual queue_action() without explicit priority + 40 Scheduler action_scheduler evaluates triggers +``` + +The scheduler always processes the highest-priority pending item first. LLM and MCP actions therefore preempt scheduler actions. + +--- + +## 18. 
Fallbacks & graceful degradation

| Condition | Behaviour |
|-----------|-----------|
| `llm_enabled = False` | `complete()` returns `None` immediately — zero overhead |
| `llm_orchestrator_mode = "none"` | LLMOrchestrator not instantiated |
| `mcp` not installed | `_build_mcp_server()` returns `None`, WARNING log |
| `zeroconf` not installed | LaRuche discovery silently disabled, DEBUG log |
| LaRuche node timeout | Exception caught, cascade to next backend |
| Ollama not running | `URLError` caught, cascade to API |
| API key missing | `_call_api()` returns `None`, cascade |
| All backends fail | `complete()` returns `None` |
| LLM returns `None` for EPD | `comment.py` uses `_pick_text()` (original behaviour) |
| LLM advisor: invalid JSON | DEBUG log, returns `None`, next cycle |
| LLM advisor: disallowed action | WARNING log, ignored |
| LLM autonomous: no change | cycle skipped, zero API call |
| LLM autonomous: 6 tool rounds exhausted | `_call_anthropic()` returns `None`, treated as a backend failure |
| Exception in LLM Bridge | `try/except` at every level, DEBUG/WARNING log |

### Timeouts

```
Chat / complete() → llm_timeout_s (default: 30s)
EPD comments → 8s (hardcoded, short to avoid blocking render)
Autonomous cycle → 90s (long: may chain multiple tool calls)
Advisor → 20s (short prompt + JSON response)
```

---

## 19. Call sequences

### Web chat with tool-calling

```
Browser → POST /api/llm/chat {"message": "which hosts are vulnerable?"}
 └── LLMUtils.handle_chat(data)
 └── LLMBridge().chat(message, session_id)
 └── complete(messages, system, tools=_BJORN_TOOLS)
 └── _call_anthropic(messages, tools=[...])
 ├── POST /v1/messages → stop_reason=tool_use
 │ └── tool: get_hosts(alive_only=true)
 │ → _execute_tool → _impl_get_hosts()
 │ → JSON of hosts
 ├── POST /v1/messages [+ tool result] → end_turn
 └── returns "3 exposed SSH hosts: 192.168.1.10, ..."
← {"status": "ok", "response": "3 exposed SSH hosts..."}
```

### LLM autonomous cycle

```
Thread "LLMOrchestrator" (daemon, interval=60s)
 └── _run_autonomous_cycle()
 ├── fp = _compute_fingerprint() → (12, 3, 1, 47)
 ├── _has_actionable_change(fp) → True (vuln_count 2→3)
 ├── self._last_fingerprint = fp
 │
 └── LLMBridge().complete(prompt, system, tools=[read-only + run_action])
 └── _call_anthropic(tools=[...])
 ├── POST → tool_use: get_hosts()
 │ → [{ip: "192.168.1.20", ports: "22,80,443"}]
 ├── POST → tool_use: get_action_history()
 │ → [...]
+ ├── POST → tool_use: run_action("SSHBruteforce", "192.168.1.20") + │ → _execute_tool → _impl_run_action() + │ → db.queue_action(priority=82, trigger="llm_autonomous") + │ → queue_event.set() + └── POST → end_turn + → "Queued SSHBruteforce on 192.168.1.20 (Mjolnir strikes the unguarded gate)" + → [if log_reasoning=True] logger.info("[LLM_ORCH_REASONING]...") + → [if log_reasoning=True] _push_to_chat(bridge, prompt, response) +``` + +### Reading reasoning from chat.html + +``` +User clicks "Orch Log" + └── fetch GET /api/llm/reasoning + └── LLMUtils.get_llm_reasoning(handler) + └── LLMBridge()._chat_histories["llm_orchestrator"] + → [{"role": "user", "content": "[Autonomous cycle]..."}, + {"role": "assistant", "content": "Queued SSHBruteforce..."}] +← {"status": "ok", "messages": [...], "count": 2} +→ Rendered as chat bubbles in #messages +``` + +### MCP from external client (Claude Desktop) + +``` +Claude Desktop → tool_call: run_action("NmapScan", "192.168.1.0/24") + └── FastMCP dispatch + └── mcp_server.run_action(action_name, target_ip) + └── _impl_run_action() + ├── db.queue_action(priority=80, trigger="mcp") + └── queue_event.set() +← {"status": "queued", "action": "NmapScan", "target": "192.168.1.0/24", "priority": 80} +``` + +### EPD comment with LLM + +``` +display.py → CommentAI.get_comment("SSHBruteforce", params={...}) + └── delay elapsed OR status changed → proceed + ├── llm_comments_enabled = True ? + │ └── LLMBridge().generate_comment("SSHBruteforce", params) + │ └── complete([{role:user, content:"Status: SSHBruteforce..."}], + │ max_tokens=80, timeout=8) + │ ├── LaRuche → "Norse gods smell SSH credentials..." ✓ + │ └── [or timeout 8s] → None + └── text = None → _pick_text("SSHBruteforce", lang, params) + └── SELECT FROM comments WHERE status='SSHBruteforce' + → "Processing authentication attempts..." +``` diff --git a/action_scheduler.py b/action_scheduler.py index d369f81..d6a75b9 100644 --- a/action_scheduler.py +++ b/action_scheduler.py @@ -133,19 +133,44 @@ class ActionScheduler: # Keep queue consistent with current enable/disable flags. self._cancel_queued_disabled_actions() - # 1) Promote scheduled actions that are due + # 1) Promote scheduled actions that are due (always — queue hygiene) self._promote_scheduled_to_pending() - # 2) Publish next scheduled occurrences for interval actions - self._publish_all_upcoming() + # When LLM autonomous mode owns scheduling, skip trigger evaluation + # so it doesn't compete with or duplicate LLM decisions. + # BUT: if the queue is empty, the heuristic scheduler resumes as fallback + # to prevent deadlock when the LLM fails to produce valid actions. 
+ _llm_wants_skip = bool( + self.shared_data.config.get("llm_orchestrator_skip_scheduler", False) + and self.shared_data.config.get("llm_orchestrator_mode") == "autonomous" + and self.shared_data.config.get("llm_enabled", False) + ) + _queue_empty = False + if _llm_wants_skip: + try: + row = self.shared_data.db.query_one( + "SELECT COUNT(*) AS cnt FROM action_queue WHERE status IN ('pending','running','scheduled')" + ) + _queue_empty = (row and int(row["cnt"]) == 0) + except Exception: + pass + _llm_skip = _llm_wants_skip and not _queue_empty - # 3) Evaluate global on_start actions - self._evaluate_global_actions() + if not _llm_skip: + if _llm_wants_skip and _queue_empty: + logger.info("Scheduler: LLM queue empty — heuristic fallback active") + # 2) Publish next scheduled occurrences for interval actions + self._publish_all_upcoming() - # 4) Evaluate per-host triggers - self.evaluate_all_triggers() + # 3) Evaluate global on_start actions + self._evaluate_global_actions() - # 5) Queue maintenance + # 4) Evaluate per-host triggers + self.evaluate_all_triggers() + else: + logger.debug("Scheduler: trigger evaluation skipped (LLM autonomous owns scheduling)") + + # 5) Queue maintenance (always — starvation prevention + cleanup) self.cleanup_queue() self.update_priorities() diff --git a/comment.py b/comment.py index 024fd2f..2f04dc1 100644 --- a/comment.py +++ b/comment.py @@ -319,6 +319,9 @@ class CommentAI: """ Return a comment if status changed or delay expired. + When llm_comments_enabled=True in config, tries LLM first; + falls back to the database/template system on any failure. + Args: status: logical status name (e.g., "IDLE", "SSHBruteforce", "NetworkScanner"). lang: language override (e.g., "fr"); if None, auto priority is used. @@ -331,14 +334,36 @@ class CommentAI: status = status or "IDLE" status_changed = (status != self.last_status) - if status_changed or (current_time - self.last_comment_time >= self.comment_delay): + if not status_changed and (current_time - self.last_comment_time < self.comment_delay): + return None + + # --- Try LLM if enabled --- + text: Optional[str] = None + llm_generated = False + if getattr(self.shared_data, "llm_comments_enabled", False): + try: + from llm_bridge import LLMBridge + text = LLMBridge().generate_comment(status, params) + if text: + llm_generated = True + except Exception as e: + logger.debug(f"LLM comment failed, using fallback: {e}") + + # --- Fallback: database / template system (original behaviour) --- + if not text: text = self._pick_text(status, lang, params) - if text: - self.last_status = status - self.last_comment_time = current_time - self.comment_delay = self._new_delay() - logger.debug(f"Next comment delay: {self.comment_delay}s") - return text + + if text: + self.last_status = status + self.last_comment_time = current_time + self.comment_delay = self._new_delay() + logger.debug(f"Next comment delay: {self.comment_delay}s") + # Log comments + if llm_generated: + logger.info(f"[LLM_COMMENT] ({status}) {text}") + else: + logger.info(f"[COMMENT] ({status}) {text}") + return text return None diff --git a/land_protocol.py b/land_protocol.py new file mode 100644 index 0000000..26e362c --- /dev/null +++ b/land_protocol.py @@ -0,0 +1,169 @@ +# land_protocol.py +# Python client for the LAND Protocol (Local AI Network Discovery). +# https://github.com/infinition/land-protocol +# +# Replace this file to update LAND protocol compatibility. +# Imported by llm_bridge.py — no other Bjorn code touches this. 
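#
# Minimal usage (the node URL is illustrative):
#   import land_protocol
#   text = land_protocol.infer("http://192.168.1.50:8419", "Hello Bjorn", max_tokens=64)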
+# +# Protocol summary: +# Discovery : mDNS service type _ai-inference._tcp.local. (port 5353) +# Transport : TCP HTTP on port 8419 by default +# Infer : POST /infer {"prompt": str, "capability": "llm", "max_tokens": int} +# Response : {"response": str} or {"text": str} + +import json +import threading +import time +import urllib.request +import urllib.error +from typing import Optional, Callable + +# mDNS service type broadcast by all LAND-compatible nodes (LaRuche, etc.) +LAND_SERVICE_TYPE = "_ai-inference._tcp.local." + +# Default inference port +LAND_DEFAULT_PORT = 8419 + + +def discover_node( + on_found: Callable[[str], None], + stop_event: threading.Event, + logger=None, +) -> None: + """ + Background mDNS listener for LAND nodes. + + Calls on_found(url) whenever a new node is discovered. + Runs until stop_event is set. + + Requires: pip install zeroconf + """ + try: + from zeroconf import Zeroconf, ServiceBrowser, ServiceListener + except ImportError: + if logger: + logger.warning( + "zeroconf not installed — LAND mDNS discovery disabled. " + "Run: pip install zeroconf" + ) + else: + print("[LAND] zeroconf not installed — mDNS discovery disabled") + return + + class _Listener(ServiceListener): + def add_service(self, zc: Zeroconf, type_: str, name: str) -> None: # type: ignore[override] + info = zc.get_service_info(type_, name) + if not info: + return + addresses = info.parsed_scoped_addresses() + if not addresses: + return + port = info.port or LAND_DEFAULT_PORT + url = f"http://{addresses[0]}:{port}" + on_found(url) + + def remove_service(self, zc: Zeroconf, type_: str, name: str) -> None: # type: ignore[override] + pass + + def update_service(self, zc: Zeroconf, type_: str, name: str) -> None: # type: ignore[override] + self.add_service(zc, type_, name) + + zc = Zeroconf() + try: + ServiceBrowser(zc, LAND_SERVICE_TYPE, _Listener()) + if logger: + logger.info(f"LAND: mDNS discovery active ({LAND_SERVICE_TYPE})") + while not stop_event.is_set(): + time.sleep(5) + finally: + zc.close() + + +def infer( + base_url: str, + prompt: str, + max_tokens: int = 500, + capability: str = "llm", + model: Optional[str] = None, + timeout: int = 30, +) -> Optional[str]: + """ + Send an inference request to a LAND node. + + POST {base_url}/infer + Body: {"prompt": str, "capability": str, "max_tokens": int, "model": str|null} + + If model is None, the node uses its default model. + Returns the response text, or None on failure. + """ + payload = { + "prompt": prompt, + "capability": capability, + "max_tokens": max_tokens, + } + if model: + payload["model"] = model + data = json.dumps(payload).encode() + req = urllib.request.Request( + f"{base_url.rstrip('/')}/infer", + data=data, + headers={"Content-Type": "application/json"}, + method="POST", + ) + with urllib.request.urlopen(req, timeout=timeout) as resp: + body = json.loads(resp.read().decode()) + # LAND response may use "response" or "text" key + return body.get("response") or body.get("text") or None + + +def get_default_model(base_url: str, timeout: int = 10) -> Optional[str]: + """ + Get the current default model from a LAND node. + + GET {base_url}/config/default_model + Returns the model name string, or None on failure. 
+ """ + try: + req = urllib.request.Request( + f"{base_url.rstrip('/')}/config/default_model", + headers={"Accept": "application/json"}, + method="GET", + ) + with urllib.request.urlopen(req, timeout=timeout) as resp: + body = json.loads(resp.read().decode()) + return body.get("default_model") or None + except Exception: + return None + + +def list_models(base_url: str, timeout: int = 10) -> dict: + """ + List available models on a LAND node. + + GET {base_url}/models + Returns a dict with: + - "models": list of model dicts + - "default_model": str or None (the node's current default model) + + Example: {"models": [{"name": "mistral:latest", ...}], "default_model": "mistral:latest"} + Returns {"models": [], "default_model": None} on failure. + """ + try: + req = urllib.request.Request( + f"{base_url.rstrip('/')}/models", + headers={"Accept": "application/json"}, + method="GET", + ) + with urllib.request.urlopen(req, timeout=timeout) as resp: + body = json.loads(resp.read().decode()) + # LaRuche returns {"models": [...], "default_model": "..."} or a flat list + if isinstance(body, list): + return {"models": body, "default_model": None} + if isinstance(body, dict): + return { + "models": body.get("models", []), + "default_model": body.get("default_model") or None, + } + return {"models": [], "default_model": None} + except Exception: + return {"models": [], "default_model": None} diff --git a/llm_bridge.py b/llm_bridge.py new file mode 100644 index 0000000..a9886ed --- /dev/null +++ b/llm_bridge.py @@ -0,0 +1,629 @@ +# llm_bridge.py +# LLM backend cascade for Bjorn. +# Priority: LaRuche (LAND/mDNS) → Ollama local → External API → None (template fallback) +# All external deps are optional — graceful degradation at every level. + +import json +import threading +import time +import urllib.request +import urllib.error +from typing import Optional, List, Dict, Any + +from logger import Logger +import land_protocol + +logger = Logger(name="llm_bridge.py", level=20) # INFO + +# --------------------------------------------------------------------------- +# Tool definitions (Anthropic Messages API format). +# Mirrors the tools exposed by mcp_server.py — add new tools here too. +# --------------------------------------------------------------------------- +_BJORN_TOOLS: List[Dict] = [ + { + "name": "get_hosts", + "description": "Return all network hosts discovered by Bjorn's scanner.", + "input_schema": { + "type": "object", + "properties": { + "alive_only": {"type": "boolean", "description": "Only return alive hosts. Default: true."}, + }, + }, + }, + { + "name": "get_vulnerabilities", + "description": "Return discovered vulnerabilities, optionally filtered by host IP.", + "input_schema": { + "type": "object", + "properties": { + "host_ip": {"type": "string", "description": "Filter by IP address. Empty = all hosts."}, + "limit": {"type": "integer", "description": "Max results. Default: 100."}, + }, + }, + }, + { + "name": "get_credentials", + "description": "Return captured credentials, optionally filtered by service name.", + "input_schema": { + "type": "object", + "properties": { + "service": {"type": "string", "description": "Service filter (ssh, ftp, smb…). Empty = all."}, + "limit": {"type": "integer", "description": "Max results. 
Default: 100."}, + }, + }, + }, + { + "name": "get_action_history", + "description": "Return the history of executed Bjorn actions, most recent first.", + "input_schema": { + "type": "object", + "properties": { + "limit": {"type": "integer", "description": "Max results. Default: 50."}, + "action_name": {"type": "string", "description": "Filter by action name. Empty = all."}, + }, + }, + }, + { + "name": "get_status", + "description": "Return Bjorn's current operational status, scan counters, and active action.", + "input_schema": {"type": "object", "properties": {}}, + }, + { + "name": "run_action", + "description": "Queue a Bjorn action (e.g. port_scan, ssh_bruteforce) against a target IP address.", + "input_schema": { + "type": "object", + "properties": { + "action_name": {"type": "string", "description": "Action module name (e.g. port_scan)."}, + "target_ip": {"type": "string", "description": "Target IP address."}, + "target_mac": {"type": "string", "description": "Target MAC address (optional)."}, + }, + "required": ["action_name", "target_ip"], + }, + }, + { + "name": "query_db", + "description": "Run a read-only SELECT query against Bjorn's SQLite database.", + "input_schema": { + "type": "object", + "properties": { + "sql": {"type": "string", "description": "SELECT SQL statement."}, + "params": {"type": "array", "items": {"type": "string"}, "description": "Bind parameters."}, + }, + "required": ["sql"], + }, + }, +] + + +class LLMBridge: + """ + Unified LLM backend with automatic cascade: + 1. LaRuche node discovered via LAND protocol (mDNS _ai-inference._tcp.local.) + 2. Ollama running locally (http://localhost:11434) + 3. External API (Anthropic / OpenAI / OpenRouter) + 4. None → caller falls back to templates + + Singleton — one instance per process, thread-safe. + """ + + _instance: Optional["LLMBridge"] = None + _init_lock = threading.Lock() + + def __new__(cls) -> "LLMBridge": + with cls._init_lock: + if cls._instance is None: + inst = super().__new__(cls) + inst._ready = False + cls._instance = inst + return cls._instance + + # ------------------------------------------------------------------ + # Init + # ------------------------------------------------------------------ + + def __init__(self) -> None: + if self._ready: + return + with self._init_lock: + if self._ready: + return + from init_shared import shared_data + self._sd = shared_data + self._laruche_url: Optional[str] = None + self._laruche_lock = threading.Lock() + self._discovery_active = False + self._chat_histories: Dict[str, List[Dict]] = {} # session_id → messages + self._hist_lock = threading.Lock() + self._ready = True + + # Always start mDNS discovery — even if LLM is disabled. + # This way LaRuche URL is ready the moment the user enables LLM. 
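        # Still gated by llm_laruche_discovery, so mDNS can be switched off entirely.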
+ if self._cfg("llm_laruche_discovery", True): + self._start_laruche_discovery() + + # ------------------------------------------------------------------ + # Config helpers + # ------------------------------------------------------------------ + + def _cfg(self, key: str, default=None): + return self._sd.config.get(key, getattr(self._sd, key, default)) + + def _is_enabled(self) -> bool: + return bool(self._cfg("llm_enabled", False)) + + def _lang_instruction(self) -> str: + """Return a prompt sentence that forces the LLM to reply in the configured language.""" + _LANG_NAMES = { + "en": "English", "fr": "French", "es": "Spanish", "de": "German", + "it": "Italian", "pt": "Portuguese", "nl": "Dutch", "ru": "Russian", + "zh": "Chinese", "ja": "Japanese", "ko": "Korean", "ar": "Arabic", + "pl": "Polish", "sv": "Swedish", "no": "Norwegian", "da": "Danish", + "fi": "Finnish", "cs": "Czech", "tr": "Turkish", + } + code = self._cfg("lang", "en") + name = _LANG_NAMES.get(code, code) + if code == "en": + return "" # No extra instruction needed for English (default) + return f"Always respond in {name}." + + # ------------------------------------------------------------------ + # LaRuche / LAND discovery + # ------------------------------------------------------------------ + + def _start_laruche_discovery(self) -> None: + """Launch background mDNS discovery for LaRuche/LAND nodes (non-blocking).""" + manual_url = self._cfg("llm_laruche_url", "") + if manual_url: + with self._laruche_lock: + self._laruche_url = manual_url.rstrip("/") + logger.info(f"LaRuche: manual URL configured → {self._laruche_url}") + return + + stop_event = threading.Event() + self._discovery_stop = stop_event + + def _on_found(url: str) -> None: + with self._laruche_lock: + if self._laruche_url != url: + self._laruche_url = url + logger.info(f"LaRuche: discovered LAND node → {url}") + self._discovery_active = True + + def _run() -> None: + try: + land_protocol.discover_node(_on_found, stop_event, logger=logger) + except Exception as e: + logger.warning(f"LAND discovery error: {e}") + + threading.Thread(target=_run, daemon=True, name="LANDDiscovery").start() + + # ------------------------------------------------------------------ + # Public API + # ------------------------------------------------------------------ + + def complete( + self, + messages: List[Dict[str, str]], + max_tokens: Optional[int] = None, + system: Optional[str] = None, + timeout: Optional[int] = None, + tools: Optional[List[Dict]] = None, + ) -> Optional[str]: + """ + Send a chat completion request through the configured cascade. 
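
        Passing `tools` only has an effect on the Anthropic backend (agentic
        tool-calling loop); the LaRuche and Ollama calls ignore it.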
+ + Args: + messages: List of {"role": "user"|"assistant", "content": "..."} + max_tokens: Override llm_max_tokens config value + system: System prompt (prepended if supported by backend) + timeout: Override llm_timeout_s config value + + Returns: + str response, or None if all backends fail / LLM disabled + """ + if not self._is_enabled(): + return None + + max_tok = max_tokens or int(self._cfg("llm_max_tokens", 500)) + tout = timeout or int(self._cfg("llm_timeout_s", 30)) + backend = self._cfg("llm_backend", "auto") + + if backend == "auto": + order = ["laruche", "ollama", "api"] + else: + order = [backend] + + for b in order: + try: + result = self._dispatch(b, messages, max_tok, tout, system, tools) + if result: + logger.info(f"LLM response from [{b}] (len={len(result)})") + return result + else: + logger.warning(f"LLM backend [{b}] returned empty response — skipping") + except Exception as exc: + logger.warning(f"LLM backend [{b}] failed: {exc}") + + logger.debug("All LLM backends failed — returning None (template fallback)") + return None + + def generate_comment( + self, + status: str, + params: Optional[Dict[str, Any]] = None, + ) -> Optional[str]: + """ + Generate a short EPD status comment (≤ ~12 words). + Used by comment.py when llm_comments_enabled=True. + """ + if not self._is_enabled(): + return None + + lang = self._lang_instruction() + custom_comment = str(self._cfg("llm_system_prompt_comment", "") or "").strip() + if custom_comment: + system = custom_comment + (f" {lang}" if lang else "") + else: + system = ( + "You are Bjorn, a terse Norse-themed autonomous security AI. " + "Reply with ONE sentence of at most 12 words as a status comment. " + "Be cryptic, dark, and technical. No punctuation at the end." + + (f" {lang}" if lang else "") + ) + params_str = f" Context: {json.dumps(params)}" if params else "" + prompt = f"Current status: {status}.{params_str} Write a brief status comment." + + return self.complete( + [{"role": "user", "content": prompt}], + max_tokens=int(self._cfg("llm_comment_max_tokens", 80)), + system=system, + timeout=8, # Short timeout for EPD — fall back fast + ) + + def chat( + self, + user_message: str, + session_id: str = "default", + system: Optional[str] = None, + ) -> Optional[str]: + """ + Stateful chat with Bjorn — maintains conversation history per session. + """ + if not self._is_enabled(): + return "LLM is disabled. Enable it in Settings → LLM Bridge." + + max_hist = int(self._cfg("llm_chat_history_size", 20)) + + if system is None: + system = self._build_system_prompt() + + with self._hist_lock: + history = self._chat_histories.setdefault(session_id, []) + history.append({"role": "user", "content": user_message}) + # Keep history bounded + if len(history) > max_hist: + history[:] = history[-max_hist:] + messages = list(history) + + tools = _BJORN_TOOLS if self._cfg("llm_chat_tools_enabled", False) else None + response = self.complete(messages, system=system, tools=tools) + + if response: + with self._hist_lock: + self._chat_histories[session_id].append( + {"role": "assistant", "content": response} + ) + + return response or "No LLM backend available. Check Settings → LLM Bridge." 
+ + def clear_history(self, session_id: str = "default") -> None: + with self._hist_lock: + self._chat_histories.pop(session_id, None) + + def status(self) -> Dict[str, Any]: + """Return current bridge status for the web UI.""" + with self._laruche_lock: + laruche = self._laruche_url + + return { + "enabled": self._is_enabled(), + "backend": self._cfg("llm_backend", "auto"), + "laruche_url": laruche, + "laruche_discovery": self._discovery_active, + "ollama_url": self._cfg("llm_ollama_url", "http://127.0.0.1:11434"), + "ollama_model": self._cfg("llm_ollama_model", "phi3:mini"), + "api_provider": self._cfg("llm_api_provider", "anthropic"), + "api_model": self._cfg("llm_api_model", "claude-haiku-4-5-20251001"), + "api_key_set": bool(self._cfg("llm_api_key", "")), + } + + # ------------------------------------------------------------------ + # Backend dispatcher + # ------------------------------------------------------------------ + + def _dispatch( + self, + backend: str, + messages: List[Dict], + max_tokens: int, + timeout: int, + system: Optional[str], + tools: Optional[List[Dict]] = None, + ) -> Optional[str]: + if backend == "laruche": + return self._call_laruche(messages, max_tokens, timeout, system) + if backend == "ollama": + return self._call_ollama(messages, max_tokens, timeout, system) + if backend == "api": + return self._call_api(messages, max_tokens, timeout, system, tools) + return None + + # ------------------------------------------------------------------ + # LaRuche backend (LAND /infer endpoint) + # ------------------------------------------------------------------ + + def _call_laruche( + self, + messages: List[Dict], + max_tokens: int, + timeout: int, + system: Optional[str], + ) -> Optional[str]: + with self._laruche_lock: + url = self._laruche_url + if not url: + return None + + # Build flat prompt string (LAND /infer expects a single prompt) + prompt_parts = [] + if system: + prompt_parts.append(f"[System]: {system}") + for m in messages: + role = m.get("role", "user").capitalize() + prompt_parts.append(f"[{role}]: {m.get('content', '')}") + prompt = "\n".join(prompt_parts) + + model = self._cfg("llm_laruche_model", "") or None + return land_protocol.infer(url, prompt, max_tokens=max_tokens, capability="llm", model=model, timeout=timeout) + + # ------------------------------------------------------------------ + # Ollama backend (/api/chat) + # ------------------------------------------------------------------ + + def _call_ollama( + self, + messages: List[Dict], + max_tokens: int, + timeout: int, + system: Optional[str], + ) -> Optional[str]: + base = self._cfg("llm_ollama_url", "http://127.0.0.1:11434").rstrip("/") + model = self._cfg("llm_ollama_model", "phi3:mini") + + # Ollama /api/chat supports system messages natively + ollama_messages = [] + if system: + ollama_messages.append({"role": "system", "content": system}) + ollama_messages.extend(messages) + + payload = { + "model": model, + "messages": ollama_messages, + "stream": False, + "options": {"num_predict": max_tokens}, + } + data = json.dumps(payload).encode() + req = urllib.request.Request( + f"{base}/api/chat", + data=data, + headers={"Content-Type": "application/json"}, + method="POST", + ) + with urllib.request.urlopen(req, timeout=timeout) as resp: + body = json.loads(resp.read().decode()) + return body.get("message", {}).get("content") or None + + # ------------------------------------------------------------------ + # External API backend (Anthropic / OpenAI / OpenRouter) + # 
------------------------------------------------------------------ + + def _call_api( + self, + messages: List[Dict], + max_tokens: int, + timeout: int, + system: Optional[str], + tools: Optional[List[Dict]] = None, + ) -> Optional[str]: + provider = self._cfg("llm_api_provider", "anthropic") + api_key = self._cfg("llm_api_key", "") + if not api_key: + return None + + if provider == "anthropic": + return self._call_anthropic(messages, max_tokens, timeout, system, api_key, tools) + else: + # OpenAI-compatible (openai / openrouter) + return self._call_openai_compat(messages, max_tokens, timeout, system, api_key) + + def _call_anthropic( + self, + messages: List[Dict], + max_tokens: int, + timeout: int, + system: Optional[str], + api_key: str, + tools: Optional[List[Dict]] = None, + ) -> Optional[str]: + """Call Anthropic Messages API with optional agentic tool-calling loop.""" + model = self._cfg("llm_api_model", "claude-haiku-4-5-20251001") + base_url = self._cfg("llm_api_base_url", "") or "https://api.anthropic.com" + api_url = f"{base_url.rstrip('/')}/v1/messages" + headers = { + "Content-Type": "application/json", + "x-api-key": api_key, + "anthropic-version": "2023-06-01", + } + + current_messages = list(messages) + + for _round in range(6): # max 5 tool-call rounds + 1 final + payload: Dict[str, Any] = { + "model": model, + "max_tokens": max_tokens, + "messages": current_messages, + } + if system: + payload["system"] = system + if tools: + payload["tools"] = tools + + data = json.dumps(payload).encode() + req = urllib.request.Request(api_url, data=data, headers=headers, method="POST") + with urllib.request.urlopen(req, timeout=timeout) as resp: + body = json.loads(resp.read().decode()) + + stop_reason = body.get("stop_reason") + content = body.get("content", []) + + if stop_reason != "tool_use" or not tools: + # Final text response + for block in content: + if isinstance(block, dict) and block.get("type") == "text": + return block.get("text") or None + return None + + # ---- tool_use round ---- + current_messages.append({"role": "assistant", "content": content}) + tool_results = [] + for block in content: + if isinstance(block, dict) and block.get("type") == "tool_use": + result_text = self._execute_tool(block["name"], block.get("input", {})) + logger.debug(f"Tool [{block['name']}] → {result_text[:200]}") + tool_results.append({ + "type": "tool_result", + "tool_use_id": block["id"], + "content": result_text, + }) + if not tool_results: + break + current_messages.append({"role": "user", "content": tool_results}) + + return None + + def _execute_tool(self, name: str, inputs: Dict) -> str: + """Execute a Bjorn tool by name and return a JSON string result.""" + try: + import mcp_server + except Exception as e: + return json.dumps({"error": f"mcp_server unavailable: {e}"}) + + allowed: List[str] = self._cfg("mcp_allowed_tools", []) + if name not in allowed: + return json.dumps({"error": f"Tool '{name}' is not enabled in Bjorn MCP config."}) + + try: + if name == "get_hosts": + return mcp_server._impl_get_hosts(inputs.get("alive_only", True)) + if name == "get_vulnerabilities": + return mcp_server._impl_get_vulnerabilities( + inputs.get("host_ip") or None, inputs.get("limit", 100) + ) + if name == "get_credentials": + return mcp_server._impl_get_credentials( + inputs.get("service") or None, inputs.get("limit", 100) + ) + if name == "get_action_history": + return mcp_server._impl_get_action_history( + inputs.get("limit", 50), inputs.get("action_name") or None + ) + if name == "get_status": + 
return mcp_server._impl_get_status() + if name == "run_action": + return mcp_server._impl_run_action( + inputs["action_name"], inputs["target_ip"], inputs.get("target_mac", "") + ) + if name == "query_db": + return mcp_server._impl_query_db(inputs["sql"], inputs.get("params")) + return json.dumps({"error": f"Unknown tool: {name}"}) + except Exception as e: + return json.dumps({"error": str(e)}) + + def _call_openai_compat( + self, + messages: List[Dict], + max_tokens: int, + timeout: int, + system: Optional[str], + api_key: str, + ) -> Optional[str]: + """Call OpenAI-compatible API (OpenAI / OpenRouter / local).""" + model = self._cfg("llm_api_model", "gpt-4o-mini") + base_url = ( + self._cfg("llm_api_base_url", "") + or "https://api.openai.com" + ) + + oai_messages = [] + if system: + oai_messages.append({"role": "system", "content": system}) + oai_messages.extend(messages) + + payload = { + "model": model, + "messages": oai_messages, + "max_tokens": max_tokens, + } + data = json.dumps(payload).encode() + req = urllib.request.Request( + f"{base_url.rstrip('/')}/v1/chat/completions", + data=data, + headers={ + "Content-Type": "application/json", + "Authorization": f"Bearer {api_key}", + }, + method="POST", + ) + with urllib.request.urlopen(req, timeout=timeout) as resp: + body = json.loads(resp.read().decode()) + return body.get("choices", [{}])[0].get("message", {}).get("content") or None + + # ------------------------------------------------------------------ + # System prompt builder + # ------------------------------------------------------------------ + + def _build_system_prompt(self) -> str: + try: + hosts = self._sd.target_count + vulns = self._sd.vuln_count + creds = self._sd.cred_count + mode = self._sd.operation_mode + status = getattr(self._sd, "bjorn_status_text", "IDLE") + except Exception: + hosts, vulns, creds, mode, status = "?", "?", "?", "?", "IDLE" + + # Use custom prompt if configured, otherwise default + custom = str(self._cfg("llm_system_prompt_chat", "") or "").strip() + if custom: + base = custom + else: + base = ( + f"You are Bjorn, an autonomous network security AI assistant running on a Raspberry Pi. " + f"Current state: {hosts} hosts discovered, {vulns} vulnerabilities, {creds} credentials captured. " + f"Operation mode: {mode}. Current action: {status}. " + f"Answer security questions concisely and technically. " + f"You can discuss network topology, vulnerabilities, and suggest next steps. " + f"Use brief Norse references occasionally. Never break character." + ) + + # Inject user profile if set + user_name = str(self._cfg("llm_user_name", "") or "").strip() + user_bio = str(self._cfg("llm_user_bio", "") or "").strip() + if user_name: + base += f"\nThe operator's name is {user_name}." + if user_bio: + base += f" {user_bio}" + + lang = self._lang_instruction() + return base + (f" {lang}" if lang else "") diff --git a/llm_orchestrator.py b/llm_orchestrator.py new file mode 100644 index 0000000..2c4375d --- /dev/null +++ b/llm_orchestrator.py @@ -0,0 +1,757 @@ +# llm_orchestrator.py +# LLM-based orchestration layer for Bjorn. 
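+#
+# Config sketch (illustrative values; the keys are the ones read by this
+# module and llm_bridge.py):
+#   "llm_enabled": true,
+#   "llm_orchestrator_mode": "advisor",
+#   "llm_orchestrator_interval_s": 120,
+#   "llm_orchestrator_max_actions": 2,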
+# +# Modes (llm_orchestrator_mode in config): +# none — disabled (default); LLM has no role in scheduling +# advisor — LLM reviews state periodically and injects ONE priority action +# autonomous — LLM runs its own agentic cycle, observes via MCP tools, queues actions +# +# Prerequisites: llm_enabled=True, llm_orchestrator_mode != "none" +# +# Guard rails: +# llm_orchestrator_allowed_actions — whitelist for run_action (empty = mcp_allowed_tools) +# llm_orchestrator_max_actions — hard cap on actions per autonomous cycle +# llm_orchestrator_interval_s — cooldown between autonomous cycles +# Falls back silently when LLM unavailable (no crash, no spam) + +import json +import threading +import time +from typing import Any, Dict, List, Optional + +from logger import Logger + +logger = Logger(name="llm_orchestrator.py", level=20) + +# Priority levels (must stay above normal scheduler/queue to be useful) +_ADVISOR_PRIORITY = 85 # advisor > MCP (80) > normal (50) > scheduler (40) +_AUTONOMOUS_PRIORITY = 82 + + +class LLMOrchestrator: + """ + LLM-based orchestration layer. + + advisor mode — called from orchestrator background tasks; LLM suggests one action. + autonomous mode — runs its own thread; LLM loops with full tool-calling. + """ + + def __init__(self, shared_data): + self._sd = shared_data + self._thread: Optional[threading.Thread] = None + self._stop = threading.Event() + self._last_fingerprint: Optional[tuple] = None + + # ------------------------------------------------------------------ + # Lifecycle + # ------------------------------------------------------------------ + + def start(self) -> None: + mode = self._mode() + if mode == "autonomous": + if self._thread and self._thread.is_alive(): + return + self._stop.clear() + self._thread = threading.Thread( + target=self._autonomous_loop, daemon=True, name="LLMOrchestrator" + ) + self._thread.start() + logger.info("LLM Orchestrator started (autonomous)") + elif mode == "advisor": + logger.info("LLM Orchestrator ready (advisor — called from background tasks)") + + def stop(self) -> None: + self._stop.set() + if self._thread and self._thread.is_alive(): + self._thread.join(timeout=15) + self._thread = None + + def restart_if_mode_changed(self) -> None: + """ + Call from the orchestrator main loop to react to runtime config changes. + Starts/stops the autonomous thread when the mode changes. + """ + mode = self._mode() + running = self._thread is not None and self._thread.is_alive() + + if mode == "autonomous" and not running and self._is_llm_enabled(): + self.start() + elif mode != "autonomous" and running: + self.stop() + + def is_active(self) -> bool: + return self._thread is not None and self._thread.is_alive() + + # ------------------------------------------------------------------ + # Config helpers + # ------------------------------------------------------------------ + + def _mode(self) -> str: + return str(self._sd.config.get("llm_orchestrator_mode", "none")) + + def _is_llm_enabled(self) -> bool: + return bool(self._sd.config.get("llm_enabled", False)) + + def _allowed_actions(self) -> List[str]: + """ + Bjorn action module names the LLM may queue via run_action. + Falls back to all loaded action names if empty. + NOTE: These are action MODULE names (e.g. 'NetworkScanner', 'SSHBruteforce'), + NOT MCP tool names (get_hosts, run_action, etc.). 
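+
+        Example (illustrative config value, reusing the module names above):
+            "llm_orchestrator_allowed_actions": ["NetworkScanner", "SSHBruteforce"]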
+ """ + custom = self._sd.config.get("llm_orchestrator_allowed_actions", []) + if custom: + return list(custom) + # Auto-discover from loaded actions + try: + loaded = getattr(self._sd, 'loaded_action_names', None) + if loaded: + return list(loaded) + except Exception: + pass + # Fallback: ask the DB for known action names + try: + rows = self._sd.db.query( + "SELECT DISTINCT action_name FROM action_queue ORDER BY action_name" + ) + if rows: + return [r["action_name"] for r in rows] + except Exception: + pass + return [] + + def _max_actions(self) -> int: + return max(1, int(self._sd.config.get("llm_orchestrator_max_actions", 3))) + + def _interval(self) -> int: + return max(30, int(self._sd.config.get("llm_orchestrator_interval_s", 60))) + + # ------------------------------------------------------------------ + # Advisor mode (called externally from orchestrator background tasks) + # ------------------------------------------------------------------ + + def advise(self) -> Optional[str]: + """ + Ask the LLM for ONE tactical action recommendation. + Returns the action name if one was queued, else None. + """ + if not self._is_llm_enabled() or self._mode() != "advisor": + return None + + try: + from llm_bridge import LLMBridge + + allowed = self._allowed_actions() + if not allowed: + return None + + snapshot = self._build_snapshot() + real_ips = snapshot.get("VALID_TARGET_IPS", []) + ip_list_str = ", ".join(real_ips) if real_ips else "(none)" + + system = ( + "You are Bjorn's tactical advisor. Review the current network state " + "and suggest ONE action to queue, or nothing if the queue is sufficient. " + "Reply ONLY with valid JSON — no markdown, no commentary.\n" + 'Format when action needed: {"action": "ActionName", "target_ip": "1.2.3.4", "reason": "brief"}\n' + 'Format when nothing needed: {"action": null}\n' + "action must be exactly one of: " + ", ".join(allowed) + "\n" + f"target_ip MUST be one of these exact IPs: {ip_list_str}\n" + "NEVER use placeholder IPs. Only use IPs from the hosts_alive list." + ) + prompt = ( + f"Current Bjorn state:\n{json.dumps(snapshot, indent=2)}\n\n" + "Suggest one action or null." + ) + + raw = LLMBridge().complete( + [{"role": "user", "content": prompt}], + system=system, + max_tokens=150, + timeout=20, + ) + if not raw: + return None + + return self._apply_advisor_response(raw, allowed) + + except Exception as e: + logger.debug(f"LLM advisor error: {e}") + return None + + def _apply_advisor_response(self, raw: str, allowed: List[str]) -> Optional[str]: + """Parse advisor JSON and queue the suggested action. 
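+        Accepted payloads (as defined by the advisor system prompt above):
+            {"action": "ActionName", "target_ip": "1.2.3.4", "reason": "brief"}
+            {"action": null}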
Returns action name or None.""" + try: + text = raw.strip() + # Strip markdown fences if the model added them + if "```" in text: + parts = text.split("```") + text = parts[1] if len(parts) > 1 else parts[0] + if text.startswith("json"): + text = text[4:] + + data = json.loads(text.strip()) + action = data.get("action") + if not action: + logger.debug("LLM advisor: no action suggested this cycle") + return None + + if action not in allowed: + logger.warning(f"LLM advisor suggested disallowed action '{action}' — ignored") + return None + + target_ip = str(data.get("target_ip", "")).strip() + reason = str(data.get("reason", "llm_advisor"))[:120] + + mac = self._resolve_mac(target_ip) + + self._sd.db.queue_action( + action_name=action, + mac=mac, + ip=target_ip, + priority=_ADVISOR_PRIORITY, + trigger="llm_advisor", + metadata={ + "decision_method": "llm_advisor", + "decision_origin": "llm", + "ai_reason": reason, + }, + ) + try: + self._sd.queue_event.set() + except Exception: + pass + + logger.info(f"[LLM_ADVISOR] → {action} @ {target_ip}: {reason}") + return action + + except json.JSONDecodeError: + logger.debug(f"LLM advisor: invalid JSON: {raw[:200]}") + return None + except Exception as e: + logger.debug(f"LLM advisor apply error: {e}") + return None + + # ------------------------------------------------------------------ + # Autonomous mode (own thread) + # ------------------------------------------------------------------ + + def _autonomous_loop(self) -> None: + logger.info("LLM Orchestrator autonomous loop starting") + while not self._stop.is_set(): + try: + if self._is_llm_enabled() and self._mode() == "autonomous": + self._run_autonomous_cycle() + else: + # Mode was switched off at runtime — stop thread + break + except Exception as e: + logger.error(f"LLM autonomous cycle error: {e}") + + self._stop.wait(self._interval()) + + logger.info("LLM Orchestrator autonomous loop stopped") + + def _compute_fingerprint(self) -> tuple: + """ + Compact state fingerprint: (hosts, vulns, creds, last_completed_queue_id). + Only increases are meaningful — a host going offline is not an opportunity. + """ + try: + hosts = int(getattr(self._sd, "target_count", 0)) + vulns = int(getattr(self._sd, "vuln_count", 0)) + creds = int(getattr(self._sd, "cred_count", 0)) + row = self._sd.db.query_one( + "SELECT MAX(id) AS mid FROM action_queue WHERE status IN ('success','failed')" + ) + last_id = int(row["mid"]) if row and row["mid"] is not None else 0 + return (hosts, vulns, creds, last_id) + except Exception: + return (0, 0, 0, 0) + + def _has_actionable_change(self, fp: tuple) -> bool: + """ + Return True only if something *increased* since the last cycle: + - new host discovered (hosts ↑) + - new vulnerability found (vulns ↑) + - new credential captured (creds ↑) + - an action completed (last_id ↑) + A host going offline (hosts ↓) is not an actionable event. + """ + if self._last_fingerprint is None: + return True # first cycle always runs + return any(fp[i] > self._last_fingerprint[i] for i in range(len(fp))) + + def _run_autonomous_cycle(self) -> None: + """ + One autonomous cycle. + + Two paths based on backend capability: + A) API backend (Anthropic) → agentic tool-calling loop + B) LaRuche / Ollama → snapshot-based JSON prompt (no tool-calling) + + Path B injects the full network state into the prompt and asks the LLM + to reply with a JSON array of actions. This works with any text-only LLM. 
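+
+        Path B reply shape the parser expects (IP illustrative):
+            [{"action": "ActionName", "target_ip": "192.0.2.10", "reason": "brief"}]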
+ """ + # Skip if nothing actionable changed (save tokens) + if self._sd.config.get("llm_orchestrator_skip_if_no_change", True): + fp = self._compute_fingerprint() + if not self._has_actionable_change(fp): + logger.debug("LLM autonomous: no actionable change, skipping cycle (no tokens used)") + return + self._last_fingerprint = fp + + try: + from llm_bridge import LLMBridge, _BJORN_TOOLS + except ImportError as e: + logger.warning(f"LLM Orchestrator: cannot import llm_bridge: {e}") + return + + bridge = LLMBridge() + allowed = self._allowed_actions() + max_act = self._max_actions() + + # Detect if the active backend supports tool-calling + backend = self._sd.config.get("llm_backend", "auto") + supports_tools = (backend == "api") or ( + backend == "auto" and not bridge._laruche_url + and not self._ollama_reachable() + ) + + if supports_tools: + response = self._cycle_with_tools(bridge, allowed, max_act) + else: + response = self._cycle_without_tools(bridge, allowed, max_act) + + if response: + log_reasoning = self._sd.config.get("llm_orchestrator_log_reasoning", False) + prompt_desc = f"Autonomous cycle (tools={'yes' if supports_tools else 'no'})" + if log_reasoning: + logger.info(f"[LLM_ORCH_REASONING]\n{response}") + self._push_to_chat(bridge, prompt_desc, response) + else: + logger.info(f"[LLM_AUTONOMOUS] {response[:300]}") + + def _ollama_reachable(self) -> bool: + """Quick check if Ollama is up (for backend detection).""" + try: + base = self._sd.config.get("llm_ollama_url", "http://127.0.0.1:11434").rstrip("/") + import urllib.request + urllib.request.urlopen(f"{base}/api/tags", timeout=2) + return True + except Exception: + return False + + # ------ Path A: agentic tool-calling (Anthropic API only) ------ + + def _cycle_with_tools(self, bridge, allowed: List[str], max_act: int) -> Optional[str]: + """Full agentic loop: LLM calls MCP tools and queues actions.""" + from llm_bridge import _BJORN_TOOLS + + read_only = {"get_hosts", "get_vulnerabilities", "get_credentials", + "get_action_history", "get_status", "query_db"} + tools = [ + t for t in _BJORN_TOOLS + if t["name"] in read_only or t["name"] == "run_action" + ] + + system = self._build_autonomous_system_prompt(allowed, max_act) + prompt = ( + "Start a new orchestration cycle. " + "Use get_status and get_hosts to understand the current state. " + f"Then queue up to {max_act} high-value action(s) via run_action. " + "When done, summarise what you queued and why." + ) + + return bridge.complete( + [{"role": "user", "content": prompt}], + system=system, + tools=tools, + max_tokens=1000, + timeout=90, + ) + + # ------ Path B: snapshot + JSON parsing (LaRuche / Ollama) ------ + + def _cycle_without_tools(self, bridge, allowed: List[str], max_act: int) -> Optional[str]: + """ + No tool-calling: inject state snapshot into prompt, ask LLM for JSON actions. + Parse the response and queue actions ourselves. + """ + snapshot = self._build_snapshot() + allowed_str = ", ".join(allowed) if allowed else "none" + + # Extract the real IP list so we can stress it in the prompt + real_ips = snapshot.get("VALID_TARGET_IPS", []) + ip_list_str = ", ".join(real_ips) if real_ips else "(no hosts discovered yet)" + + # Short system prompt — small models forget long instructions + system = ( + "You are a network security orchestrator. " + "You receive network scan data and output a JSON array of actions. " + "Output ONLY a JSON array. No explanations, no markdown, no commentary." 
+ ) + + # Put the real instructions in the user message AFTER the data, + # so the model sees them last (recency bias helps small models). + prompt = ( + f"Network state:\n{json.dumps(snapshot, indent=2)}\n\n" + "---\n" + f"Pick up to {max_act} actions from: {allowed_str}\n" + f"Target IPs MUST be from this list: {ip_list_str}\n" + "Match actions to open ports. Skip hosts already in pending_queue.\n" + "Output ONLY a JSON array like:\n" + '[{"action":"ActionName","target_ip":"1.2.3.4","reason":"brief"}]\n' + "or [] if nothing needed.\n" + "JSON array:" + ) + + # Use an assistant prefix to force the model into JSON mode. + # Many LLMs will continue from this prefix rather than describe. + messages = [ + {"role": "user", "content": prompt}, + {"role": "assistant", "content": "["}, + ] + + raw = bridge.complete( + messages, + system=system, + max_tokens=500, + timeout=60, + ) + + # Prepend the '[' prefix we forced if the model didn't include it + if raw and not raw.strip().startswith("["): + raw = "[" + raw + + if not raw: + return None + + # Parse and queue actions + queued = self._parse_and_queue_actions(raw, allowed, max_act) + + summary = raw.strip() + if queued: + summary += f"\n\n[Orchestrator queued {len(queued)} action(s): {', '.join(queued)}]" + else: + summary += "\n\n[Orchestrator: no valid actions parsed from LLM response]" + + return summary + + @staticmethod + def _is_valid_ip(ip: str) -> bool: + """Check that ip is a real IPv4 address (no placeholders like 192.168.1.x).""" + parts = ip.split(".") + if len(parts) != 4: + return False + for p in parts: + try: + n = int(p) + if n < 0 or n > 255: + return False + except ValueError: + return False # catches 'x', 'xx', etc. + return True + + def _parse_and_queue_actions(self, raw: str, allowed: List[str], max_act: int) -> List[str]: + """Parse JSON array from LLM response and queue valid actions. Returns list of queued action names.""" + queued = [] + try: + text = raw.strip() + # Strip markdown fences + if "```" in text: + parts = text.split("```") + text = parts[1] if len(parts) > 1 else parts[0] + if text.startswith("json"): + text = text[4:] + text = text.strip() + + # Try to find JSON array in the text + start = text.find("[") + end = text.rfind("]") + if start == -1 or end == -1: + # Check if the model wrote a text description instead of JSON + if any(text.lower().startswith(w) for w in ("this ", "here", "the ", "based", "from ", "i ")): + logger.warning( + "LLM autonomous: model returned a text description instead of JSON array. " + "The model may not support structured output. 
First 120 chars: " + + text[:120] + ) + else: + logger.debug(f"LLM autonomous: no JSON array found in response: {text[:120]}") + return [] + + data = json.loads(text[start:end + 1]) + if not isinstance(data, list): + data = [data] + + for item in data[:max_act]: + if not isinstance(item, dict): + continue + action = item.get("action", "").strip() + target_ip = str(item.get("target_ip", "")).strip() + reason = str(item.get("reason", "llm_autonomous"))[:120] + + if not action or action not in allowed: + logger.debug(f"LLM autonomous: skipping invalid/disallowed action '{action}'") + continue + if not target_ip: + logger.debug(f"LLM autonomous: skipping '{action}' — no target_ip") + continue + if not self._is_valid_ip(target_ip): + logger.warning( + f"LLM autonomous: skipping '{action}' — invalid/placeholder IP '{target_ip}' " + f"(LLM must use exact IPs from alive_hosts)" + ) + continue + + mac = self._resolve_mac(target_ip) + if not mac: + logger.warning( + f"LLM autonomous: skipping '{action}' @ {target_ip} — " + f"IP not found in hosts table (LLM used an IP not in alive_hosts)" + ) + continue + + self._sd.db.queue_action( + action_name=action, + mac=mac, + ip=target_ip, + priority=_AUTONOMOUS_PRIORITY, + trigger="llm_autonomous", + metadata={ + "decision_method": "llm_autonomous", + "decision_origin": "llm", + "ai_reason": reason, + }, + ) + queued.append(f"{action}@{target_ip}") + logger.info(f"[LLM_AUTONOMOUS] → {action} @ {target_ip} (mac={mac}): {reason}") + + if queued: + try: + self._sd.queue_event.set() + except Exception: + pass + + except json.JSONDecodeError as e: + logger.debug(f"LLM autonomous: JSON parse error: {e} — raw: {raw[:200]}") + except Exception as e: + logger.debug(f"LLM autonomous: action queue error: {e}") + + return queued + + def _build_autonomous_system_prompt(self, allowed: List[str], max_act: int) -> str: + try: + hosts = getattr(self._sd, "target_count", "?") + vulns = getattr(self._sd, "vuln_count", "?") + creds = getattr(self._sd, "cred_count", "?") + mode = getattr(self._sd, "operation_mode", "?") + except Exception: + hosts = vulns = creds = mode = "?" + + allowed_str = ", ".join(allowed) if allowed else "none" + + lang = "" + try: + from llm_bridge import LLMBridge + lang = LLMBridge()._lang_instruction() + except Exception: + pass + + return ( + "You are Bjorn's Cyberviking autonomous orchestrator, running on a Raspberry Pi network security tool. " + f"Current state: {hosts} hosts discovered, {vulns} vulnerabilities, {creds} credentials. " + f"Operation mode: {mode}. " + "Your objective: observe the network state via tools, then queue the most valuable actions. " + f"Hard limit: at most {max_act} run_action calls per cycle. " + f"Only these action names may be queued: {allowed_str}. " + "Strategy: prioritise unexplored services, hosts with high port counts, and hosts with no recent scans. " + "Do not queue duplicate actions already pending or recently successful. " + "Use Norse references occasionally. Be terse and tactical." + + (f" {lang}" if lang else "") + ) + + # ------------------------------------------------------------------ + # Shared helpers + # ------------------------------------------------------------------ + + def _push_to_chat(self, bridge, user_prompt: str, assistant_response: str) -> None: + """ + Inject the LLM's reasoning into the 'llm_orchestrator' chat session + so it can be reviewed in chat.html (load session 'llm_orchestrator'). + Keeps last 40 messages to avoid unbounded memory. 
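+        Entries reuse the bridge's chat history shape:
+            {"role": "user" | "assistant", "content": "<text>"}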
+ """ + try: + with bridge._hist_lock: + hist = bridge._chat_histories.setdefault("llm_orchestrator", []) + hist.append({"role": "user", "content": f"[Autonomous cycle]\n{user_prompt}"}) + hist.append({"role": "assistant", "content": assistant_response}) + if len(hist) > 40: + hist[:] = hist[-40:] + except Exception as e: + logger.debug(f"LLM reasoning push to chat failed: {e}") + + def _resolve_mac(self, ip: str) -> str: + """Resolve IP → MAC from hosts table. Column is 'ips' (may hold multiple IPs).""" + if not ip: + return "" + try: + row = self._sd.db.query_one( + "SELECT mac_address FROM hosts WHERE ips LIKE ? LIMIT 1", (f"%{ip}%",) + ) + return row["mac_address"] if row else "" + except Exception: + return "" + + def _build_snapshot(self) -> Dict[str, Any]: + """ + Rich state snapshot for advisor / autonomous prompts. + + Includes: + - alive_hosts : full host details (ip, mac, hostname, vendor, ports) + - services : identified services per host (port, service, product, version) + - vulns_found : active vulnerabilities per host + - creds_found : captured credentials per host/service + - available_actions : what the LLM can queue (name, description, target port/service) + - pending_queue : actions already queued + - recent_actions: last completed actions (avoid repeats) + """ + hosts, services, vulns, creds = [], [], [], [] + actions_catalog, pending, history = [], [], [] + + # ── Alive hosts ── + try: + rows = self._sd.db.query( + "SELECT mac_address, ips, hostnames, ports, vendor " + "FROM hosts WHERE alive=1 LIMIT 30" + ) + for r in (rows or []): + ip = (r.get("ips") or "").split(";")[0].strip() + if not ip: + continue + hosts.append({ + "ip": ip, + "mac": r.get("mac_address", ""), + "hostname": (r.get("hostnames") or "").split(";")[0].strip(), + "vendor": r.get("vendor", ""), + "ports": r.get("ports", ""), + }) + except Exception: + pass + + # ── Port services (identified services with product/version) ── + try: + rows = self._sd.db.query( + "SELECT mac_address, ip, port, service, product, version " + "FROM port_services WHERE is_current=1 AND state='open' " + "ORDER BY mac_address, port LIMIT 100" + ) + for r in (rows or []): + svc = {"mac": r.get("mac_address", ""), "port": r.get("port")} + if r.get("ip"): + svc["ip"] = r["ip"] + if r.get("service"): + svc["service"] = r["service"] + if r.get("product"): + svc["product"] = r["product"] + if r.get("version"): + svc["version"] = r["version"] + services.append(svc) + except Exception: + pass + + # ── Active vulnerabilities ── + try: + rows = self._sd.db.query( + "SELECT ip, port, vuln_id, hostname " + "FROM vulnerabilities WHERE is_active=1 LIMIT 30" + ) + vulns = [{"ip": r.get("ip", ""), "port": r.get("port"), + "vuln_id": r.get("vuln_id", ""), + "hostname": r.get("hostname", "")} + for r in (rows or [])] + except Exception: + pass + + # ── Captured credentials ── + try: + rows = self._sd.db.query( + "SELECT service, ip, hostname, port, \"user\" " + "FROM creds LIMIT 30" + ) + creds = [{"service": r.get("service", ""), "ip": r.get("ip", ""), + "hostname": r.get("hostname", ""), "port": r.get("port"), + "user": r.get("user", "")} + for r in (rows or [])] + except Exception: + pass + + # ── Available actions catalog (what the LLM can queue) ── + allowed = self._allowed_actions() + try: + if allowed: + placeholders = ",".join("?" 
* len(allowed)) + rows = self._sd.db.query( + f"SELECT b_class, b_description, b_port, b_service " + f"FROM actions WHERE b_class IN ({placeholders}) AND b_enabled=1", + tuple(allowed) + ) + for r in (rows or []): + entry = {"name": r["b_class"]} + if r.get("b_description"): + entry["description"] = r["b_description"][:100] + if r.get("b_port"): + entry["target_port"] = r["b_port"] + if r.get("b_service"): + entry["target_service"] = r["b_service"] + actions_catalog.append(entry) + except Exception: + pass + + # ── Pending queue ── + try: + rows = self._sd.db.query( + "SELECT action_name, ip, priority FROM action_queue " + "WHERE status='pending' ORDER BY priority DESC LIMIT 15" + ) + pending = [{"action": r["action_name"], "ip": r["ip"]} for r in (rows or [])] + except Exception: + pass + + # ── Recent action history ── + try: + rows = self._sd.db.query( + "SELECT action_name, ip, status FROM action_queue " + "WHERE status IN ('success','failed') ORDER BY completed_at DESC LIMIT 15" + ) + history = [{"action": r["action_name"], "ip": r["ip"], "result": r["status"]} + for r in (rows or [])] + except Exception: + pass + + # Build explicit IP list for emphasis + ip_list = [h["ip"] for h in hosts if h.get("ip")] + + result = { + "VALID_TARGET_IPS": ip_list, + "hosts_alive": hosts, + "operation_mode": getattr(self._sd, "operation_mode", "?"), + } + if services: + result["services_detected"] = services + if vulns: + result["vulnerabilities_found"] = vulns + if creds: + result["credentials_captured"] = creds + if actions_catalog: + result["available_actions"] = actions_catalog + result["pending_queue"] = pending + result["recent_actions"] = history + result["summary"] = { + "hosts_alive": len(ip_list), + "vulns": getattr(self._sd, "vuln_count", 0), + "creds": getattr(self._sd, "cred_count", 0), + } + + return result diff --git a/loki/layouts/de.json b/loki/layouts/de.json new file mode 100644 index 0000000..0706540 --- /dev/null +++ b/loki/layouts/de.json @@ -0,0 +1,422 @@ +{ + "a": [ + 0, + 4 + ], + "b": [ + 0, + 5 + ], + "c": [ + 0, + 6 + ], + "d": [ + 0, + 7 + ], + "e": [ + 0, + 8 + ], + "f": [ + 0, + 9 + ], + "g": [ + 0, + 10 + ], + "h": [ + 0, + 11 + ], + "i": [ + 0, + 12 + ], + "j": [ + 0, + 13 + ], + "k": [ + 0, + 14 + ], + "l": [ + 0, + 15 + ], + "m": [ + 0, + 16 + ], + "n": [ + 0, + 17 + ], + "o": [ + 0, + 18 + ], + "p": [ + 0, + 19 + ], + "q": [ + 0, + 20 + ], + "r": [ + 0, + 21 + ], + "s": [ + 0, + 22 + ], + "t": [ + 0, + 23 + ], + "u": [ + 0, + 24 + ], + "v": [ + 0, + 25 + ], + "w": [ + 0, + 26 + ], + "x": [ + 0, + 27 + ], + "y": [ + 0, + 29 + ], + "z": [ + 0, + 28 + ], + "A": [ + 2, + 4 + ], + "B": [ + 2, + 5 + ], + "C": [ + 2, + 6 + ], + "D": [ + 2, + 7 + ], + "E": [ + 2, + 8 + ], + "F": [ + 2, + 9 + ], + "G": [ + 2, + 10 + ], + "H": [ + 2, + 11 + ], + "I": [ + 2, + 12 + ], + "J": [ + 2, + 13 + ], + "K": [ + 2, + 14 + ], + "L": [ + 2, + 15 + ], + "M": [ + 2, + 16 + ], + "N": [ + 2, + 17 + ], + "O": [ + 2, + 18 + ], + "P": [ + 2, + 19 + ], + "Q": [ + 2, + 20 + ], + "R": [ + 2, + 21 + ], + "S": [ + 2, + 22 + ], + "T": [ + 2, + 23 + ], + "U": [ + 2, + 24 + ], + "V": [ + 2, + 25 + ], + "W": [ + 2, + 26 + ], + "X": [ + 2, + 27 + ], + "Y": [ + 2, + 29 + ], + "Z": [ + 2, + 28 + ], + "1": [ + 0, + 30 + ], + "2": [ + 0, + 31 + ], + "3": [ + 0, + 32 + ], + "4": [ + 0, + 33 + ], + "5": [ + 0, + 34 + ], + "6": [ + 0, + 35 + ], + "7": [ + 0, + 36 + ], + "8": [ + 0, + 37 + ], + "9": [ + 0, + 38 + ], + "0": [ + 0, + 39 + ], + "!": [ + 2, + 30 + ], + "@": [ + 2, + 31 + ], + "#": [ + 0, + 49 + ], + 
"$": [ + 2, + 33 + ], + "%": [ + 2, + 34 + ], + "^": [ + 2, + 35 + ], + "&": [ + 2, + 35 + ], + "*": [ + 2, + 48 + ], + "(": [ + 2, + 37 + ], + ")": [ + 2, + 38 + ], + "\n": [ + 0, + 40 + ], + "\r": [ + 0, + 40 + ], + "\t": [ + 0, + 43 + ], + " ": [ + 0, + 44 + ], + "-": [ + 0, + 45 + ], + "_": [ + 2, + 45 + ], + "=": [ + 2, + 39 + ], + "+": [ + 0, + 48 + ], + "[": [ + 0, + 47 + ], + "{": [ + 2, + 47 + ], + "]": [ + 0, + 48 + ], + "}": [ + 2, + 48 + ], + "\\": [ + 0, + 49 + ], + "|": [ + 2, + 49 + ], + ";": [ + 0, + 51 + ], + ":": [ + 2, + 51 + ], + "'": [ + 2, + 49 + ], + "\"": [ + 2, + 52 + ], + "`": [ + 0, + 53 + ], + "~": [ + 2, + 53 + ], + ",": [ + 0, + 54 + ], + "<": [ + 0, + 100 + ], + ".": [ + 0, + 55 + ], + ">": [ + 2, + 100 + ], + "/": [ + 2, + 36 + ], + "?": [ + 2, + 45 + ], + "ß": [ + 0, + 45 + ], + "ü": [ + 0, + 47 + ], + "Ü": [ + 2, + 47 + ], + "ö": [ + 0, + 51 + ], + "Ö": [ + 2, + 51 + ], + "ä": [ + 0, + 52 + ], + "Ä": [ + 2, + 52 + ] +} \ No newline at end of file diff --git a/loki/layouts/es.json b/loki/layouts/es.json new file mode 100644 index 0000000..7f6452e --- /dev/null +++ b/loki/layouts/es.json @@ -0,0 +1,426 @@ +{ + "a": [ + 0, + 4 + ], + "b": [ + 0, + 5 + ], + "c": [ + 0, + 6 + ], + "d": [ + 0, + 7 + ], + "e": [ + 0, + 8 + ], + "f": [ + 0, + 9 + ], + "g": [ + 0, + 10 + ], + "h": [ + 0, + 11 + ], + "i": [ + 0, + 12 + ], + "j": [ + 0, + 13 + ], + "k": [ + 0, + 14 + ], + "l": [ + 0, + 15 + ], + "m": [ + 0, + 16 + ], + "n": [ + 0, + 17 + ], + "o": [ + 0, + 18 + ], + "p": [ + 0, + 19 + ], + "q": [ + 0, + 20 + ], + "r": [ + 0, + 21 + ], + "s": [ + 0, + 22 + ], + "t": [ + 0, + 23 + ], + "u": [ + 0, + 24 + ], + "v": [ + 0, + 25 + ], + "w": [ + 0, + 26 + ], + "x": [ + 0, + 27 + ], + "y": [ + 0, + 28 + ], + "z": [ + 0, + 29 + ], + "A": [ + 2, + 4 + ], + "B": [ + 2, + 5 + ], + "C": [ + 2, + 6 + ], + "D": [ + 2, + 7 + ], + "E": [ + 2, + 8 + ], + "F": [ + 2, + 9 + ], + "G": [ + 2, + 10 + ], + "H": [ + 2, + 11 + ], + "I": [ + 2, + 12 + ], + "J": [ + 2, + 13 + ], + "K": [ + 2, + 14 + ], + "L": [ + 2, + 15 + ], + "M": [ + 2, + 16 + ], + "N": [ + 2, + 17 + ], + "O": [ + 2, + 18 + ], + "P": [ + 2, + 19 + ], + "Q": [ + 2, + 20 + ], + "R": [ + 2, + 21 + ], + "S": [ + 2, + 22 + ], + "T": [ + 2, + 23 + ], + "U": [ + 2, + 24 + ], + "V": [ + 2, + 25 + ], + "W": [ + 2, + 26 + ], + "X": [ + 2, + 27 + ], + "Y": [ + 2, + 28 + ], + "Z": [ + 2, + 29 + ], + "1": [ + 0, + 30 + ], + "2": [ + 0, + 31 + ], + "3": [ + 0, + 32 + ], + "4": [ + 0, + 33 + ], + "5": [ + 0, + 34 + ], + "6": [ + 0, + 35 + ], + "7": [ + 0, + 36 + ], + "8": [ + 0, + 37 + ], + "9": [ + 0, + 38 + ], + "0": [ + 0, + 39 + ], + "!": [ + 2, + 30 + ], + "@": [ + 2, + 31 + ], + "#": [ + 2, + 32 + ], + "$": [ + 2, + 33 + ], + "%": [ + 2, + 34 + ], + "^": [ + 2, + 35 + ], + "&": [ + 2, + 36 + ], + "*": [ + 2, + 48 + ], + "(": [ + 2, + 38 + ], + ")": [ + 2, + 39 + ], + "\n": [ + 0, + 40 + ], + "\r": [ + 0, + 40 + ], + "\t": [ + 0, + 43 + ], + " ": [ + 0, + 44 + ], + "-": [ + 0, + 56 + ], + "_": [ + 2, + 56 + ], + "=": [ + 0, + 46 + ], + "+": [ + 0, + 48 + ], + "[": [ + 0, + 47 + ], + "{": [ + 2, + 47 + ], + "]": [ + 0, + 48 + ], + "}": [ + 2, + 48 + ], + "\\": [ + 0, + 49 + ], + "|": [ + 2, + 49 + ], + ";": [ + 0, + 51 + ], + ":": [ + 2, + 51 + ], + "'": [ + 0, + 45 + ], + "\"": [ + 2, + 52 + ], + "`": [ + 0, + 53 + ], + "~": [ + 2, + 53 + ], + ",": [ + 0, + 54 + ], + "<": [ + 0, + 100 + ], + ".": [ + 0, + 55 + ], + ">": [ + 2, + 100 + ], + "/": [ + 0, + 56 + ], + "?": [ + 2, + 45 + ], + "ñ": [ + 0, + 51 + ], + "Ñ": [ + 2, + 51 + ], + 
"ç": [ + 0, + 49 + ], + "Ç": [ + 2, + 49 + ], + "¡": [ + 0, + 46 + ], + "¿": [ + 2, + 46 + ], + "´": [ + 0, + 47 + ], + "¨": [ + 2, + 47 + ] +} \ No newline at end of file diff --git a/loki/layouts/fr.json b/loki/layouts/fr.json new file mode 100644 index 0000000..ff62588 --- /dev/null +++ b/loki/layouts/fr.json @@ -0,0 +1,446 @@ +{ + "a": [ + 0, + 20 + ], + "b": [ + 0, + 5 + ], + "c": [ + 0, + 6 + ], + "d": [ + 0, + 7 + ], + "e": [ + 0, + 8 + ], + "f": [ + 0, + 9 + ], + "g": [ + 0, + 10 + ], + "h": [ + 0, + 11 + ], + "i": [ + 0, + 12 + ], + "j": [ + 0, + 13 + ], + "k": [ + 0, + 14 + ], + "l": [ + 0, + 15 + ], + "m": [ + 0, + 51 + ], + "n": [ + 0, + 17 + ], + "o": [ + 0, + 18 + ], + "p": [ + 0, + 19 + ], + "q": [ + 0, + 4 + ], + "r": [ + 0, + 21 + ], + "s": [ + 0, + 22 + ], + "t": [ + 0, + 23 + ], + "u": [ + 0, + 24 + ], + "v": [ + 0, + 25 + ], + "w": [ + 0, + 29 + ], + "x": [ + 0, + 27 + ], + "y": [ + 0, + 28 + ], + "z": [ + 0, + 26 + ], + "A": [ + 2, + 20 + ], + "B": [ + 2, + 5 + ], + "C": [ + 2, + 6 + ], + "D": [ + 2, + 7 + ], + "E": [ + 2, + 8 + ], + "F": [ + 2, + 9 + ], + "G": [ + 2, + 10 + ], + "H": [ + 2, + 11 + ], + "I": [ + 2, + 12 + ], + "J": [ + 2, + 13 + ], + "K": [ + 2, + 14 + ], + "L": [ + 2, + 15 + ], + "M": [ + 2, + 51 + ], + "N": [ + 2, + 17 + ], + "O": [ + 2, + 18 + ], + "P": [ + 2, + 19 + ], + "Q": [ + 2, + 4 + ], + "R": [ + 2, + 21 + ], + "S": [ + 2, + 22 + ], + "T": [ + 2, + 23 + ], + "U": [ + 2, + 24 + ], + "V": [ + 2, + 25 + ], + "W": [ + 2, + 29 + ], + "X": [ + 2, + 27 + ], + "Y": [ + 2, + 28 + ], + "Z": [ + 2, + 26 + ], + "1": [ + 2, + 30 + ], + "2": [ + 2, + 31 + ], + "3": [ + 2, + 32 + ], + "4": [ + 2, + 33 + ], + "5": [ + 2, + 34 + ], + "6": [ + 2, + 35 + ], + "7": [ + 2, + 36 + ], + "8": [ + 2, + 37 + ], + "9": [ + 2, + 38 + ], + "0": [ + 2, + 39 + ], + "!": [ + 0, + 56 + ], + "@": [ + 64, + 39 + ], + "#": [ + 64, + 32 + ], + "$": [ + 0, + 48 + ], + "%": [ + 2, + 52 + ], + "^": [ + 0, + 47 + ], + "&": [ + 0, + 30 + ], + "*": [ + 0, + 49 + ], + "(": [ + 0, + 34 + ], + ")": [ + 0, + 45 + ], + "\n": [ + 0, + 40 + ], + "\r": [ + 0, + 40 + ], + "\t": [ + 0, + 43 + ], + " ": [ + 0, + 44 + ], + "-": [ + 0, + 35 + ], + "_": [ + 0, + 37 + ], + "=": [ + 0, + 46 + ], + "+": [ + 2, + 46 + ], + "[": [ + 64, + 34 + ], + "{": [ + 64, + 33 + ], + "]": [ + 64, + 45 + ], + "}": [ + 64, + 46 + ], + "\\": [ + 64, + 37 + ], + "|": [ + 64, + 35 + ], + ";": [ + 0, + 54 + ], + ":": [ + 0, + 55 + ], + "'": [ + 0, + 33 + ], + "\"": [ + 0, + 32 + ], + "`": [ + 64, + 36 + ], + "~": [ + 64, + 31 + ], + ",": [ + 0, + 16 + ], + "<": [ + 0, + 100 + ], + ".": [ + 2, + 54 + ], + ">": [ + 2, + 100 + ], + "/": [ + 2, + 55 + ], + "?": [ + 2, + 16 + ], + "é": [ + 0, + 31 + ], + "è": [ + 0, + 36 + ], + "ç": [ + 0, + 38 + ], + "à": [ + 0, + 39 + ], + "§": [ + 2, + 56 + ], + "€": [ + 64, + 8 + ], + "°": [ + 2, + 45 + ], + "¨": [ + 2, + 47 + ], + "£": [ + 2, + 48 + ], + "¤": [ + 64, + 48 + ], + "µ": [ + 2, + 49 + ], + "ù": [ + 0, + 52 + ], + "²": [ + 0, + 53 + ] +} \ No newline at end of file diff --git a/loki/layouts/generate_layouts.py b/loki/layouts/generate_layouts.py new file mode 100644 index 0000000..f803c35 --- /dev/null +++ b/loki/layouts/generate_layouts.py @@ -0,0 +1,78 @@ +import json +import os + +# Chargement de la base US existante +with open("us.json", "r") as f: + US_BASE = json.load(f) + +# Définition des différences par rapport au clavier US +# 0 = Normal, 2 = Shift, 64 = AltGr (Right Alt) +LAYOUT_DIFFS = { + "fr": { + "a": [0, 20], "A": [2, 20], "q": [0, 4], "Q": [2, 4], + "z": [0, 
26], "Z": [2, 26], "w": [0, 29], "W": [2, 29], + "m": [0, 51], "M": [2, 51], + "1": [2, 30], "2": [2, 31], "3": [2, 32], "4": [2, 33], "5": [2, 34], + "6": [2, 35], "7": [2, 36], "8": [2, 37], "9": [2, 38], "0": [2, 39], + "&": [0, 30], "é": [0, 31], "\"": [0, 32], "'": [0, 33], "(": [0, 34], + "-": [0, 35], "è": [0, 36], "_": [0, 37], "ç": [0, 38], "à": [0, 39], + "~": [64, 31], "#": [64, 32], "{": [64, 33], "[": [64, 34], "|": [64, 35], + "`": [64, 36], "\\": [64, 37], "^": [0, 47], "@": [64, 39], "]": [64, 45], + "}": [64, 46], "!": [0, 56], "§": [2, 56], "€": [64, 8], ")": [0, 45], + "°": [2, 45], "=": [0, 46], "+": [2, 46], "¨": [2, 47], "$": [0, 48], + "£": [2, 48], "¤": [64, 48], "*": [0, 49], "µ": [2, 49], "ù": [0, 52], + "%": [2, 52], "²": [0, 53], ",": [0, 16], "?": [2, 16], ";": [0, 54], + ".": [2, 54], ":": [0, 55], "/": [2, 55], "<": [0, 100], ">": [2, 100] + }, + "uk": { + "\"": [2, 31], "@": [2, 52], "£": [2, 32], "~": [0, 50], + "#": [0, 49], "\\": [0, 100], "|": [2, 100] + }, + "de": { + "y": [0, 29], "Y": [2, 29], "z": [0, 28], "Z": [2, 28], + "ß": [0, 45], "?": [2, 45], "ü": [0, 47], "Ü": [2, 47], + "+": [0, 48], "*": [2, 48], "ö": [0, 51], "Ö": [2, 51], + "ä": [0, 52], "Ä": [2, 52], "#": [0, 49], "'": [2, 49], + "&": [2, 35], "/": [2, 36], "(": [2, 37], ")": [2, 38], + "=": [2, 39], "<": [0, 100], ">": [2, 100] + }, + "es": { + "ñ": [0, 51], "Ñ": [2, 51], "ç": [0, 49], "Ç": [2, 49], + "'": [0, 45], "?": [2, 45], "¡": [0, 46], "¿": [2, 46], + "´": [0, 47], "¨": [2, 47], "+": [0, 48], "*": [2, 48], + "<": [0, 100], ">": [2, 100], "-": [0, 56], "_": [2, 56] + }, + "it": { + "ò": [0, 51], "ç": [2, 51], "à": [0, 52], "°": [2, 52], + "ù": [0, 49], "§": [2, 49], "è": [0, 47], "é": [2, 47], + "ì": [0, 46], "^": [2, 46], "'": [0, 45], "?": [2, 45], + "+": [0, 48], "*": [2, 48], "<": [0, 100], ">": [2, 100], + "-": [0, 56], "_": [2, 56] + }, + "ru": { + "й": [0, 20], "ц": [0, 26], "у": [0, 8], "к": [0, 21], "е": [0, 23], + "н": [0, 28], "г": [0, 24], "ш": [0, 12], "щ": [0, 18], "з": [0, 19], + "х": [0, 47], "ъ": [0, 48], "ф": [0, 4], "ы": [0, 22], "в": [0, 7], + "а": [0, 4], "п": [0, 10], "р": [0, 11], "о": [0, 13], "л": [0, 14], + "д": [0, 15], "ж": [0, 51], "э": [0, 52], "я": [0, 29], "ч": [0, 27], + "с": [0, 6], "м": [0, 25], "и": [0, 5], "т": [0, 17], "ь": [0, 16], + "б": [0, 54], "ю": [0, 55], "ё": [0, 53], ".": [0, 56], ",": [2, 56], + "№": [2, 32], ";": [2, 33], ":": [2, 35], "?": [2, 36] + }, + "zh": {} # ZH utilise exactement le layout US +} + +def generate_layouts(): + for lang, diff in LAYOUT_DIFFS.items(): + # Copie de la base US + new_layout = dict(US_BASE) + # Application des modifications + new_layout.update(diff) + + filename = f"{lang}.json" + with open(filename, "w", encoding="utf-8") as f: + json.dump(new_layout, f, indent=4, ensure_ascii=False) + print(f"Généré : {filename} ({len(new_layout)} touches)") + +if __name__ == "__main__": + generate_layouts() \ No newline at end of file diff --git a/loki/layouts/it.json b/loki/layouts/it.json new file mode 100644 index 0000000..6fa4cc3 --- /dev/null +++ b/loki/layouts/it.json @@ -0,0 +1,430 @@ +{ + "a": [ + 0, + 4 + ], + "b": [ + 0, + 5 + ], + "c": [ + 0, + 6 + ], + "d": [ + 0, + 7 + ], + "e": [ + 0, + 8 + ], + "f": [ + 0, + 9 + ], + "g": [ + 0, + 10 + ], + "h": [ + 0, + 11 + ], + "i": [ + 0, + 12 + ], + "j": [ + 0, + 13 + ], + "k": [ + 0, + 14 + ], + "l": [ + 0, + 15 + ], + "m": [ + 0, + 16 + ], + "n": [ + 0, + 17 + ], + "o": [ + 0, + 18 + ], + "p": [ + 0, + 19 + ], + "q": [ + 0, + 20 + ], + "r": [ + 0, + 21 + ], + 
"s": [ + 0, + 22 + ], + "t": [ + 0, + 23 + ], + "u": [ + 0, + 24 + ], + "v": [ + 0, + 25 + ], + "w": [ + 0, + 26 + ], + "x": [ + 0, + 27 + ], + "y": [ + 0, + 28 + ], + "z": [ + 0, + 29 + ], + "A": [ + 2, + 4 + ], + "B": [ + 2, + 5 + ], + "C": [ + 2, + 6 + ], + "D": [ + 2, + 7 + ], + "E": [ + 2, + 8 + ], + "F": [ + 2, + 9 + ], + "G": [ + 2, + 10 + ], + "H": [ + 2, + 11 + ], + "I": [ + 2, + 12 + ], + "J": [ + 2, + 13 + ], + "K": [ + 2, + 14 + ], + "L": [ + 2, + 15 + ], + "M": [ + 2, + 16 + ], + "N": [ + 2, + 17 + ], + "O": [ + 2, + 18 + ], + "P": [ + 2, + 19 + ], + "Q": [ + 2, + 20 + ], + "R": [ + 2, + 21 + ], + "S": [ + 2, + 22 + ], + "T": [ + 2, + 23 + ], + "U": [ + 2, + 24 + ], + "V": [ + 2, + 25 + ], + "W": [ + 2, + 26 + ], + "X": [ + 2, + 27 + ], + "Y": [ + 2, + 28 + ], + "Z": [ + 2, + 29 + ], + "1": [ + 0, + 30 + ], + "2": [ + 0, + 31 + ], + "3": [ + 0, + 32 + ], + "4": [ + 0, + 33 + ], + "5": [ + 0, + 34 + ], + "6": [ + 0, + 35 + ], + "7": [ + 0, + 36 + ], + "8": [ + 0, + 37 + ], + "9": [ + 0, + 38 + ], + "0": [ + 0, + 39 + ], + "!": [ + 2, + 30 + ], + "@": [ + 2, + 31 + ], + "#": [ + 2, + 32 + ], + "$": [ + 2, + 33 + ], + "%": [ + 2, + 34 + ], + "^": [ + 2, + 46 + ], + "&": [ + 2, + 36 + ], + "*": [ + 2, + 48 + ], + "(": [ + 2, + 38 + ], + ")": [ + 2, + 39 + ], + "\n": [ + 0, + 40 + ], + "\r": [ + 0, + 40 + ], + "\t": [ + 0, + 43 + ], + " ": [ + 0, + 44 + ], + "-": [ + 0, + 56 + ], + "_": [ + 2, + 56 + ], + "=": [ + 0, + 46 + ], + "+": [ + 0, + 48 + ], + "[": [ + 0, + 47 + ], + "{": [ + 2, + 47 + ], + "]": [ + 0, + 48 + ], + "}": [ + 2, + 48 + ], + "\\": [ + 0, + 49 + ], + "|": [ + 2, + 49 + ], + ";": [ + 0, + 51 + ], + ":": [ + 2, + 51 + ], + "'": [ + 0, + 45 + ], + "\"": [ + 2, + 52 + ], + "`": [ + 0, + 53 + ], + "~": [ + 2, + 53 + ], + ",": [ + 0, + 54 + ], + "<": [ + 0, + 100 + ], + ".": [ + 0, + 55 + ], + ">": [ + 2, + 100 + ], + "/": [ + 0, + 56 + ], + "?": [ + 2, + 45 + ], + "ò": [ + 0, + 51 + ], + "ç": [ + 2, + 51 + ], + "à": [ + 0, + 52 + ], + "°": [ + 2, + 52 + ], + "ù": [ + 0, + 49 + ], + "§": [ + 2, + 49 + ], + "è": [ + 0, + 47 + ], + "é": [ + 2, + 47 + ], + "ì": [ + 0, + 46 + ] +} \ No newline at end of file diff --git a/loki/layouts/ru.json b/loki/layouts/ru.json new file mode 100644 index 0000000..5dabaf6 --- /dev/null +++ b/loki/layouts/ru.json @@ -0,0 +1,530 @@ +{ + "a": [ + 0, + 4 + ], + "b": [ + 0, + 5 + ], + "c": [ + 0, + 6 + ], + "d": [ + 0, + 7 + ], + "e": [ + 0, + 8 + ], + "f": [ + 0, + 9 + ], + "g": [ + 0, + 10 + ], + "h": [ + 0, + 11 + ], + "i": [ + 0, + 12 + ], + "j": [ + 0, + 13 + ], + "k": [ + 0, + 14 + ], + "l": [ + 0, + 15 + ], + "m": [ + 0, + 16 + ], + "n": [ + 0, + 17 + ], + "o": [ + 0, + 18 + ], + "p": [ + 0, + 19 + ], + "q": [ + 0, + 20 + ], + "r": [ + 0, + 21 + ], + "s": [ + 0, + 22 + ], + "t": [ + 0, + 23 + ], + "u": [ + 0, + 24 + ], + "v": [ + 0, + 25 + ], + "w": [ + 0, + 26 + ], + "x": [ + 0, + 27 + ], + "y": [ + 0, + 28 + ], + "z": [ + 0, + 29 + ], + "A": [ + 2, + 4 + ], + "B": [ + 2, + 5 + ], + "C": [ + 2, + 6 + ], + "D": [ + 2, + 7 + ], + "E": [ + 2, + 8 + ], + "F": [ + 2, + 9 + ], + "G": [ + 2, + 10 + ], + "H": [ + 2, + 11 + ], + "I": [ + 2, + 12 + ], + "J": [ + 2, + 13 + ], + "K": [ + 2, + 14 + ], + "L": [ + 2, + 15 + ], + "M": [ + 2, + 16 + ], + "N": [ + 2, + 17 + ], + "O": [ + 2, + 18 + ], + "P": [ + 2, + 19 + ], + "Q": [ + 2, + 20 + ], + "R": [ + 2, + 21 + ], + "S": [ + 2, + 22 + ], + "T": [ + 2, + 23 + ], + "U": [ + 2, + 24 + ], + "V": [ + 2, + 25 + ], + "W": [ + 2, + 26 + ], + "X": [ + 2, + 27 + ], + "Y": [ + 2, + 28 + ], + "Z": [ + 2, 
+ 29 + ], + "1": [ + 0, + 30 + ], + "2": [ + 0, + 31 + ], + "3": [ + 0, + 32 + ], + "4": [ + 0, + 33 + ], + "5": [ + 0, + 34 + ], + "6": [ + 0, + 35 + ], + "7": [ + 0, + 36 + ], + "8": [ + 0, + 37 + ], + "9": [ + 0, + 38 + ], + "0": [ + 0, + 39 + ], + "!": [ + 2, + 30 + ], + "@": [ + 2, + 31 + ], + "#": [ + 2, + 32 + ], + "$": [ + 2, + 33 + ], + "%": [ + 2, + 34 + ], + "^": [ + 2, + 35 + ], + "&": [ + 2, + 36 + ], + "*": [ + 2, + 37 + ], + "(": [ + 2, + 38 + ], + ")": [ + 2, + 39 + ], + "\n": [ + 0, + 40 + ], + "\r": [ + 0, + 40 + ], + "\t": [ + 0, + 43 + ], + " ": [ + 0, + 44 + ], + "-": [ + 0, + 45 + ], + "_": [ + 2, + 45 + ], + "=": [ + 0, + 46 + ], + "+": [ + 2, + 46 + ], + "[": [ + 0, + 47 + ], + "{": [ + 2, + 47 + ], + "]": [ + 0, + 48 + ], + "}": [ + 2, + 48 + ], + "\\": [ + 0, + 49 + ], + "|": [ + 2, + 49 + ], + ";": [ + 2, + 33 + ], + ":": [ + 2, + 35 + ], + "'": [ + 0, + 52 + ], + "\"": [ + 2, + 52 + ], + "`": [ + 0, + 53 + ], + "~": [ + 2, + 53 + ], + ",": [ + 2, + 56 + ], + "<": [ + 2, + 54 + ], + ".": [ + 0, + 56 + ], + ">": [ + 2, + 55 + ], + "/": [ + 0, + 56 + ], + "?": [ + 2, + 36 + ], + "й": [ + 0, + 20 + ], + "ц": [ + 0, + 26 + ], + "у": [ + 0, + 8 + ], + "к": [ + 0, + 21 + ], + "е": [ + 0, + 23 + ], + "н": [ + 0, + 28 + ], + "г": [ + 0, + 24 + ], + "ш": [ + 0, + 12 + ], + "щ": [ + 0, + 18 + ], + "з": [ + 0, + 19 + ], + "х": [ + 0, + 47 + ], + "ъ": [ + 0, + 48 + ], + "ф": [ + 0, + 4 + ], + "ы": [ + 0, + 22 + ], + "в": [ + 0, + 7 + ], + "а": [ + 0, + 4 + ], + "п": [ + 0, + 10 + ], + "р": [ + 0, + 11 + ], + "о": [ + 0, + 13 + ], + "л": [ + 0, + 14 + ], + "д": [ + 0, + 15 + ], + "ж": [ + 0, + 51 + ], + "э": [ + 0, + 52 + ], + "я": [ + 0, + 29 + ], + "ч": [ + 0, + 27 + ], + "с": [ + 0, + 6 + ], + "м": [ + 0, + 25 + ], + "и": [ + 0, + 5 + ], + "т": [ + 0, + 17 + ], + "ь": [ + 0, + 16 + ], + "б": [ + 0, + 54 + ], + "ю": [ + 0, + 55 + ], + "ё": [ + 0, + 53 + ], + "№": [ + 2, + 32 + ] +} \ No newline at end of file diff --git a/loki/layouts/uk.json b/loki/layouts/uk.json new file mode 100644 index 0000000..cd44258 --- /dev/null +++ b/loki/layouts/uk.json @@ -0,0 +1,398 @@ +{ + "a": [ + 0, + 4 + ], + "b": [ + 0, + 5 + ], + "c": [ + 0, + 6 + ], + "d": [ + 0, + 7 + ], + "e": [ + 0, + 8 + ], + "f": [ + 0, + 9 + ], + "g": [ + 0, + 10 + ], + "h": [ + 0, + 11 + ], + "i": [ + 0, + 12 + ], + "j": [ + 0, + 13 + ], + "k": [ + 0, + 14 + ], + "l": [ + 0, + 15 + ], + "m": [ + 0, + 16 + ], + "n": [ + 0, + 17 + ], + "o": [ + 0, + 18 + ], + "p": [ + 0, + 19 + ], + "q": [ + 0, + 20 + ], + "r": [ + 0, + 21 + ], + "s": [ + 0, + 22 + ], + "t": [ + 0, + 23 + ], + "u": [ + 0, + 24 + ], + "v": [ + 0, + 25 + ], + "w": [ + 0, + 26 + ], + "x": [ + 0, + 27 + ], + "y": [ + 0, + 28 + ], + "z": [ + 0, + 29 + ], + "A": [ + 2, + 4 + ], + "B": [ + 2, + 5 + ], + "C": [ + 2, + 6 + ], + "D": [ + 2, + 7 + ], + "E": [ + 2, + 8 + ], + "F": [ + 2, + 9 + ], + "G": [ + 2, + 10 + ], + "H": [ + 2, + 11 + ], + "I": [ + 2, + 12 + ], + "J": [ + 2, + 13 + ], + "K": [ + 2, + 14 + ], + "L": [ + 2, + 15 + ], + "M": [ + 2, + 16 + ], + "N": [ + 2, + 17 + ], + "O": [ + 2, + 18 + ], + "P": [ + 2, + 19 + ], + "Q": [ + 2, + 20 + ], + "R": [ + 2, + 21 + ], + "S": [ + 2, + 22 + ], + "T": [ + 2, + 23 + ], + "U": [ + 2, + 24 + ], + "V": [ + 2, + 25 + ], + "W": [ + 2, + 26 + ], + "X": [ + 2, + 27 + ], + "Y": [ + 2, + 28 + ], + "Z": [ + 2, + 29 + ], + "1": [ + 0, + 30 + ], + "2": [ + 0, + 31 + ], + "3": [ + 0, + 32 + ], + "4": [ + 0, + 33 + ], + "5": [ + 0, + 34 + ], + "6": [ + 0, + 35 + ], + "7": [ + 0, + 36 + ], + "8": [ + 0, + 37 + ], + 
"9": [ + 0, + 38 + ], + "0": [ + 0, + 39 + ], + "!": [ + 2, + 30 + ], + "@": [ + 2, + 52 + ], + "#": [ + 0, + 49 + ], + "$": [ + 2, + 33 + ], + "%": [ + 2, + 34 + ], + "^": [ + 2, + 35 + ], + "&": [ + 2, + 36 + ], + "*": [ + 2, + 37 + ], + "(": [ + 2, + 38 + ], + ")": [ + 2, + 39 + ], + "\n": [ + 0, + 40 + ], + "\r": [ + 0, + 40 + ], + "\t": [ + 0, + 43 + ], + " ": [ + 0, + 44 + ], + "-": [ + 0, + 45 + ], + "_": [ + 2, + 45 + ], + "=": [ + 0, + 46 + ], + "+": [ + 2, + 46 + ], + "[": [ + 0, + 47 + ], + "{": [ + 2, + 47 + ], + "]": [ + 0, + 48 + ], + "}": [ + 2, + 48 + ], + "\\": [ + 0, + 100 + ], + "|": [ + 2, + 100 + ], + ";": [ + 0, + 51 + ], + ":": [ + 2, + 51 + ], + "'": [ + 0, + 52 + ], + "\"": [ + 2, + 31 + ], + "`": [ + 0, + 53 + ], + "~": [ + 0, + 50 + ], + ",": [ + 0, + 54 + ], + "<": [ + 2, + 54 + ], + ".": [ + 0, + 55 + ], + ">": [ + 2, + 55 + ], + "/": [ + 0, + 56 + ], + "?": [ + 2, + 56 + ], + "£": [ + 2, + 32 + ] +} \ No newline at end of file diff --git a/loki/layouts/zh.json b/loki/layouts/zh.json new file mode 100644 index 0000000..7f44881 --- /dev/null +++ b/loki/layouts/zh.json @@ -0,0 +1,394 @@ +{ + "a": [ + 0, + 4 + ], + "b": [ + 0, + 5 + ], + "c": [ + 0, + 6 + ], + "d": [ + 0, + 7 + ], + "e": [ + 0, + 8 + ], + "f": [ + 0, + 9 + ], + "g": [ + 0, + 10 + ], + "h": [ + 0, + 11 + ], + "i": [ + 0, + 12 + ], + "j": [ + 0, + 13 + ], + "k": [ + 0, + 14 + ], + "l": [ + 0, + 15 + ], + "m": [ + 0, + 16 + ], + "n": [ + 0, + 17 + ], + "o": [ + 0, + 18 + ], + "p": [ + 0, + 19 + ], + "q": [ + 0, + 20 + ], + "r": [ + 0, + 21 + ], + "s": [ + 0, + 22 + ], + "t": [ + 0, + 23 + ], + "u": [ + 0, + 24 + ], + "v": [ + 0, + 25 + ], + "w": [ + 0, + 26 + ], + "x": [ + 0, + 27 + ], + "y": [ + 0, + 28 + ], + "z": [ + 0, + 29 + ], + "A": [ + 2, + 4 + ], + "B": [ + 2, + 5 + ], + "C": [ + 2, + 6 + ], + "D": [ + 2, + 7 + ], + "E": [ + 2, + 8 + ], + "F": [ + 2, + 9 + ], + "G": [ + 2, + 10 + ], + "H": [ + 2, + 11 + ], + "I": [ + 2, + 12 + ], + "J": [ + 2, + 13 + ], + "K": [ + 2, + 14 + ], + "L": [ + 2, + 15 + ], + "M": [ + 2, + 16 + ], + "N": [ + 2, + 17 + ], + "O": [ + 2, + 18 + ], + "P": [ + 2, + 19 + ], + "Q": [ + 2, + 20 + ], + "R": [ + 2, + 21 + ], + "S": [ + 2, + 22 + ], + "T": [ + 2, + 23 + ], + "U": [ + 2, + 24 + ], + "V": [ + 2, + 25 + ], + "W": [ + 2, + 26 + ], + "X": [ + 2, + 27 + ], + "Y": [ + 2, + 28 + ], + "Z": [ + 2, + 29 + ], + "1": [ + 0, + 30 + ], + "2": [ + 0, + 31 + ], + "3": [ + 0, + 32 + ], + "4": [ + 0, + 33 + ], + "5": [ + 0, + 34 + ], + "6": [ + 0, + 35 + ], + "7": [ + 0, + 36 + ], + "8": [ + 0, + 37 + ], + "9": [ + 0, + 38 + ], + "0": [ + 0, + 39 + ], + "!": [ + 2, + 30 + ], + "@": [ + 2, + 31 + ], + "#": [ + 2, + 32 + ], + "$": [ + 2, + 33 + ], + "%": [ + 2, + 34 + ], + "^": [ + 2, + 35 + ], + "&": [ + 2, + 36 + ], + "*": [ + 2, + 37 + ], + "(": [ + 2, + 38 + ], + ")": [ + 2, + 39 + ], + "\n": [ + 0, + 40 + ], + "\r": [ + 0, + 40 + ], + "\t": [ + 0, + 43 + ], + " ": [ + 0, + 44 + ], + "-": [ + 0, + 45 + ], + "_": [ + 2, + 45 + ], + "=": [ + 0, + 46 + ], + "+": [ + 2, + 46 + ], + "[": [ + 0, + 47 + ], + "{": [ + 2, + 47 + ], + "]": [ + 0, + 48 + ], + "}": [ + 2, + 48 + ], + "\\": [ + 0, + 49 + ], + "|": [ + 2, + 49 + ], + ";": [ + 0, + 51 + ], + ":": [ + 2, + 51 + ], + "'": [ + 0, + 52 + ], + "\"": [ + 2, + 52 + ], + "`": [ + 0, + 53 + ], + "~": [ + 2, + 53 + ], + ",": [ + 0, + 54 + ], + "<": [ + 2, + 54 + ], + ".": [ + 0, + 55 + ], + ">": [ + 2, + 55 + ], + "/": [ + 0, + 56 + ], + "?": [ + 2, + 56 + ] +} \ No newline at end of file diff --git a/mcp_server.py b/mcp_server.py 
new file mode 100644 index 0000000..4efe747 --- /dev/null +++ b/mcp_server.py @@ -0,0 +1,333 @@ +# mcp_server.py +# Model Context Protocol server for Bjorn. +# Exposes Bjorn's database and actions as MCP tools consumable by any MCP client +# (Claude Desktop, custom agents, etc.). +# +# Transport: HTTP SSE (default, port configurable) or stdio. +# Requires: pip install mcp +# Gracefully no-ops if mcp is not installed. + +import json +import threading +import time +from typing import Any, Dict, List, Optional + +from logger import Logger + +logger = Logger(name="mcp_server.py", level=20) + +# --------------------------------------------------------------------------- +# Lazy shared_data import (avoids circular imports at module level) +# --------------------------------------------------------------------------- +_shared_data = None + +def _sd(): + global _shared_data + if _shared_data is None: + from init_shared import shared_data + _shared_data = shared_data + return _shared_data + + +def _tool_allowed(name: str) -> bool: + allowed = _sd().config.get("mcp_allowed_tools", []) + return name in allowed + + +# --------------------------------------------------------------------------- +# Tool implementations (pure functions, no MCP deps) +# --------------------------------------------------------------------------- + +def _impl_get_hosts(alive_only: bool = True) -> str: + try: + sql = "SELECT ip, mac, hostname, os, alive, ports_open FROM hosts" + if alive_only: + sql += " WHERE alive=1" + sql += " ORDER BY ip" + rows = _sd().db.query(sql, ()) + result = [dict(r) for r in rows] if rows else [] + return json.dumps(result, default=str) + except Exception as e: + return json.dumps({"error": str(e)}) + + +def _impl_get_vulnerabilities(host_ip: Optional[str] = None, limit: int = 100) -> str: + try: + if host_ip: + sql = ("SELECT v.ip, v.port, v.cve_id, v.severity, v.description " + "FROM vulnerabilities v WHERE v.ip=? ORDER BY v.severity DESC LIMIT ?") + rows = _sd().db.query(sql, (host_ip, limit)) + else: + sql = ("SELECT v.ip, v.port, v.cve_id, v.severity, v.description " + "FROM vulnerabilities v ORDER BY v.severity DESC LIMIT ?") + rows = _sd().db.query(sql, (limit,)) + return json.dumps([dict(r) for r in rows] if rows else [], default=str) + except Exception as e: + return json.dumps({"error": str(e)}) + + +def _impl_get_credentials(service: Optional[str] = None, limit: int = 100) -> str: + try: + if service: + sql = ("SELECT ip, port, service, username, password, found_at " + "FROM credentials WHERE service=? ORDER BY found_at DESC LIMIT ?") + rows = _sd().db.query(sql, (service, limit)) + else: + sql = ("SELECT ip, port, service, username, password, found_at " + "FROM credentials ORDER BY found_at DESC LIMIT ?") + rows = _sd().db.query(sql, (limit,)) + return json.dumps([dict(r) for r in rows] if rows else [], default=str) + except Exception as e: + return json.dumps({"error": str(e)}) + + +def _impl_get_action_history(limit: int = 50, action_name: Optional[str] = None) -> str: + try: + if action_name: + sql = ("SELECT action_name, target_ip, status, result, started_at, finished_at " + "FROM action_history WHERE action_name=? 
ORDER BY started_at DESC LIMIT ?") + rows = _sd().db.query(sql, (action_name, limit)) + else: + sql = ("SELECT action_name, target_ip, status, result, started_at, finished_at " + "FROM action_history ORDER BY started_at DESC LIMIT ?") + rows = _sd().db.query(sql, (limit,)) + return json.dumps([dict(r) for r in rows] if rows else [], default=str) + except Exception as e: + return json.dumps({"error": str(e)}) + + +def _impl_get_status() -> str: + try: + sd = _sd() + return json.dumps({ + "operation_mode": sd.operation_mode, + "active_action": getattr(sd, "active_action", None), + "bjorn_status": getattr(sd, "bjorn_status_text", "IDLE"), + "bjorn_says": getattr(sd, "bjorn_says", ""), + "hosts_discovered": getattr(sd, "target_count", 0), + "vulnerabilities": getattr(sd, "vuln_count", 0), + "credentials": getattr(sd, "cred_count", 0), + "current_ip": getattr(sd, "current_ip", ""), + "current_ssid": getattr(sd, "current_ssid", ""), + }) + except Exception as e: + return json.dumps({"error": str(e)}) + + +_MCP_PRIORITY = 80 # Higher than scheduler default (40) and queue_action default (50) + + +def _impl_run_action(action_name: str, target_ip: str, target_mac: str = "") -> str: + """Queue a Bjorn action with MCP priority boost. Returns queue confirmation.""" + try: + sd = _sd() + + # Resolve MAC from IP if not supplied + mac = target_mac or "" + if not mac and target_ip: + try: + row = sd.db.query_one( + "SELECT mac_address FROM hosts WHERE ip=? LIMIT 1", (target_ip,) + ) + if row: + mac = row["mac_address"] + except Exception: + pass + + sd.db.queue_action( + action_name=action_name, + mac=mac, + ip=target_ip, + priority=_MCP_PRIORITY, + trigger="mcp", + metadata={"decision_method": "mcp", "decision_origin": "mcp"}, + ) + + # Wake the orchestrator immediately (it sleeps up to 5 s when idle) + try: + sd.queue_event.set() + except Exception: + pass + + return json.dumps({ + "status": "queued", + "action": action_name, + "target": target_ip, + "priority": _MCP_PRIORITY, + }) + except Exception as e: + return json.dumps({"error": str(e)}) + + +def _impl_query_db(sql: str, params: Optional[List] = None) -> str: + """Run a read-only SELECT query. Non-SELECT statements are rejected.""" + try: + stripped = sql.strip().upper() + if not stripped.startswith("SELECT"): + return json.dumps({"error": "Only SELECT queries are allowed."}) + rows = _sd().db.query(sql, tuple(params or [])) + return json.dumps([dict(r) for r in rows] if rows else [], default=str) + except Exception as e: + return json.dumps({"error": str(e)}) + + +# --------------------------------------------------------------------------- +# MCP Server setup (requires `pip install mcp`) +# --------------------------------------------------------------------------- + +def _build_mcp_server(): + """Build and return a FastMCP server instance, or None if mcp not available.""" + try: + from mcp.server.fastmcp import FastMCP + except ImportError: + logger.warning("mcp package not installed — MCP server disabled. " + "Run: pip install mcp") + return None + + mcp = FastMCP( + name="bjorn", + version="1.0.0", + instructions=( + "Bjorn is a Raspberry Pi network security tool. " + "Use these tools to query discovered hosts, vulnerabilities, credentials, " + "and action history, or to queue new actions." + ), + ) + + # ---- Tool registrations ---------------------------------------- + + @mcp.tool() + def get_hosts(alive_only: bool = True) -> str: + """Return all network hosts discovered by Bjorn's scanner. 
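+        Each row is a JSON object carrying ip, mac, hostname, os, alive and
+        ports_open, e.g. {"ip": "192.168.1.10", "alive": 1, ...}
+        (illustrative values; the shape follows the SELECT in _impl_get_hosts).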
+ Set alive_only=false to include hosts that are currently offline.""" + if not _tool_allowed("get_hosts"): + return json.dumps({"error": "Tool disabled in Bjorn MCP config."}) + return _impl_get_hosts(alive_only) + + @mcp.tool() + def get_vulnerabilities(host_ip: str = "", limit: int = 100) -> str: + """Return discovered vulnerabilities. Optionally filter by host_ip.""" + if not _tool_allowed("get_vulnerabilities"): + return json.dumps({"error": "Tool disabled in Bjorn MCP config."}) + return _impl_get_vulnerabilities(host_ip or None, limit) + + @mcp.tool() + def get_credentials(service: str = "", limit: int = 100) -> str: + """Return captured credentials. Optionally filter by service (ssh, ftp, smb…).""" + if not _tool_allowed("get_credentials"): + return json.dumps({"error": "Tool disabled in Bjorn MCP config."}) + return _impl_get_credentials(service or None, limit) + + @mcp.tool() + def get_action_history(limit: int = 50, action_name: str = "") -> str: + """Return the history of executed actions, most recent first.""" + if not _tool_allowed("get_action_history"): + return json.dumps({"error": "Tool disabled in Bjorn MCP config."}) + return _impl_get_action_history(limit, action_name or None) + + @mcp.tool() + def get_status() -> str: + """Return Bjorn's current operational status, counters, and active action.""" + if not _tool_allowed("get_status"): + return json.dumps({"error": "Tool disabled in Bjorn MCP config."}) + return _impl_get_status() + + @mcp.tool() + def run_action(action_name: str, target_ip: str, target_mac: str = "") -> str: + """Queue a Bjorn action (e.g. ssh_bruteforce) against target_ip. + The action will be executed by Bjorn's orchestrator.""" + if not _tool_allowed("run_action"): + return json.dumps({"error": "Tool disabled in Bjorn MCP config."}) + return _impl_run_action(action_name, target_ip, target_mac) + + @mcp.tool() + def query_db(sql: str, params: str = "[]") -> str: + """Run a read-only SELECT query against Bjorn's SQLite database. + params must be a JSON array of bind parameters.""" + if not _tool_allowed("query_db"): + return json.dumps({"error": "Tool disabled in Bjorn MCP config."}) + try: + p = json.loads(params) + except Exception: + p = [] + return _impl_query_db(sql, p) + + return mcp + + +# --------------------------------------------------------------------------- +# Server lifecycle +# --------------------------------------------------------------------------- + +_server_thread: Optional[threading.Thread] = None +_mcp_instance = None + + +def start(block: bool = False) -> bool: + """ + Start the MCP server in a daemon thread. + + Args: + block: If True, run in the calling thread (for stdio mode). + + Returns: + True if started successfully, False otherwise. 
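+
+    Note: transport and port are read from config (mcp_transport, mcp_port).
+    The stdio transport owns stdin/stdout, so callers would normally pass
+    block=True for it; the HTTP SSE transport binds mcp_port instead.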
+ """ + global _server_thread, _mcp_instance + + sd = _sd() + if not sd.config.get("mcp_enabled", False): + logger.debug("MCP server disabled in config (mcp_enabled=False)") + return False + + mcp = _build_mcp_server() + if mcp is None: + return False + + _mcp_instance = mcp + transport = sd.config.get("mcp_transport", "http") + port = int(sd.config.get("mcp_port", 8765)) + + def _run(): + try: + if transport == "stdio": + logger.info("MCP server starting (stdio transport)") + mcp.run(transport="stdio") + else: + logger.info(f"MCP server starting (HTTP SSE transport, port {port})") + # FastMCP HTTP SSE — runs uvicorn internally + mcp.run(transport="sse", port=port) + except Exception as e: + logger.error(f"MCP server error: {e}") + + if block: + _run() + return True + + _server_thread = threading.Thread(target=_run, daemon=True, name="MCPServer") + _server_thread.start() + logger.info(f"MCP server thread started (transport={transport})") + return True + + +def stop() -> None: + """Signal MCP server to stop (best-effort — FastMCP handles cleanup).""" + global _server_thread + if _server_thread and _server_thread.is_alive(): + logger.info("MCP server thread stopping (daemon — will exit with process)") + _server_thread = None + + +def is_running() -> bool: + return _server_thread is not None and _server_thread.is_alive() + + +def server_status() -> Dict[str, Any]: + sd = _sd() + return { + "enabled": sd.config.get("mcp_enabled", False), + "running": is_running(), + "transport": sd.config.get("mcp_transport", "http"), + "port": sd.config.get("mcp_port", 8765), + "allowed_tools": sd.config.get("mcp_allowed_tools", []), + } diff --git a/orchestrator.py b/orchestrator.py index 8b053ce..60c1cae 100644 --- a/orchestrator.py +++ b/orchestrator.py @@ -70,9 +70,17 @@ class Orchestrator: self.data_consolidator = None self.ai_enabled = False + # ┌─────────────────────────────────────────────────────────┐ + # │ LLM Orchestrator (advisor / autonomous) │ + # └─────────────────────────────────────────────────────────┘ + self.llm_orchestrator = None + self._init_llm_orchestrator() + # Load all available actions self.load_actions() logger.info(f"Actions loaded: {list(self.actions.keys())}") + # Expose loaded action names so LLM orchestrator can discover them + self.shared_data.loaded_action_names = list(self.actions.keys()) def _is_enabled_value(self, value: Any) -> bool: """Robust parser for b_enabled values coming from DB.""" @@ -218,6 +226,35 @@ class Orchestrator: interval_s=300.0, ) + def _init_llm_orchestrator(self) -> None: + """Initialise LLMOrchestrator if a mode is configured and LLM is enabled.""" + try: + mode = self.shared_data.config.get("llm_orchestrator_mode", "none") + enabled = self.shared_data.config.get("llm_enabled", False) + if mode == "none" or not enabled: + return + from llm_orchestrator import LLMOrchestrator + self.llm_orchestrator = LLMOrchestrator(self.shared_data) + self.llm_orchestrator.start() + except Exception as e: + logger.debug(f"LLM Orchestrator init skipped: {e}") + + def _sync_llm_orchestrator(self) -> None: + """React to runtime changes of llm_orchestrator_mode / llm_enabled.""" + mode = self.shared_data.config.get("llm_orchestrator_mode", "none") + enabled = self.shared_data.config.get("llm_enabled", False) + + if mode == "none" or not enabled: + if self.llm_orchestrator: + self.llm_orchestrator.stop() + self.llm_orchestrator = None + return + + if self.llm_orchestrator is None: + self._init_llm_orchestrator() + else: + self.llm_orchestrator.restart_if_mode_changed() 
+ def _disable_ai_components(self) -> None: """Drop AI-specific helpers when leaving AI mode. FeatureLogger is kept alive so AUTO mode still collects data.""" @@ -765,6 +802,7 @@ class Orchestrator: try: # Allow live mode switching from the UI without restarting the process. self._sync_ai_components() + self._sync_llm_orchestrator() # Get next action from queue next_action = self.get_next_action() @@ -827,6 +865,9 @@ class Orchestrator: self.shared_data.update_status("IDLE", "") # Cleanup on exit (OUTSIDE while loop) + if self.llm_orchestrator: + self.llm_orchestrator.stop() + if self.scheduler: self.scheduler.stop() self.shared_data.queue_event.set() @@ -839,6 +880,13 @@ class Orchestrator: def _process_background_tasks(self): """Run periodic tasks like consolidation, upload retries, and model updates (AI mode only).""" + # LLM advisor mode — runs regardless of AI mode + if self.llm_orchestrator and self.shared_data.config.get("llm_orchestrator_mode") == "advisor": + try: + self.llm_orchestrator.advise() + except Exception as e: + logger.debug(f"LLM advisor background call error: {e}") + if not (self.ai_enabled and self.shared_data.operation_mode == "AI"): return diff --git a/requirements.txt b/requirements.txt index 26e1ca0..1308309 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,3 +9,9 @@ pysmb==1.2.10 pymysql==1.1.1 sqlalchemy==2.0.36 python-nmap==0.7.1 + +# ── LLM / MCP / Discovery ───────────────────────────────────────────── +zeroconf>=0.131.0 # LaRuche/LAND auto-discovery via mDNS +# +# Optional — install to unlock extra features: +# mcp[cli]>=1.0.0 # MCP server (pip install "mcp[cli]") diff --git a/shared.py b/shared.py index 401205a..ccffa25 100644 --- a/shared.py +++ b/shared.py @@ -488,6 +488,56 @@ class SharedData: "loki_typing_speed_max": 0, "loki_scripts_path": "/root/loki/scripts", "loki_auto_run": "", + + # LLM Bridge + "__title_llm__": "LLM Bridge", + "llm_enabled": False, + "llm_comments_enabled": False, # Use LLM to generate EPD comments (fallback to DB if disabled/fails) + "llm_comments_log": False, # Log LLM-generated EPD comments to logger.info + "llm_chat_enabled": True, # Enable /chat.html interface + "llm_backend": "auto", # auto | laruche | ollama | api + "llm_laruche_discovery": True, # Auto-discover LaRuche nodes via mDNS + "llm_laruche_url": "", # Manual LaRuche node URL (overrides discovery) + "llm_laruche_model": "", # Model to use on LaRuche (empty = node default) + "llm_ollama_url": "http://127.0.0.1:11434", + "llm_ollama_model": "phi3:mini", + "llm_api_provider": "anthropic", # anthropic | openai | openrouter + "llm_api_key": "", + "llm_api_model": "claude-haiku-4-5-20251001", + "llm_api_base_url": "", # Custom base URL (OpenRouter / local proxy) + "llm_timeout_s": 30, + "llm_max_tokens": 500, + "llm_comment_max_tokens": 80, # Keep short for EPD display + "llm_chat_history_size": 20, + "llm_chat_tools_enabled": False, # Enable MCP tool-calling from chat UI + + # LLM Orchestrator + "__title_llm_orch__": "LLM Orchestrator", + "llm_orchestrator_mode": "none", # none | advisor | autonomous + "llm_orchestrator_interval_s": 60, # Seconds between autonomous cycles + "llm_orchestrator_max_actions": 3, # Max actions queued per autonomous cycle + "llm_orchestrator_allowed_actions": [], # Whitelist (empty = inherit mcp_allowed_tools) + "llm_orchestrator_skip_scheduler": False, # True = disable scheduler trigger eval (LLM-only mode) + "llm_orchestrator_skip_if_no_change": True, # True = skip LLM cycle when nothing new (save tokens) + 
"llm_orchestrator_log_reasoning": False, # True = log full LLM reasoning + push to chat history + + # MCP Server + "__title_mcp__": "MCP Server", + "mcp_enabled": False, + "mcp_transport": "http", # http | stdio + "mcp_port": 8765, + "mcp_allowed_tools": [ + "get_hosts", "get_vulnerabilities", "get_credentials", + "get_action_history", "get_status", "run_action", "query_db" + ], + + # EPD Buttons (disabled by default — not all users have buttons) + "__title_epd_buttons__": "EPD Buttons", + "epd_buttons_enabled": False, + "epd_button_a_pin": 5, + "epd_button_b_pin": 6, + "epd_button_c_pin": 13, + "epd_button_d_pin": 19, } @property diff --git a/utils.py b/utils.py index 2f750cd..2edbfb1 100644 --- a/utils.py +++ b/utils.py @@ -25,6 +25,7 @@ class WebUtils: "sentinel": ("web_utils.sentinel_utils", "SentinelUtils"), "bifrost": ("web_utils.bifrost_utils", "BifrostUtils"), "loki": ("web_utils.loki_utils", "LokiUtils"), + "llm_utils": ("web_utils.llm_utils", "LLMUtils"), } diff --git a/web/css/pages.css b/web/css/pages.css index bee181f..c517964 100644 --- a/web/css/pages.css +++ b/web/css/pages.css @@ -17,3 +17,4 @@ @import url("./pages/sentinel.css"); @import url("./pages/bifrost.css"); @import url("./pages/loki.css"); +@import url("./pages/llm.css"); diff --git a/web/css/pages/llm.css b/web/css/pages/llm.css new file mode 100644 index 0000000..0763385 --- /dev/null +++ b/web/css/pages/llm.css @@ -0,0 +1,425 @@ +/* ========================================================================== + llm.css — LLM Chat & LLM Config SPA pages + ========================================================================== */ + +/* ── LLM Chat ─────────────────────────────────────────────────────────── */ + +.llmc-page { + display: flex; + flex-direction: column; + height: calc(100vh - var(--h-topbar, 56px) - var(--h-bottombar, 56px)); + font-family: 'Courier New', Courier, monospace; + background: var(--bg); + color: var(--ink); + min-height: 0; +} + +.llmc-header { + background: var(--c-panel); + border-bottom: 1px solid var(--c-border); + padding: 10px 16px; + display: flex; + align-items: center; + gap: 10px; + flex-shrink: 0; +} + +.llmc-dot { + width: 8px; + height: 8px; + border-radius: 50%; + background: var(--muted-off); + flex-shrink: 0; +} +.llmc-dot.online { background: var(--ok); } +.llmc-dot.offline { background: var(--danger); } + +.llmc-title { + font-size: 13px; + color: var(--acid); + letter-spacing: 2px; + font-weight: 700; +} + +.llmc-status { + font-size: 11px; + color: var(--muted); +} + +.llmc-btn-ghost { + background: transparent; + border: 1px solid var(--c-border); + color: var(--muted); + padding: 3px 10px; + font-size: 11px; + cursor: pointer; + font-family: inherit; + transition: border-color .15s, color .15s; +} +.llmc-btn-ghost:hover { border-color: var(--acid); color: var(--ink); } +.llmc-btn-ghost.active { border-color: var(--accent-2); color: var(--accent-2); background: color-mix(in oklab, var(--accent-2) 8%, transparent); } + +.llmc-clear-btn { margin-left: auto; } + +.llmc-messages { + flex: 1; + overflow-y: auto; + padding: 14px 16px; + display: flex; + flex-direction: column; + gap: 10px; + min-height: 0; +} + +.llmc-msg { + max-width: 88%; + padding: 9px 13px; + border-radius: 4px; + font-size: 12px; + line-height: 1.55; + white-space: pre-wrap; + word-break: break-word; +} +.llmc-msg.user { + background: color-mix(in oklab, var(--ok) 8%, transparent); + border: 1px solid color-mix(in oklab, var(--ok) 25%, transparent); + align-self: flex-end; + color: color-mix(in 
oklab, var(--ok) 85%, var(--ink) 15%); +} +.llmc-msg.assistant { + background: color-mix(in oklab, var(--accent-2) 6%, transparent); + border: 1px solid color-mix(in oklab, var(--accent-2) 20%, transparent); + align-self: flex-start; +} +.llmc-msg.system { + background: transparent; + border: 1px dashed var(--c-border); + align-self: center; + color: var(--muted); + font-size: 11px; + padding: 5px 12px; +} + +.llmc-msg-role { + font-size: 10px; + color: var(--muted); + margin-bottom: 3px; + letter-spacing: 1px; +} +.llmc-msg.user .llmc-msg-role { color: color-mix(in oklab, var(--ok) 70%, var(--muted)); } +.llmc-msg.assistant .llmc-msg-role { color: var(--acid); } + +.llmc-thinking { + align-self: flex-start; + color: var(--muted); + font-size: 12px; + padding: 6px 16px; + border-left: 2px solid var(--danger); + font-family: 'Courier New', monospace; + flex-shrink: 0; +} + +.llmc-disabled-msg { + text-align: center; + padding: 20px; + color: var(--muted); + font-size: 12px; +} +.llmc-disabled-msg a { color: var(--acid); } + +.llmc-input-row { + background: var(--c-panel); + border-top: 1px solid var(--c-border); + padding: 10px 12px; + display: flex; + gap: 8px; + flex-shrink: 0; +} + +.llmc-input { + flex: 1; + background: var(--bg); + border: 1px solid var(--c-border); + color: var(--ink); + padding: 9px 12px; + font-family: 'Courier New', Courier, monospace; + font-size: 12px; + resize: none; + outline: none; + height: 44px; + max-height: 120px; + overflow-y: auto; +} +.llmc-input:focus { border-color: var(--acid); } + +.llmc-send-btn { + background: var(--danger); + border: none; + color: white; + padding: 0 16px; + font-size: 13px; + cursor: pointer; + font-family: inherit; + letter-spacing: 1px; + transition: background .15s; +} +.llmc-send-btn:hover { background: color-mix(in oklab, var(--danger) 80%, white 20%); } +.llmc-send-btn:disabled { background: var(--muted-off); cursor: not-allowed; } + +/* ── LLM Config ───────────────────────────────────────────────────────── */ + +.llmcfg-page { + font-family: 'Courier New', Courier, monospace; + color: var(--ink); + background: var(--bg); + min-height: calc(100vh - var(--h-topbar, 56px) - var(--h-bottombar, 56px)); +} + +.llmcfg-header { + background: var(--c-panel); + border-bottom: 1px solid var(--c-border); + padding: 12px 20px; + display: flex; + align-items: center; + gap: 12px; +} + +.llmcfg-title { + font-size: 13px; + color: var(--acid); + letter-spacing: 2px; + font-weight: 700; +} + +.llmcfg-nav-link { + margin-left: auto; + color: var(--muted); + text-decoration: none; + font-size: 11px; +} +.llmcfg-nav-link:hover { color: var(--ink); } + +.llmcfg-container { + max-width: 780px; + margin: 0 auto; + padding: 20px 16px; + display: flex; + flex-direction: column; + gap: 18px; +} + +.llmcfg-section { + background: var(--c-panel); + border: 1px solid var(--c-border); + border-radius: 4px; + overflow: hidden; +} + +.llmcfg-section-title { + padding: 10px 14px; + background: var(--c-panel-2); + border-bottom: 1px solid var(--c-border); + font-size: 11px; + letter-spacing: 2px; + color: var(--acid); + display: flex; + align-items: center; + gap: 10px; +} + +.llmcfg-badge { + font-size: 10px; + padding: 1px 8px; + border-radius: 2px; +} +.llmcfg-badge.on { background: var(--ok); color: #000; } +.llmcfg-badge.off { background: var(--c-border); color: var(--muted); } + +.llmcfg-body { + padding: 14px; + display: flex; + flex-direction: column; + gap: 12px; +} + +.llmcfg-subsection-title { + font-size: 10px; + letter-spacing: 2px; + 
color: var(--muted); + border-top: 1px solid var(--c-border); + padding-top: 10px; + margin-top: 2px; +} + +.llmcfg-field { + display: flex; + flex-direction: column; + gap: 4px; +} + +.llmcfg-label { + font-size: 10px; + color: var(--muted); + letter-spacing: 1px; +} + +.llmcfg-input, +.llmcfg-select { + background: var(--bg); + border: 1px solid var(--c-border); + color: var(--ink); + padding: 7px 10px; + font-family: inherit; + font-size: 11px; + outline: none; + width: 100%; +} +.llmcfg-input:focus, +.llmcfg-select:focus { border-color: var(--acid); } +.llmcfg-input[type="password"] { letter-spacing: 2px; } + +.llmcfg-row { + display: flex; + gap: 10px; +} +.llmcfg-row .llmcfg-field { flex: 1; } + +.llmcfg-toggle-row { + display: flex; + align-items: center; + justify-content: space-between; + gap: 10px; +} +.llmcfg-toggle-label { + font-size: 12px; + color: var(--ink); +} + +.llmcfg-toggle { + position: relative; + width: 44px; + height: 24px; + flex-shrink: 0; + cursor: pointer; +} +.llmcfg-toggle input { opacity: 0; width: 0; height: 0; position: absolute; } +.llmcfg-slider { + position: absolute; + inset: 0; + background: var(--c-border-strong); + transition: .2s; + cursor: pointer; +} +.llmcfg-slider::before { + content: ''; + position: absolute; + height: 18px; + width: 18px; + left: 3px; + top: 3px; + background: var(--muted-off); + transition: .2s; +} +.llmcfg-toggle input:checked + .llmcfg-slider { background: var(--danger); } +.llmcfg-toggle input:checked + .llmcfg-slider::before { transform: translateX(20px); background: white; } + +.llmcfg-tools-grid { + display: grid; + grid-template-columns: repeat(auto-fill, minmax(180px, 1fr)); + gap: 7px; + padding-top: 4px; +} +.llmcfg-tool-item { + display: flex; + align-items: center; + gap: 7px; + font-size: 11px; + cursor: pointer; + color: var(--ink); +} +.llmcfg-tool-item input[type="checkbox"] { accent-color: var(--acid); } + +.llmcfg-status-row { + font-size: 11px; + color: var(--muted); + border-top: 1px solid var(--c-border); + padding-top: 8px; + margin-top: 2px; +} + +.llmcfg-actions { + display: flex; + gap: 8px; + flex-wrap: wrap; +} + +.llmcfg-btn { + padding: 7px 16px; + font-family: inherit; + font-size: 11px; + cursor: pointer; + border: 1px solid var(--c-border); + background: transparent; + color: var(--ink); + letter-spacing: 1px; + transition: border-color .15s, color .15s; +} +.llmcfg-btn:hover { border-color: var(--acid); color: var(--acid); } +.llmcfg-btn.primary { background: var(--danger); border-color: var(--danger); color: white; } +.llmcfg-btn.primary:hover { background: color-mix(in oklab, var(--danger) 80%, white 20%); border-color: inherit; color: white; } +.llmcfg-btn.compact { padding: 5px 10px; flex-shrink: 0; } + +/* LaRuche discovery row */ +.llmcfg-url-row { display: flex; gap: 6px; align-items: center; } +.llmcfg-url-row .llmcfg-input { flex: 1; } + +/* Model selector row */ +.llmcfg-model-row { display: flex; gap: 6px; align-items: center; } +.llmcfg-model-row .llmcfg-select { flex: 1; } + +.llmcfg-discovery-row { + font-size: 11px; + padding: 4px 0; + min-height: 18px; +} +.llmcfg-disc-found { color: var(--ok); } +.llmcfg-disc-searching { color: var(--muted); } +.llmcfg-disc-off { color: var(--danger); opacity: .7; } + +/* LaRuche default model info */ +.llmcfg-laruche-default { + font-size: 11px; + padding: 2px 0 0; + min-height: 14px; +} +.llmcfg-laruche-default-label { + color: var(--muted); +} +.llmcfg-laruche-default-value { + color: #facc15; + font-weight: 600; +} + +/* Personality & 
Prompts textarea */ +.llmcfg-textarea { + width: 100%; + min-height: 70px; + resize: vertical; + background: var(--bg); + border: 1px solid var(--c-border); + border-radius: 4px; + color: var(--ink); + padding: 7px 10px; + font-family: 'Courier New', Courier, monospace; + font-size: 0.75rem; + line-height: 1.4; +} +.llmcfg-textarea:focus { border-color: var(--acid); } +.llmcfg-textarea::placeholder { color: var(--muted); opacity: 0.5; font-size: 0.65rem; } +.llmcfg-reset-btn { + margin-top: 4px; + font-size: 0.6rem; + padding: 2px 8px; + opacity: 0.6; +} +.llmcfg-reset-btn:hover { opacity: 1; } diff --git a/web/css/pages/scheduler.css b/web/css/pages/scheduler.css index c07517e..2df6f03 100644 --- a/web/css/pages/scheduler.css +++ b/web/css/pages/scheduler.css @@ -291,6 +291,42 @@ background: var(--c-cancel); } +/* Origin badge — who queued this action */ +.scheduler-container .originBadge { + display: inline-block; + font-size: .68rem; + letter-spacing: .5px; + padding: .1rem .5rem; + border-radius: 2px; + font-weight: 600; + margin-bottom: .2rem; +} +.scheduler-container .origin-llm { + background: color-mix(in oklab, var(--danger) 18%, transparent); + color: var(--danger); + border: 1px solid color-mix(in oklab, var(--danger) 35%, transparent); +} +.scheduler-container .origin-ai { + background: color-mix(in oklab, var(--accent-2, #a78bfa) 15%, transparent); + color: var(--accent-2, #a78bfa); + border: 1px solid color-mix(in oklab, var(--accent-2, #a78bfa) 30%, transparent); +} +.scheduler-container .origin-mcp { + background: color-mix(in oklab, var(--acid) 12%, transparent); + color: var(--acid); + border: 1px solid color-mix(in oklab, var(--acid) 25%, transparent); +} +.scheduler-container .origin-manual { + background: color-mix(in oklab, var(--ok) 12%, transparent); + color: var(--ok); + border: 1px solid color-mix(in oklab, var(--ok) 25%, transparent); +} +.scheduler-container .origin-heuristic { + background: color-mix(in oklab, var(--muted) 12%, transparent); + color: var(--muted); + border: 1px solid color-mix(in oklab, var(--muted) 25%, transparent); +} + /* Collapsed */ .scheduler-container .card.collapsed .kv, .scheduler-container .card.collapsed .tags, @@ -334,8 +370,7 @@ height: 80px; object-fit: contain; border-radius: 6px; - background: var(--panel); - border: 1px solid var(--c-border); + } .scheduler-container .card.status-running .actionIcon { diff --git a/web/css/pages/sentinel.css b/web/css/pages/sentinel.css index 7ba7df8..f23e9f1 100644 --- a/web/css/pages/sentinel.css +++ b/web/css/pages/sentinel.css @@ -364,3 +364,32 @@ align-items: flex-start; } } + +/* ── AI Sentinel elements ─────────────────────────────── */ +.sentinel-ai-btn { + background: rgba(168, 85, 247, .1) !important; + border-color: rgba(168, 85, 247, .3) !important; + color: #c084fc !important; +} +.sentinel-ai-btn:hover { + background: rgba(168, 85, 247, .2) !important; +} +.sentinel-ai-result { + margin: 4px 0 0; + padding: 6px 8px; + background: rgba(168, 85, 247, .06); + border: 1px solid rgba(168, 85, 247, .15); + border-radius: 4px; + font-size: 0.7rem; + white-space: pre-wrap; + line-height: 1.4; + display: none; +} +.sentinel-ai-result.active { display: block; } +.sentinel-ai-summary { + padding: 8px; + background: rgba(59, 130, 246, .06); + border: 1px solid rgba(59, 130, 246, .15); + border-radius: 6px; + margin-bottom: 8px; +} diff --git a/web/css/shell.css b/web/css/shell.css index 1d09a0b..0e6733c 100644 --- a/web/css/shell.css +++ b/web/css/shell.css @@ -404,6 +404,41 @@ 
body.console-docked .app-container { white-space: nowrap; } +/* Bjorn comment lines */ +.comment-line { + color: #4ade80; + display: inline-flex; + align-items: center; + gap: 5px; + flex-wrap: wrap; +} +.comment-icon { + display: inline-block; + width: 1.15em; + height: 1.15em; + flex-shrink: 0; + opacity: .85; + vertical-align: middle; + object-fit: contain; +} +.comment-status { + opacity: .55; + font-size: .9em; +} +.comment-llm-badge { + display: inline-block; + font-size: .7em; + font-weight: 700; + letter-spacing: .5px; + padding: 1px 5px; + border-radius: 4px; + background: linear-gradient(180deg, rgba(168,85,247,.35), rgba(168,85,247,.2)); + border: 1px solid rgba(168,85,247,.5); + color: #c4b5fd; + vertical-align: middle; + flex-shrink: 0; +} + /* Console font slider row */ .console-fontrow { flex-basis: 100%; @@ -723,17 +758,121 @@ body.console-docked .app-container { transform: scale(.96); } -/* QuickPanel rows & signal */ +/* ---- QuickPanel Tab Bar ---- */ +.qp-tabs { + display: flex; + gap: 0; + margin: 0 16px 12px; + border-bottom: 1px solid var(--c-border); + position: relative; +} + +.qp-tab { + padding: 8px 20px; + font-size: 13px; + font-weight: 600; + color: var(--muted); + cursor: pointer; + border-bottom: 2px solid transparent; + transition: color .2s ease, border-color .2s ease; + user-select: none; + display: inline-flex; + align-items: center; + gap: 6px; +} + +.qp-tab:hover { + color: var(--ink); +} + +.qp-tab.active { + color: var(--acid); + border-bottom-color: var(--acid); +} + +.qp-tab-icon { + font-size: 15px; + opacity: .7; +} + +.qp-tab.active .qp-tab-icon { + opacity: 1; +} + +/* ---- QuickPanel rows & cards ---- */ .qprow { display: grid; gap: 10px; - padding: 10px; + padding: 12px; border: 1px solid var(--c-border); - border-radius: 10px; - background: var(--grad-qprow); + border-radius: 12px; + background: var(--grad-qprow, color-mix(in oklab, var(--c-panel, #0d1520) 80%, transparent)); margin-bottom: 6px; + transition: transform .15s ease, box-shadow .15s ease, opacity .15s ease, border-color .15s ease; + animation: qpSlideIn .25s ease forwards; + animation-delay: calc(var(--i, 0) * 40ms); + opacity: 0; } +.qprow:hover { + border-color: var(--c-border-strong, var(--c-border)); + box-shadow: 0 2px 8px rgba(0,0,0,.2); +} + +.qprow.connected { + border-left: 3px solid var(--acid); +} + +@keyframes qpSlideIn { + from { transform: translateY(8px); opacity: 0; } + to { transform: translateY(0); opacity: 1; } +} + +/* ---- WiFi card layout ---- */ +.wifi-card { + grid-template-columns: 1fr auto; + align-items: center; +} + +.wifi-card-info { + display: flex; + flex-direction: column; + gap: 2px; + overflow: hidden; + min-width: 0; +} + +.wifi-card-ssid { + font-weight: 600; + font-size: 14px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.wifi-card-meta { + display: flex; + align-items: center; + gap: 8px; + font-size: 12px; +} + +.wifi-card-actions { + display: flex; + align-items: center; + gap: 6px; +} + +.wifi-connected-chip { + font-size: 11px; + color: var(--acid); + font-weight: 600; + display: inline-flex; + align-items: center; + gap: 4px; +} + +/* ---- Signal bars ---- */ .sig { display: inline-grid; grid-auto-flow: column; @@ -742,19 +881,124 @@ body.console-docked .app-container { } .sig i { - width: 4px; + width: 5px; height: 6px; display: block; background: var(--c-slot); border: 1px solid var(--c-border); border-bottom: none; border-radius: 2px 2px 0 0; + transition: background .2s ease; } .sig i.on { background: 
var(--acid); } +/* ---- Known network cards ---- */ +.known-card { + grid-template-columns: auto 1fr auto; + align-items: center; +} + +.known-card-grip { + cursor: grab; + color: var(--muted); + font-size: 16px; + padding: 4px 2px; + user-select: none; + touch-action: none; + line-height: 1; +} + +.known-card-grip:active { + cursor: grabbing; +} + +.known-card-info { + display: flex; + flex-direction: column; + gap: 2px; + overflow: hidden; + min-width: 0; +} + +.known-card-ssid { + font-weight: 600; + font-size: 14px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.known-card-priority { + font-size: 11px; + color: var(--muted); +} + +.known-card-actions { + display: flex; + gap: 4px; + align-items: center; +} + +/* ---- Drag & Drop ---- */ +.qprow.dragging { + opacity: .4; + transform: scale(.97); + box-shadow: none; +} + +.drag-placeholder { + height: 3px; + background: var(--acid); + border-radius: 99px; + margin: 2px 0; + box-shadow: 0 0 8px color-mix(in oklab, var(--acid) 50%, transparent); + animation: qpSlideIn .15s ease forwards; +} + +/* ---- Multi-select ---- */ +.known-select-cb { + width: 18px; + height: 18px; + accent-color: var(--acid); + cursor: pointer; + flex-shrink: 0; + display: none; +} + +.edit-mode .known-select-cb { + display: block; +} + +.edit-mode .known-card-grip { + display: none; +} + +.qp-batch-bar { + position: sticky; + bottom: 0; + left: 0; + right: 0; + padding: 10px 16px; + background: color-mix(in oklab, var(--c-panel, #0d1520) 95%, transparent); + backdrop-filter: blur(8px); + border-top: 1px solid var(--c-border-strong); + display: flex; + align-items: center; + justify-content: space-between; + gap: 8px; + animation: qpSlideUp .2s ease forwards; + z-index: 5; +} + +@keyframes qpSlideUp { + from { transform: translateY(100%); opacity: 0; } + to { transform: translateY(0); opacity: 1; } +} + +/* ---- Bluetooth cards ---- */ .btlist .qprow { grid-template-columns: 1fr auto; } @@ -763,6 +1007,78 @@ body.console-docked .app-container { display: flex; align-items: center; gap: 10px; + min-width: 0; +} + +.bt-icon { + font-size: 20px; + flex-shrink: 0; + width: 32px; + text-align: center; +} + +.bt-device-info { + display: flex; + flex-direction: column; + gap: 2px; + overflow: hidden; + min-width: 0; +} + +.bt-device-name { + font-weight: 600; + font-size: 14px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.bt-device-mac { + font-size: 11px; + color: var(--muted); + font-family: monospace; +} + +.bt-state-chips { + display: flex; + gap: 4px; + flex-wrap: wrap; + margin-top: 2px; +} + +.bt-chip { + font-size: 10px; + padding: 1px 6px; + border-radius: 99px; + font-weight: 600; + display: inline-flex; + align-items: center; + gap: 3px; +} + +.bt-chip-paired { + background: color-mix(in oklab, var(--acid-2, #18f0ff) 14%, transparent); + border: 1px solid color-mix(in oklab, var(--acid-2, #18f0ff) 40%, transparent); + color: var(--acid-2, #18f0ff); +} + +.bt-chip-trusted { + background: color-mix(in oklab, var(--ok, #2cff7e) 14%, transparent); + border: 1px solid color-mix(in oklab, var(--ok, #2cff7e) 40%, transparent); + color: var(--ok, #2cff7e); +} + +.bt-chip-connected { + background: color-mix(in oklab, var(--acid) 14%, transparent); + border: 1px solid color-mix(in oklab, var(--acid) 40%, transparent); + color: var(--acid); +} + +.bt-actions { + display: flex; + gap: 4px; + align-items: center; + flex-wrap: wrap; } .bt-type { @@ -770,6 +1086,105 @@ body.console-docked .app-container { font-size: 
12px; } +/* ---- State dot ---- */ +.state-dot { + width: 8px; + height: 8px; + border-radius: 50%; + display: inline-block; + flex-shrink: 0; +} + +.state-on { + background: var(--acid); + box-shadow: 0 0 6px color-mix(in oklab, var(--acid) 60%, transparent); +} + +.state-off { + background: var(--muted); +} + +/* ---- QP Section headers ---- */ +.qp-section-header { + display: flex; + align-items: center; + justify-content: space-between; + padding: 8px 16px 4px; + font-weight: 700; + font-size: 13px; + color: var(--muted); +} + +.qp-section-actions { + display: flex; + gap: 4px; + align-items: center; +} + +/* ---- QP icon buttons ---- */ +.qp-icon-btn { + width: 28px; + height: 28px; + display: inline-flex; + align-items: center; + justify-content: center; + border-radius: 6px; + cursor: pointer; + background: transparent; + border: 1px solid transparent; + color: var(--muted); + font-size: 14px; + transition: background .15s ease, color .15s ease, border-color .15s ease; + padding: 0; +} + +.qp-icon-btn:hover { + background: var(--white-06); + border-color: var(--white-12); + color: var(--ink); +} + +.qp-icon-btn.danger:hover { + color: var(--danger, #ff3b3b); + border-color: color-mix(in oklab, var(--danger, #ff3b3b) 30%, transparent); +} + +/* ---- QP toolbar ---- */ +.qp-toolbar { + display: flex; + gap: 6px; + align-items: center; + flex-wrap: wrap; + padding: 0 16px 8px; +} + +.qp-toolbar-spacer { + flex: 1; +} + +/* ---- QP loading spinner on buttons ---- */ +.qp-btn-loading { + pointer-events: none; + opacity: .6; +} + +.qp-btn-loading::after { + content: ""; + display: inline-block; + width: 12px; + height: 12px; + border: 2px solid currentColor; + border-top-color: transparent; + border-radius: 50%; + animation: qpSpin .6s linear infinite; + margin-left: 6px; + vertical-align: middle; +} + +@keyframes qpSpin { + to { transform: rotate(360deg); } +} + /* ---- Actions Dropdown ---- */ .actions { position: relative; @@ -2248,3 +2663,69 @@ input[type="color"].theme-input { grid-template-columns: repeat(3, 1fr); } } + +/* ── Console footer (chat input) ────────────────────────── */ +.console-footer { + display: flex; + gap: 4px; + padding: 4px 8px; + border-top: 1px solid var(--c-border); + flex-shrink: 0; + background: var(--c-panel, #1a1a1e); +} +.console-input { + flex: 1; + background: var(--bg, #09090b); + border: 1px solid var(--c-border, #2a2a2e); + border-radius: 4px; + color: var(--ink, #fafafa); + padding: 4px 8px; + resize: none; + font-family: var(--font-mono, 'Courier New', monospace); + font-size: var(--console-font, 11px); + line-height: 1.4; +} +.console-input:focus { border-color: var(--acid, #22c55e); outline: none; } +.console-send-btn { + padding: 4px 10px; + background: var(--acid, #22c55e); + color: var(--bg, #09090b); + border: none; + border-radius: 4px; + cursor: pointer; + font-size: var(--console-font, 11px); + font-weight: 600; +} +.console-send-btn:hover { opacity: 0.8; } + +/* ── Console bubble mode ────────────────────────────────── */ +.console.bubble-mode .console-body { + display: flex; + flex-direction: column; +} +.console-bubble-bjorn { + margin: 3px 8px 3px 4px; + padding: 5px 10px; + border-radius: 12px 12px 12px 4px; + background: rgba(34, 197, 94, .12); + color: var(--ink, #fafafa); + max-width: 85%; + align-self: flex-start; + word-break: break-word; + font-size: var(--console-font, 11px); + line-height: 1.4; +} +.console-bubble-bjorn.llm { background: rgba(168, 85, 247, .12); } +.console-bubble-user { + margin: 3px 4px 3px 8px; + padding: 
5px 10px; + border-radius: 12px 12px 4px 12px; + background: rgba(59, 130, 246, .12); + color: var(--ink, #fafafa); + max-width: 85%; + align-self: flex-end; + margin-left: auto; + word-break: break-word; + font-size: var(--console-font, 11px); + line-height: 1.4; +} diff --git a/web/i18n/en.json b/web/i18n/en.json index 99b90a9..d38fec4 100644 --- a/web/i18n/en.json +++ b/web/i18n/en.json @@ -1249,5 +1249,66 @@ "loki.quick_placeholder": "Quick type text here...", "loki.quick_send": "Type", "loki.quick_sent": "Text sent to target", - "loki.quick_error": "Failed to send text" + "loki.quick_error": "Failed to send text", + + "nav.llm_chat": "LLM Chat", + "nav.llm_config": "LLM & MCP", + + "llm_chat.checking": "checking...", + "llm_chat.disabled": "LLM disabled", + "llm_chat.online": "online", + "llm_chat.unavailable": "unavailable", + "llm_chat.disabled_msg": "LLM is disabled. Enable it in", + "llm_chat.settings_link": "Settings → LLM Bridge", + "llm_chat.thinking": "Bjorn is thinking...", + "llm_chat.placeholder": "Ask Bjorn anything about the network...", + "llm_chat.send": "SEND", + "llm_chat.clear_history": "Clear history", + "llm_chat.orch_log": "Orch Log", + "llm_chat.orch_title": "View LLM Orchestrator reasoning log", + "llm_chat.back_chat": "← Back to chat", + "llm_chat.session_started": "Session started. Type a question or command.", + "llm_chat.history_cleared": "History cleared.", + "llm_chat.back_to_chat": "Back to chat. Type a question or command.", + "llm_chat.loading_log": "Loading LLM Orchestrator reasoning log…", + "llm_chat.no_log": "No reasoning log yet. Enable llm_orchestrator_log_reasoning in config and run a cycle.", + "llm_chat.log_header": "Orchestrator log", + "llm_chat.log_error": "Error loading reasoning log", + "llm_chat.error": "Error", + "llm_chat.net_error": "Network error", + + "llm_cfg.enable_bridge": "Enable LLM Bridge", + "llm_cfg.epd_comments": "LLM comments on EPD display", + "llm_cfg.backend": "BACKEND PRIORITY", + "llm_cfg.laruche_discovery": "Auto-discover LaRuche nodes via mDNS", + "llm_cfg.laruche_url": "LARUCHE NODE URL (optional — overrides discovery)", + "llm_cfg.ollama_url": "OLLAMA URL", + "llm_cfg.ollama_model": "MODEL", + "llm_cfg.provider": "PROVIDER", + "llm_cfg.api_model": "MODEL", + "llm_cfg.api_key": "API KEY", + "llm_cfg.api_key_placeholder":"Leave empty to keep current", + "llm_cfg.base_url": "CUSTOM BASE URL (OpenRouter / proxy)", + "llm_cfg.timeout": "TIMEOUT (s)", + "llm_cfg.max_tokens_chat": "MAX TOKENS (chat)", + "llm_cfg.max_tokens_epd": "MAX TOKENS (EPD comment)", + "llm_cfg.api_key_set": "API key: set", + "llm_cfg.api_key_not_set": "API key: not set", + "llm_cfg.save_llm": "SAVE LLM CONFIG", + "llm_cfg.test_connection": "TEST CONNECTION", + "llm_cfg.enable_mcp": "Enable MCP Server", + "llm_cfg.transport": "TRANSPORT", + "llm_cfg.mcp_port": "PORT (HTTP SSE only)", + "llm_cfg.exposed_tools": "EXPOSED TOOLS", + "llm_cfg.mcp_running": "Server running on port", + "llm_cfg.mcp_stopped": "Server not running.", + "llm_cfg.save_mcp": "SAVE MCP CONFIG", + "llm_cfg.saved_llm": "LLM config saved.", + "llm_cfg.saved_mcp": "MCP config saved.", + "llm_cfg.mcp_enabled": "MCP server enabled.", + "llm_cfg.mcp_disabled": "MCP server disabled.", + "llm_cfg.testing": "Testing…", + "llm_cfg.test_failed": "Failed", + "llm_cfg.error": "Error", + "llm_cfg.save_error": "Save error" } diff --git a/web/i18n/fr.json b/web/i18n/fr.json index 77ff574..7db279b 100644 --- a/web/i18n/fr.json +++ b/web/i18n/fr.json @@ -932,5 +932,66 @@ "loki.quick_placeholder": 
"Taper du texte ici...", "loki.quick_send": "Taper", "loki.quick_sent": "Texte envoyé à la cible", - "loki.quick_error": "Échec de l'envoi" + "loki.quick_error": "Échec de l'envoi", + + "nav.llm_chat": "Chat LLM", + "nav.llm_config": "LLM & MCP", + + "llm_chat.checking": "vérification...", + "llm_chat.disabled": "LLM désactivé", + "llm_chat.online": "en ligne", + "llm_chat.unavailable": "indisponible", + "llm_chat.disabled_msg": "Le LLM est désactivé. Activez-le dans", + "llm_chat.settings_link": "Paramètres → LLM Bridge", + "llm_chat.thinking": "Bjorn réfléchit...", + "llm_chat.placeholder": "Demandez à Bjorn n'importe quoi sur le réseau...", + "llm_chat.send": "ENVOYER", + "llm_chat.clear_history": "Effacer l'historique", + "llm_chat.orch_log": "Log Orch", + "llm_chat.orch_title": "Voir le log de raisonnement de l'orchestrateur LLM", + "llm_chat.back_chat": "← Retour au chat", + "llm_chat.session_started": "Session démarrée. Posez une question ou une commande.", + "llm_chat.history_cleared": "Historique effacé.", + "llm_chat.back_to_chat": "Retour au chat. Posez une question ou une commande.", + "llm_chat.loading_log": "Chargement du log de raisonnement…", + "llm_chat.no_log": "Aucun log de raisonnement. Activez llm_orchestrator_log_reasoning dans la config.", + "llm_chat.log_header": "Log orchestrateur", + "llm_chat.log_error": "Erreur lors du chargement du log", + "llm_chat.error": "Erreur", + "llm_chat.net_error": "Erreur réseau", + + "llm_cfg.enable_bridge": "Activer le LLM Bridge", + "llm_cfg.epd_comments": "Commentaires LLM sur l'écran EPD", + "llm_cfg.backend": "PRIORITÉ BACKEND", + "llm_cfg.laruche_discovery": "Découverte automatique LaRuche via mDNS", + "llm_cfg.laruche_url": "URL NŒUD LARUCHE (optionnel — override découverte)", + "llm_cfg.ollama_url": "URL OLLAMA", + "llm_cfg.ollama_model": "MODÈLE", + "llm_cfg.provider": "FOURNISSEUR", + "llm_cfg.api_model": "MODÈLE", + "llm_cfg.api_key": "CLÉ API", + "llm_cfg.api_key_placeholder":"Laisser vide pour conserver la clé actuelle", + "llm_cfg.base_url": "URL DE BASE PERSONNALISÉE (OpenRouter / proxy)", + "llm_cfg.timeout": "TIMEOUT (s)", + "llm_cfg.max_tokens_chat": "TOKENS MAX (chat)", + "llm_cfg.max_tokens_epd": "TOKENS MAX (commentaire EPD)", + "llm_cfg.api_key_set": "Clé API : définie", + "llm_cfg.api_key_not_set": "Clé API : non définie", + "llm_cfg.save_llm": "SAUVEGARDER CONFIG LLM", + "llm_cfg.test_connection": "TESTER LA CONNEXION", + "llm_cfg.enable_mcp": "Activer le serveur MCP", + "llm_cfg.transport": "TRANSPORT", + "llm_cfg.mcp_port": "PORT (HTTP SSE uniquement)", + "llm_cfg.exposed_tools": "OUTILS EXPOSÉS", + "llm_cfg.mcp_running": "Serveur actif sur le port", + "llm_cfg.mcp_stopped": "Serveur arrêté.", + "llm_cfg.save_mcp": "SAUVEGARDER CONFIG MCP", + "llm_cfg.saved_llm": "Configuration LLM sauvegardée.", + "llm_cfg.saved_mcp": "Configuration MCP sauvegardée.", + "llm_cfg.mcp_enabled": "Serveur MCP activé.", + "llm_cfg.mcp_disabled": "Serveur MCP désactivé.", + "llm_cfg.testing": "Test en cours…", + "llm_cfg.test_failed": "Échec", + "llm_cfg.error": "Erreur", + "llm_cfg.save_error": "Erreur de sauvegarde" } \ No newline at end of file diff --git a/web/index.html b/web/index.html index 0418141..dfd46f6 100644 --- a/web/index.html +++ b/web/index.html @@ -108,6 +108,7 @@ +