mirror of
https://github.com/infinition/Bjorn.git
synced 2026-03-17 01:21:04 +00:00
Add LLM configuration and MCP server management UI and backend functionality
- Implemented a new SPA page for LLM Bridge and MCP Server settings in `llm-config.js`.
- Added functionality for managing LLM and MCP configurations, including toggling, saving settings, and testing connections.
- Created HTTP endpoints in `llm_utils.py` for handling LLM chat, status checks, and MCP server configuration.
- Integrated model fetching from LaRuche and Ollama backends.
- Enhanced error handling and logging for better debugging and user feedback.
This commit is contained in:
19
Bjorn.py
19
Bjorn.py
@@ -586,6 +586,25 @@ if __name__ == "__main__":
|
||||
except Exception as e:
|
||||
logger.warning("Loki init skipped: %s", e)
|
||||
|
||||
# LLM Bridge — warm up singleton (starts LaRuche mDNS discovery if enabled)
|
||||
try:
|
||||
from llm_bridge import LLMBridge
|
||||
LLMBridge() # Initialise singleton, kicks off background discovery
|
||||
logger.info("LLM Bridge initialised")
|
||||
except Exception as e:
|
||||
logger.warning("LLM Bridge init skipped: %s", e)
|
||||
|
||||
# MCP Server — start if enabled in config
|
||||
try:
|
||||
import mcp_server
|
||||
if shared_data.config.get("mcp_enabled", False):
|
||||
mcp_server.start()
|
||||
logger.info("MCP server started")
|
||||
else:
|
||||
logger.info("MCP server loaded (disabled — enable via Settings)")
|
||||
except Exception as e:
|
||||
logger.warning("MCP server init skipped: %s", e)
|
||||
|
||||
# Signal Handlers
|
||||
exit_handler = lambda s, f: handle_exit(
|
||||
s,
|
||||
|
||||
916
LLM_MCP_ARCHITECTURE.md
Normal file
916
LLM_MCP_ARCHITECTURE.md
Normal file
@@ -0,0 +1,916 @@
|
||||
# BJORN — LLM Bridge, MCP Server & LLM Orchestrator
|
||||
## Complete architecture, operation, commands, fallbacks
|
||||
|
||||
---
|
||||
|
||||
## Table of contents
|
||||
|
||||
1. [Overview](#1-overview)
|
||||
2. [Created / modified files](#2-created--modified-files)
|
||||
3. [LLM Bridge (`llm_bridge.py`)](#3-llm-bridge-llm_bridgepy)
|
||||
4. [MCP Server (`mcp_server.py`)](#4-mcp-server-mcp_serverpy)
|
||||
5. [LLM Orchestrator (`llm_orchestrator.py`)](#5-llm-orchestrator-llm_orchestratorpy)
|
||||
6. [Orchestrator & Scheduler integration](#6-orchestrator--scheduler-integration)
|
||||
7. [Web Utils LLM (`web_utils/llm_utils.py`)](#7-web-utils-llm-web_utilsllm_utilspy)
|
||||
8. [EPD comment integration (`comment.py`)](#8-epd-comment-integration-commentpy)
|
||||
9. [Configuration (`shared.py`)](#9-configuration-sharedpy)
|
||||
10. [HTTP Routes (`webapp.py`)](#10-http-routes-webapppy)
|
||||
11. [Web interfaces](#11-web-interfaces)
|
||||
12. [Startup (`Bjorn.py`)](#12-startup-bjornpy)
|
||||
13. [LaRuche / LAND Protocol compatibility](#13-laruche--land-protocol-compatibility)
|
||||
14. [Optional dependencies](#14-optional-dependencies)
|
||||
15. [Quick activation & configuration](#15-quick-activation--configuration)
|
||||
16. [Complete API endpoint reference](#16-complete-api-endpoint-reference)
|
||||
17. [Queue priority system](#17-queue-priority-system)
|
||||
18. [Fallbacks & graceful degradation](#18-fallbacks--graceful-degradation)
|
||||
19. [Call sequences](#19-call-sequences)
|
||||
|
||||
---
|
||||
|
||||
## 1. Overview
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────┐
|
||||
│ BJORN (RPi) │
|
||||
│ │
|
||||
│ ┌─────────────┐ ┌──────────────────┐ ┌─────────────────────┐ │
|
||||
│ │ Core BJORN │ │ MCP Server │ │ Web UI │ │
|
||||
│ │ (unchanged) │ │ (mcp_server.py) │ │ /chat.html │ │
|
||||
│ │ │ │ 7 exposed tools │ │ /mcp-config.html │ │
|
||||
│ │ comment.py │ │ HTTP SSE / stdio │ │ ↳ Orch Log button │ │
|
||||
│ │ ↕ LLM hook │ │ │ │ │ │
|
||||
│ └──────┬──────┘ └────────┬─────────┘ └──────────┬──────────┘ │
|
||||
│ └─────────────────────────────────────────────┘ │
|
||||
│ │ │
|
||||
│ ┌──────────────────────────▼─────────────────────────────────┐ │
|
||||
│ │ LLM Bridge (llm_bridge.py) │ │
|
||||
│ │ Singleton · Thread-safe │ │
|
||||
│ │ │ │
|
||||
│ │ Automatic cascade: │ │
|
||||
│ │ 1. LaRuche node (LAND/mDNS → HTTP POST /infer) │ │
|
||||
│ │ 2. Local Ollama (HTTP POST /api/chat) │ │
|
||||
│ │ 3. External API (Anthropic / OpenAI / OpenRouter) │ │
|
||||
│ │ 4. None (→ fallback templates in comment.py) │ │
|
||||
│ │ │ │
|
||||
│ │ Agentic tool-calling loop (stop_reason=tool_use, ≤6 turns) │ │
|
||||
│ │ _BJORN_TOOLS: 7 tools in Anthropic format │ │
|
||||
│ └─────────────────────────────────────────────────────────────┘ │
|
||||
│ │ │
|
||||
│ ┌──────────────────────────▼─────────────────────────────────┐ │
|
||||
│ │ LLM Orchestrator (llm_orchestrator.py) │ │
|
||||
│ │ │ │
|
||||
│ │ mode = none → LLM has no role in scheduling │ │
|
||||
│ │ mode = advisor → LLM suggests 1 action/cycle (prio 85) │ │
|
||||
│ │ mode = autonomous→ own thread, loop + tools (prio 82) │ │
|
||||
│ │ │ │
|
||||
│ │ Fingerprint (hosts↑, vulns↑, creds↑, queue_id↑) │ │
|
||||
│ │ → skip LLM if nothing new (token savings) │ │
|
||||
│ └─────────────────────────────────────────────────────────────┘ │
|
||||
│ │ │
|
||||
│ ┌──────────────────────────▼─────────────────────────────────┐ │
|
||||
│ │ Action Queue (SQLite) │ │
|
||||
│ │ scheduler=40 normal=50 MCP=80 autonomous=82 advisor=85│ │
|
||||
│ └─────────────────────────────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────────┘
|
||||
↕ mDNS _ai-inference._tcp.local. (zeroconf)
|
||||
┌──────────────────────────────────────────┐
|
||||
│ LaRuche Swarm (LAN) │
|
||||
│ Node A → Mistral 7B :8419 │
|
||||
│ Node B → DeepSeek Coder :8419 │
|
||||
│ Node C → Phi-3 Mini :8419 │
|
||||
└──────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Design principles:**
|
||||
- Everything is **disabled by default** — zero impact if not configured
|
||||
- All dependencies are **optional** — silent import if missing
|
||||
- **Systematic fallback** at every level — Bjorn never crashes because of the LLM
|
||||
- The bridge is a **singleton** — one instance per process, thread-safe
|
||||
- EPD comments preserve their **exact original behaviour** if LLM is disabled
|
||||
- The LLM is the **brain** (decides what to do), the orchestrator is the **arms** (executes)
|
||||
|
||||
---
|
||||
|
||||
## 2. Created / modified files
|
||||
|
||||
### Created files
|
||||
|
||||
| File | Approx. size | Role |
|
||||
|------|-------------|------|
|
||||
| `llm_bridge.py` | ~450 lines | LLM Singleton — backend cascade + agentic tool-calling loop |
|
||||
| `mcp_server.py` | ~280 lines | FastMCP MCP Server — 7 Bjorn tools |
|
||||
| `web_utils/llm_utils.py` | ~220 lines | LLM/MCP HTTP endpoints (web_utils pattern) |
|
||||
| `llm_orchestrator.py` | ~410 lines | LLM Orchestrator — advisor & autonomous modes |
|
||||
| `web/chat.html` | ~300 lines | Chat interface + Orch Log button |
|
||||
| `web/mcp-config.html` | ~400 lines | LLM & MCP configuration page |
|
||||
|
||||
### Modified files
|
||||
|
||||
| File | What changed |
|
||||
|------|-------------|
|
||||
| `shared.py` | +45 config keys (LLM bridge, MCP, orchestrator) |
|
||||
| `comment.py` | LLM hook in `get_comment()` — 12 lines added |
|
||||
| `utils.py` | +1 entry in lazy WebUtils registry: `"llm_utils"` |
|
||||
| `webapp.py` | +9 GET/POST routes in `_register_routes_once()` |
|
||||
| `Bjorn.py` | LLM Bridge warm-up + conditional MCP server start |
|
||||
| `orchestrator.py` | +`LLMOrchestrator` lifecycle + advisor call in background tasks |
|
||||
| `action_scheduler.py` | +skip scheduler if LLM autonomous only (`llm_orchestrator_skip_scheduler`) |
|
||||
| `requirements.txt` | +3 comment lines (optional dependencies documented) |
|
||||
|
||||
---
|
||||
|
||||
## 3. LLM Bridge (`llm_bridge.py`)
|
||||
|
||||
### Internal architecture
|
||||
|
||||
```
|
||||
LLMBridge (Singleton)
|
||||
├── __init__() Initialises singleton, launches LaRuche discovery
|
||||
├── complete() Main API — cascades all backends
|
||||
│ └── tools=None/[...] Optional param to enable tool-calling
|
||||
├── generate_comment() Generates a short EPD comment (≤80 tokens)
|
||||
├── chat() Stateful chat with per-session history
|
||||
│ └── tools=_BJORN_TOOLS if llm_chat_tools_enabled=True
|
||||
├── clear_history() Clears a session's history
|
||||
├── status() Returns bridge state (for the UI)
|
||||
│
|
||||
├── _start_laruche_discovery() Starts mDNS thread in background
|
||||
├── _discover_laruche_mdns() Listens to _ai-inference._tcp.local. continuously
|
||||
│
|
||||
├── _call_laruche() Backend 1 — POST http://[node]:8419/infer
|
||||
├── _call_ollama() Backend 2 — POST http://localhost:11434/api/chat
|
||||
├── _call_anthropic() Backend 3a — POST api.anthropic.com + AGENTIC LOOP
|
||||
│ └── loop ≤6 turns: send → tool_use → execute → feed result → repeat
|
||||
├── _call_openai_compat() Backend 3b — POST [base_url]/v1/chat/completions
|
||||
│
|
||||
├── _execute_tool(name, inputs) Dispatches to mcp_server._impl_*
|
||||
│ └── gate: checks mcp_allowed_tools before executing
|
||||
│
|
||||
└── _build_system_prompt() Builds system prompt with live Bjorn context
|
||||
|
||||
_BJORN_TOOLS : List[Dict] Anthropic-format definitions for the 7 MCP tools
|
||||
```
|
||||
|
||||
### _BJORN_TOOLS — full list
|
||||
|
||||
```python
|
||||
_BJORN_TOOLS = [
|
||||
{"name": "get_hosts", "description": "...", "input_schema": {...}},
|
||||
{"name": "get_vulnerabilities", ...},
|
||||
{"name": "get_credentials", ...},
|
||||
{"name": "get_action_history", ...},
|
||||
{"name": "get_status", ...},
|
||||
{"name": "run_action", ...}, # gated by mcp_allowed_tools
|
||||
{"name": "query_db", ...}, # SELECT only
|
||||
]
|
||||
```
|
||||
|
||||
### Backend cascade
|
||||
|
||||
```
|
||||
llm_backend = "auto" → LaRuche → Ollama → API → None
|
||||
llm_backend = "laruche" → LaRuche only
|
||||
llm_backend = "ollama" → Ollama only
|
||||
llm_backend = "api" → External API only
|
||||
```
|
||||
|
||||
At each step, if a backend fails (timeout, network error, missing model), the next one is tried **silently**. If all fail, `complete()` returns `None`.
|
||||
|
||||
### Agentic tool-calling loop (`_call_anthropic`)
|
||||
|
||||
When `tools` is passed to `complete()`, the Anthropic backend enters agentic mode:
|
||||
|
||||
```
|
||||
_call_anthropic(messages, system, tools, max_tokens, timeout)
|
||||
│
|
||||
├─ POST /v1/messages {tools: [...]}
|
||||
│
|
||||
├─ [stop_reason = "tool_use"]
|
||||
│ for each tool_use block:
|
||||
│ result = _execute_tool(name, inputs)
|
||||
│ append {role: "tool", tool_use_id: ..., content: result}
|
||||
│ POST /v1/messages [messages + tool results] ← next turn
|
||||
│
|
||||
└─ [stop_reason = "end_turn"] → returns final text
|
||||
[≥6 turns] → returns partial text + warning
|
||||
```
|
||||
|
||||
`_execute_tool()` dispatches directly to `mcp_server._impl_*` (no network), checking `mcp_allowed_tools` for `run_action`.
|
||||
|
||||
### Tool-calling in chat (`chat()`)
|
||||
|
||||
If `llm_chat_tools_enabled = True`, the chat passes `tools=_BJORN_TOOLS` to the backend, letting the LLM answer with real-time data (hosts, vulns, creds…) rather than relying only on its training knowledge.
|
||||
|
||||
### Chat history
|
||||
|
||||
- Each session has its own history (key = `session_id`)
|
||||
- Special session `"llm_orchestrator"`: contains the autonomous orchestrator's reasoning
|
||||
- Max size configurable: `llm_chat_history_size` (default: 20 messages)
|
||||
- History is **in-memory only** — not persisted across restarts
|
||||
- Thread-safe via `_hist_lock`
|
||||
|
||||
---
|
||||
|
||||
## 4. MCP Server (`mcp_server.py`)
|
||||
|
||||
### What is MCP?
|
||||
|
||||
The **Model Context Protocol** (Anthropic) is an open-source protocol that lets AI agents (Claude Desktop, custom agents, etc.) use external tools via a standardised interface.
|
||||
|
||||
By enabling Bjorn's MCP server, **any MCP client can query and control Bjorn** — without knowing the internal DB structure.
|
||||
|
||||
### Exposed tools
|
||||
|
||||
| Tool | Arguments | Description |
|
||||
|------|-----------|-------------|
|
||||
| `get_hosts` | `alive_only: bool = True` | Returns discovered hosts (IP, MAC, hostname, OS, ports) |
|
||||
| `get_vulnerabilities` | `host_ip: str = ""`, `limit: int = 100` | Returns discovered CVE vulnerabilities |
|
||||
| `get_credentials` | `service: str = ""`, `limit: int = 100` | Returns captured credentials (SSH, FTP, SMB…) |
|
||||
| `get_action_history` | `limit: int = 50`, `action_name: str = ""` | History of executed actions |
|
||||
| `get_status` | *(none)* | Real-time state: mode, active action, counters |
|
||||
| `run_action` | `action_name: str`, `target_ip: str`, `target_mac: str = ""` | Queues a Bjorn action (MCP priority = 80) |
|
||||
| `query_db` | `sql: str`, `params: str = "[]"` | Free SELECT against the SQLite DB (read-only) |
|
||||
|
||||
**Security:** each tool checks `mcp_allowed_tools` — unlisted tools return a clean error. `query_db` rejects anything that is not a `SELECT`.
|
||||
|
||||
### `_impl_run_action` — priority detail
|
||||
|
||||
```python
|
||||
_MCP_PRIORITY = 80 # > scheduler(40) > normal(50)
|
||||
|
||||
sd.db.queue_action(
|
||||
action_name=action_name,
|
||||
mac=mac, # resolved from hosts WHERE ip=? if not supplied
|
||||
ip=target_ip,
|
||||
priority=_MCP_PRIORITY,
|
||||
trigger="mcp",
|
||||
metadata={"decision_method": "mcp", "decision_origin": "mcp"},
|
||||
)
|
||||
sd.queue_event.set() # wakes the orchestrator immediately
|
||||
```
|
||||
|
||||
### Available transports
|
||||
|
||||
| Transport | Config | Usage |
|
||||
|-----------|--------|-------|
|
||||
| `http` (default) | `mcp_transport: "http"`, `mcp_port: 8765` | Accessible from any MCP client on LAN via SSE |
|
||||
| `stdio` | `mcp_transport: "stdio"` | Claude Desktop, CLI agents |
|
||||
|
||||
---
|
||||
|
||||
## 5. LLM Orchestrator (`llm_orchestrator.py`)
|
||||
|
||||
The LLM Orchestrator transforms Bjorn from a scriptable tool into an autonomous agent. It is **completely optional** and can be disabled at any time via `llm_orchestrator_mode = "none"`.
|
||||
|
||||
### Operating modes
|
||||
|
||||
| Mode | Config value | Operation |
|
||||
|------|-------------|-----------|
|
||||
| Disabled | `"none"` (default) | LLM plays no role in planning |
|
||||
| Advisor | `"advisor"` | LLM consulted periodically, suggests 1 action |
|
||||
| Autonomous | `"autonomous"` | Own thread, LLM observes + plans with tools |
|
||||
|
||||
### Internal architecture
|
||||
|
||||
```
|
||||
LLMOrchestrator
|
||||
├── start() Starts autonomous thread if mode=autonomous
|
||||
├── stop() Stops thread (join 15s max)
|
||||
├── restart_if_mode_changed() Called from orchestrator.run() each iteration
|
||||
├── is_active() True if autonomous thread is alive
|
||||
│
|
||||
├── [ADVISOR MODE]
|
||||
│ advise() → called from orchestrator._process_background_tasks()
|
||||
│ ├── _build_snapshot() → compact dict (hosts, vulns, creds, queue)
|
||||
│ ├── LLMBridge().complete(prompt, system)
|
||||
│ └── _apply_advisor_response(raw, allowed)
|
||||
│ ├── parse JSON {"action": str, "target_ip": str, "reason": str}
|
||||
│ ├── validate action ∈ allowed
|
||||
│ └── db.queue_action(priority=85, trigger="llm_advisor")
|
||||
│
|
||||
└── [AUTONOMOUS MODE]
|
||||
_autonomous_loop() Thread "LLMOrchestrator" (daemon)
|
||||
└── loop:
|
||||
_compute_fingerprint() → (hosts, vulns, creds, max_queue_id)
|
||||
_has_actionable_change() → skip if nothing increased
|
||||
_run_autonomous_cycle()
|
||||
├── filter tools: read-only always + run_action if in allowed
|
||||
├── LLMBridge().complete(prompt, system, tools=[...])
|
||||
│ └── _call_anthropic() agentic loop
|
||||
│ → LLM calls run_action via tools
|
||||
│ → _execute_tool → _impl_run_action → queue
|
||||
└── if llm_orchestrator_log_reasoning=True:
|
||||
logger.info("[LLM_ORCH_REASONING]...")
|
||||
_push_to_chat() → "llm_orchestrator" session in LLMBridge
|
||||
sleep(llm_orchestrator_interval_s)
|
||||
```
|
||||
|
||||
### Fingerprint and smart skip
|
||||
|
||||
```python
|
||||
def _compute_fingerprint(self) -> tuple:
|
||||
# (host_count, vuln_count, cred_count, max_completed_queue_id)
|
||||
return (hosts, vulns, creds, last_id)
|
||||
|
||||
def _has_actionable_change(self, fp: tuple) -> bool:
|
||||
if self._last_fingerprint is None:
|
||||
return True # first cycle always runs
|
||||
# Triggers ONLY if something INCREASED
|
||||
# hosts going offline → not actionable
|
||||
return any(fp[i] > self._last_fingerprint[i] for i in range(len(fp)))
|
||||
```
|
||||
|
||||
**Token savings:** if `llm_orchestrator_skip_if_no_change = True` (default), the LLM cycle is skipped if no new hosts/vulns/creds and no action completed since the last cycle.
|
||||
|
||||
### LLM priorities vs queue
|
||||
|
||||
```python
|
||||
_ADVISOR_PRIORITY = 85 # advisor > MCP(80) > normal(50) > scheduler(40)
|
||||
_AUTONOMOUS_PRIORITY = 82 # autonomous slightly below advisor
|
||||
```
|
||||
|
||||
### Autonomous system prompt — example
|
||||
|
||||
```
|
||||
"You are Bjorn's autonomous orchestrator, running on a Raspberry Pi network security tool.
|
||||
Current state: 12 hosts discovered, 3 vulnerabilities, 1 credentials.
|
||||
Operation mode: ATTACK. Hard limit: at most 3 run_action calls per cycle.
|
||||
Only these action names may be queued: NmapScan, SSHBruteforce, SMBScan.
|
||||
Strategy: prioritise unexplored services, hosts with high port counts, and hosts with no recent scans.
|
||||
Do not queue duplicate actions already pending or recently successful.
|
||||
Use Norse references occasionally. Be terse and tactical."
|
||||
```
|
||||
|
||||
### Advisor response format
|
||||
|
||||
```json
|
||||
// Action recommended:
|
||||
{"action": "NmapScan", "target_ip": "192.168.1.42", "reason": "unexplored host, 0 open ports known"}
|
||||
|
||||
// Nothing to do:
|
||||
{"action": null}
|
||||
```
|
||||
|
||||
### Reasoning log
|
||||
|
||||
When `llm_orchestrator_log_reasoning = True`:
|
||||
- Full reasoning is logged via `logger.info("[LLM_ORCH_REASONING]...")`
|
||||
- It is also injected into the `"llm_orchestrator"` session in `LLMBridge._chat_histories`
|
||||
- Viewable in real time in `chat.html` via the **Orch Log** button
|
||||
|
||||
---
|
||||
|
||||
## 6. Orchestrator & Scheduler integration
|
||||
|
||||
### `orchestrator.py`
|
||||
|
||||
```python
|
||||
# __init__
|
||||
self.llm_orchestrator = None
|
||||
self._init_llm_orchestrator()
|
||||
|
||||
# _init_llm_orchestrator()
|
||||
if shared_data.config.get("llm_enabled") and shared_data.config.get("llm_orchestrator_mode") != "none":
|
||||
from llm_orchestrator import LLMOrchestrator
|
||||
self.llm_orchestrator = LLMOrchestrator(shared_data)
|
||||
self.llm_orchestrator.start()
|
||||
|
||||
# run() — each iteration
|
||||
self._sync_llm_orchestrator() # starts/stops thread according to runtime config
|
||||
|
||||
# _process_background_tasks()
|
||||
if self.llm_orchestrator and mode == "advisor":
|
||||
self.llm_orchestrator.advise()
|
||||
```
|
||||
|
||||
### `action_scheduler.py` — skip option
|
||||
|
||||
```python
|
||||
# In run(), each iteration:
|
||||
_llm_skip = bool(
|
||||
shared_data.config.get("llm_orchestrator_skip_scheduler", False)
|
||||
and shared_data.config.get("llm_orchestrator_mode") == "autonomous"
|
||||
and shared_data.config.get("llm_enabled", False)
|
||||
)
|
||||
|
||||
if not _llm_skip:
|
||||
self._publish_all_upcoming() # step 2: publish due actions
|
||||
self._evaluate_global_actions() # step 3: global evaluation
|
||||
self.evaluate_all_triggers() # step 4: per-host triggers
|
||||
# Steps 1 (promote due) and 5 (cleanup/priorities) always run
|
||||
```
|
||||
|
||||
When `llm_orchestrator_skip_scheduler = True` + `mode = autonomous` + `llm_enabled = True`:
|
||||
- The scheduler no longer publishes automatic actions (no more `B_require`, `B_trigger`, etc.)
|
||||
- The autonomous LLM becomes **sole master of the queue**
|
||||
- Queue hygiene (promotions, cleanup) remains active
|
||||
|
||||
---
|
||||
|
||||
## 7. Web Utils LLM (`web_utils/llm_utils.py`)
|
||||
|
||||
Follows the exact **same pattern** as all other `web_utils` (constructor `__init__(self, shared_data)`, methods called by `webapp.py`).
|
||||
|
||||
### Methods
|
||||
|
||||
| Method | Type | Description |
|
||||
|--------|------|-------------|
|
||||
| `get_llm_status(handler)` | GET | LLM bridge state (active backend, LaRuche URL…) |
|
||||
| `get_llm_config(handler)` | GET | Current LLM config (api_key masked) |
|
||||
| `get_llm_reasoning(handler)` | GET | `llm_orchestrator` session history (reasoning log) |
|
||||
| `handle_chat(data)` | POST | Sends a message, returns LLM response |
|
||||
| `clear_chat_history(data)` | POST | Clears a session's history |
|
||||
| `get_mcp_status(handler)` | GET | MCP server state (running, port, transport) |
|
||||
| `toggle_mcp(data)` | POST | Enables/disables MCP server + saves config |
|
||||
| `save_mcp_config(data)` | POST | Saves MCP config (tools, port, transport) |
|
||||
| `save_llm_config(data)` | POST | Saves LLM config (all parameters) |
|
||||
|
||||
---
|
||||
|
||||
## 8. EPD comment integration (`comment.py`)
|
||||
|
||||
### Behaviour before modification
|
||||
|
||||
```
|
||||
get_comment(status, lang, params)
|
||||
└── if delay elapsed OR status changed
|
||||
└── _pick_text(status, lang, params) ← SQLite DB
|
||||
└── returns weighted text
|
||||
```
|
||||
|
||||
### Behaviour after modification
|
||||
|
||||
```
|
||||
get_comment(status, lang, params)
|
||||
└── if delay elapsed OR status changed
|
||||
│
|
||||
├── [if llm_comments_enabled = True]
|
||||
│ └── LLMBridge().generate_comment(status, params)
|
||||
│ ├── success → LLM text (≤12 words, ~8s max)
|
||||
│ └── failure/timeout → text = None
|
||||
│
|
||||
└── [if text = None] ← SYSTEMATIC FALLBACK
|
||||
└── _pick_text(status, lang, params) ← original behaviour
|
||||
└── returns weighted DB text
|
||||
```
|
||||
|
||||
**Original behaviour preserved 100% if LLM disabled or failing.**
|
||||
|
||||
---
|
||||
|
||||
## 9. Configuration (`shared.py`)
|
||||
|
||||
### LLM Bridge section (`__title_llm__`)
|
||||
|
||||
| Key | Default | Type | Description |
|
||||
|-----|---------|------|-------------|
|
||||
| `llm_enabled` | `False` | bool | **Master toggle** — activates the entire bridge |
|
||||
| `llm_comments_enabled` | `False` | bool | Use LLM for EPD comments |
|
||||
| `llm_chat_enabled` | `True` | bool | Enable /chat.html interface |
|
||||
| `llm_chat_tools_enabled` | `False` | bool | Enable tool-calling in web chat |
|
||||
| `llm_backend` | `"auto"` | str | `auto` \| `laruche` \| `ollama` \| `api` |
|
||||
| `llm_laruche_discovery` | `True` | bool | Auto-discover LaRuche nodes via mDNS |
|
||||
| `llm_laruche_url` | `""` | str | Manual LaRuche URL (overrides discovery) |
|
||||
| `llm_ollama_url` | `"http://127.0.0.1:11434"` | str | Local Ollama URL |
|
||||
| `llm_ollama_model` | `"phi3:mini"` | str | Ollama model to use |
|
||||
| `llm_api_provider` | `"anthropic"` | str | `anthropic` \| `openai` \| `openrouter` |
|
||||
| `llm_api_key` | `""` | str | API key (masked in UI) |
|
||||
| `llm_api_model` | `"claude-haiku-4-5-20251001"` | str | External API model |
|
||||
| `llm_api_base_url` | `""` | str | Custom base URL (OpenRouter, proxy…) |
|
||||
| `llm_timeout_s` | `30` | int | Global LLM call timeout (seconds) |
|
||||
| `llm_max_tokens` | `500` | int | Max tokens for chat |
|
||||
| `llm_comment_max_tokens` | `80` | int | Max tokens for EPD comments |
|
||||
| `llm_chat_history_size` | `20` | int | Max messages per chat session |
|
||||
|
||||
### MCP Server section (`__title_mcp__`)
|
||||
|
||||
| Key | Default | Type | Description |
|
||||
|-----|---------|------|-------------|
|
||||
| `mcp_enabled` | `False` | bool | Enable MCP server |
|
||||
| `mcp_transport` | `"http"` | str | `http` (SSE) \| `stdio` |
|
||||
| `mcp_port` | `8765` | int | HTTP SSE port |
|
||||
| `mcp_allowed_tools` | `[all]` | list | List of authorised MCP tools |
|
||||
|
||||
### LLM Orchestrator section (`__title_llm_orch__`)
|
||||
|
||||
| Key | Default | Type | Description |
|
||||
|-----|---------|------|-------------|
|
||||
| `llm_orchestrator_mode` | `"none"` | str | `none` \| `advisor` \| `autonomous` |
|
||||
| `llm_orchestrator_interval_s` | `60` | int | Delay between autonomous cycles (min 30s) |
|
||||
| `llm_orchestrator_max_actions` | `3` | int | Max actions per autonomous cycle |
|
||||
| `llm_orchestrator_allowed_actions` | `[]` | list | Actions the LLM may queue (an empty list falls back to `mcp_allowed_tools`) |
|
||||
| `llm_orchestrator_skip_scheduler` | `False` | bool | Disable scheduler when autonomous is active |
|
||||
| `llm_orchestrator_skip_if_no_change` | `True` | bool | Skip cycle if fingerprint unchanged |
|
||||
| `llm_orchestrator_log_reasoning` | `False` | bool | Log full LLM reasoning |
|
||||
|
||||
---
|
||||
|
||||
## 10. HTTP Routes (`webapp.py`)
|
||||
|
||||
### GET routes
|
||||
|
||||
| Route | Handler | Description |
|
||||
|-------|---------|-------------|
|
||||
| `GET /api/llm/status` | `llm_utils.get_llm_status` | LLM bridge state |
|
||||
| `GET /api/llm/config` | `llm_utils.get_llm_config` | LLM config (api_key masked) |
|
||||
| `GET /api/llm/reasoning` | `llm_utils.get_llm_reasoning` | Orchestrator reasoning log |
|
||||
| `GET /api/mcp/status` | `llm_utils.get_mcp_status` | MCP server state |
|
||||
|
||||
### POST routes (JSON data-only)
|
||||
|
||||
| Route | Handler | Description |
|
||||
|-------|---------|-------------|
|
||||
| `POST /api/llm/chat` | `llm_utils.handle_chat` | Send a message to the LLM |
|
||||
| `POST /api/llm/clear_history` | `llm_utils.clear_chat_history` | Clear a session's history |
|
||||
| `POST /api/llm/config` | `llm_utils.save_llm_config` | Save LLM config |
|
||||
| `POST /api/mcp/toggle` | `llm_utils.toggle_mcp` | Enable/disable MCP |
|
||||
| `POST /api/mcp/config` | `llm_utils.save_mcp_config` | Save MCP config |
|
||||
|
||||
All routes respect Bjorn's existing authentication (`webauth`).
|
||||
|
||||
---
|
||||
|
||||
## 11. Web interfaces
|
||||
|
||||
### `/chat.html`
|
||||
|
||||
Terminal-style chat interface (black/red, consistent with Bjorn).
|
||||
|
||||
**Features:**
|
||||
- Auto-detects LLM state on load (`GET /api/llm/status`)
|
||||
- Displays active backend (LaRuche URL, or mode)
|
||||
- "Bjorn is thinking..." indicator during response
|
||||
- Unique session ID per browser tab
|
||||
- `Enter` = send, `Shift+Enter` = new line
|
||||
- Textarea auto-resize
|
||||
- **"Clear history"** button — clears server-side session
|
||||
- **"Orch Log"** button — loads the autonomous orchestrator's reasoning
|
||||
- Calls `GET /api/llm/reasoning`
|
||||
- Renders each message (cycle prompt + LLM response) as chat bubbles
|
||||
- "← Back to chat" to return to normal chat
|
||||
- Helper message if log is empty (hint: enable `llm_orchestrator_log_reasoning`)
|
||||
|
||||
**Access:** `http://[bjorn-ip]:8000/chat.html`
|
||||
|
||||
### `/mcp-config.html`
|
||||
|
||||
Full LLM & MCP configuration page.
|
||||
|
||||
**LLM Bridge section:**
|
||||
- Master enable/disable toggle
|
||||
- EPD comments, chat, chat tool-calling toggles
|
||||
- Backend selector (auto / laruche / ollama / api)
|
||||
- LaRuche mDNS discovery toggle + manual URL
|
||||
- Ollama configuration (URL + model)
|
||||
- External API configuration (provider, key, model, custom URL)
|
||||
- Timeout and token parameters
|
||||
- "TEST CONNECTION" button
|
||||
|
||||
**MCP Server section:**
|
||||
- Enable toggle with live start/stop
|
||||
- Transport selector (HTTP SSE / stdio)
|
||||
- HTTP port
|
||||
- Per-tool checkboxes
|
||||
- "RUNNING" / "OFF" indicator
|
||||
|
||||
**Access:** `http://[bjorn-ip]:8000/mcp-config.html`
|
||||
|
||||
---
|
||||
|
||||
## 12. Startup (`Bjorn.py`)
|
||||
|
||||
```python
|
||||
# LLM Bridge — warm up singleton
|
||||
try:
|
||||
from llm_bridge import LLMBridge
|
||||
LLMBridge() # Starts mDNS discovery if llm_laruche_discovery=True
|
||||
logger.info("LLM Bridge initialised")
|
||||
except Exception as e:
|
||||
logger.warning("LLM Bridge init skipped: %s", e)
|
||||
|
||||
# MCP Server
|
||||
try:
|
||||
import mcp_server
|
||||
if shared_data.config.get("mcp_enabled", False):
|
||||
mcp_server.start() # Daemon thread "MCPServer"
|
||||
logger.info("MCP server started")
|
||||
else:
|
||||
logger.info("MCP server loaded (disabled)")
|
||||
except Exception as e:
|
||||
logger.warning("MCP server init skipped: %s", e)
|
||||
```
|
||||
|
||||
The LLM Orchestrator is initialised inside `orchestrator.py` (not `Bjorn.py`), since it depends on the orchestrator loop cycle.
|
||||
|
||||
---
|
||||
|
||||
## 13. LaRuche / LAND Protocol compatibility
|
||||
|
||||
### LAND Protocol
|
||||
|
||||
LAND (Local AI Network Discovery) is the LaRuche protocol:
|
||||
- **Discovery:** mDNS service type `_ai-inference._tcp.local.`
|
||||
- **Inference:** `POST http://[node]:8419/infer`
|
||||
|
||||
### What Bjorn implements on the Python side
|
||||
|
||||
```python
|
||||
# mDNS listening (zeroconf)
|
||||
from zeroconf import Zeroconf, ServiceBrowser
|
||||
ServiceBrowser(zc, "_ai-inference._tcp.local.", listener)
|
||||
# → Auto-detects LaRuche nodes
|
||||
|
||||
# Inference call (urllib stdlib, zero dependency)
|
||||
payload = {"prompt": "...", "capability": "llm", "max_tokens": 500}
|
||||
urllib.request.urlopen(f"{url}/infer", data=json.dumps(payload))
|
||||
```
|
||||
|
||||
### Scenarios
|
||||
|
||||
| Scenario | Behaviour |
|
||||
|----------|-----------|
|
||||
| LaRuche node detected on LAN | Used automatically as priority backend |
|
||||
| Multiple LaRuche nodes | First discovered is used |
|
||||
| Manual URL configured | Used directly, discovery ignored |
|
||||
| LaRuche node absent | Cascades to Ollama or external API |
|
||||
| `zeroconf` not installed | Discovery silently disabled, DEBUG log |
|
||||
|
||||
---
|
||||
|
||||
## 14. Optional dependencies
|
||||
|
||||
| Package | Min version | Feature unlocked | Install command |
|
||||
|---------|------------|------------------|----------------|
|
||||
| `mcp[cli]` | ≥ 1.0.0 | Full MCP server | `pip install "mcp[cli]"` |
|
||||
| `zeroconf` | ≥ 0.131.0 | LaRuche mDNS discovery | `pip install zeroconf` |
|
||||
|
||||
**No new dependencies** added for LLM backends:
|
||||
- **LaRuche / Ollama**: uses `urllib.request` (Python stdlib)
|
||||
- **Anthropic / OpenAI**: REST API via `urllib` — no SDK needed
|
||||
|
||||
---
|
||||
|
||||
## 15. Quick activation & configuration
|
||||
|
||||
### Basic LLM chat
|
||||
|
||||
```bash
|
||||
curl -X POST http://[bjorn-ip]:8000/api/llm/config \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"llm_enabled": true, "llm_backend": "ollama", "llm_ollama_model": "phi3:mini"}'
|
||||
# → http://[bjorn-ip]:8000/chat.html
|
||||
```
|
||||
|
||||
### Chat with tool-calling (LLM accesses live network data)
|
||||
|
||||
```bash
|
||||
curl -X POST http://[bjorn-ip]:8000/api/llm/config \
|
||||
-d '{"llm_enabled": true, "llm_chat_tools_enabled": true}'
|
||||
```
|
||||
|
||||
### LLM Orchestrator — advisor mode
|
||||
|
||||
```bash
|
||||
curl -X POST http://[bjorn-ip]:8000/api/llm/config \
|
||||
-d '{
|
||||
"llm_enabled": true,
|
||||
"llm_orchestrator_mode": "advisor",
|
||||
"llm_orchestrator_allowed_actions": ["NmapScan", "SSHBruteforce"]
|
||||
}'
|
||||
```
|
||||
|
||||
### LLM Orchestrator — autonomous mode (LLM as sole planner)
|
||||
|
||||
```bash
|
||||
curl -X POST http://[bjorn-ip]:8000/api/llm/config \
|
||||
-d '{
|
||||
"llm_enabled": true,
|
||||
"llm_orchestrator_mode": "autonomous",
|
||||
"llm_orchestrator_skip_scheduler": true,
|
||||
"llm_orchestrator_max_actions": 5,
|
||||
"llm_orchestrator_interval_s": 120,
|
||||
"llm_orchestrator_allowed_actions": ["NmapScan", "SSHBruteforce", "SMBScan"],
|
||||
"llm_orchestrator_log_reasoning": true
|
||||
}'
|
||||
# → View reasoning: http://[bjorn-ip]:8000/chat.html → Orch Log button
|
||||
```
|
||||
|
||||
### With Anthropic API
|
||||
|
||||
```bash
|
||||
curl -X POST http://[bjorn-ip]:8000/api/llm/config \
|
||||
-d '{
|
||||
"llm_enabled": true,
|
||||
"llm_backend": "api",
|
||||
"llm_api_provider": "anthropic",
|
||||
"llm_api_key": "sk-ant-...",
|
||||
"llm_api_model": "claude-haiku-4-5-20251001"
|
||||
}'
|
||||
```
|
||||
|
||||
### With OpenRouter (access to all models)
|
||||
|
||||
```bash
|
||||
curl -X POST http://[bjorn-ip]:8000/api/llm/config \
|
||||
-d '{
|
||||
"llm_enabled": true,
|
||||
"llm_backend": "api",
|
||||
"llm_api_provider": "openrouter",
|
||||
"llm_api_key": "sk-or-...",
|
||||
"llm_api_model": "meta-llama/llama-3.2-3b-instruct",
|
||||
"llm_api_base_url": "https://openrouter.ai/api"
|
||||
}'
|
||||
```
|
||||
|
||||
### Model recommendations by scenario
|
||||
|
||||
| Scenario | Backend | Recommended model | Pi RAM |
|
||||
|----------|---------|-------------------|--------|
|
||||
| Autonomous orchestrator + LaRuche on LAN | laruche | Mistral/Phi on the node | 0 (remote inference) |
|
||||
| Autonomous orchestrator offline | ollama | `qwen2.5:3b` | ~3 GB |
|
||||
| Autonomous orchestrator cloud | api | `claude-haiku-4-5-20251001` | 0 |
|
||||
| Chat + tools | ollama | `phi3:mini` | ~2 GB |
|
||||
| EPD comments only | ollama | `smollm2:360m` | ~400 MB |
|
||||
|
||||
---
|
||||
|
||||
## 16. Complete API endpoint reference
|
||||
|
||||
### GET
|
||||
|
||||
```
|
||||
GET /api/llm/status
|
||||
→ {"enabled": bool, "backend": str, "laruche_url": str|null,
|
||||
"laruche_discovery": bool, "ollama_url": str, "ollama_model": str,
|
||||
"api_provider": str, "api_model": str, "api_key_set": bool}
|
||||
|
||||
GET /api/llm/config
|
||||
→ {all llm_* keys except api_key, + "llm_api_key_set": bool}
|
||||
|
||||
GET /api/llm/reasoning
|
||||
→ {"status": "ok", "messages": [{"role": str, "content": str}, ...], "count": int}
|
||||
→ {"status": "error", "message": str, "messages": [], "count": 0}
|
||||
|
||||
GET /api/mcp/status
|
||||
→ {"enabled": bool, "running": bool, "transport": str,
|
||||
"port": int, "allowed_tools": [str]}
|
||||
```
|
||||
|
||||
### POST
|
||||
|
||||
```
|
||||
POST /api/llm/chat
|
||||
Body: {"message": str, "session_id": str?}
|
||||
→ {"status": "ok", "response": str, "session_id": str}
|
||||
→ {"status": "error", "message": str}
|
||||
|
||||
POST /api/llm/clear_history
|
||||
Body: {"session_id": str?}
|
||||
→ {"status": "ok"}
|
||||
|
||||
POST /api/llm/config
|
||||
Body: {any subset of llm_* and llm_orchestrator_* keys}
|
||||
→ {"status": "ok"}
|
||||
→ {"status": "error", "message": str}
|
||||
|
||||
POST /api/mcp/toggle
|
||||
Body: {"enabled": bool}
|
||||
→ {"status": "ok", "enabled": bool, "started": bool?}
|
||||
|
||||
POST /api/mcp/config
|
||||
Body: {"allowed_tools": [str]?, "port": int?, "transport": str?}
|
||||
→ {"status": "ok", "config": {...}}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 17. Queue priority system
|
||||
|
||||
```
|
||||
Priority Source Trigger
|
||||
──────────────────────────────────────────────────────────────
|
||||
85 LLM Advisor llm_orchestrator.advise()
|
||||
82 LLM Autonomous _run_autonomous_cycle() via run_action tool
|
||||
80 External MCP _impl_run_action() via MCP client or chat
|
||||
50 Normal / manual queue_action() without explicit priority
|
||||
40 Scheduler action_scheduler evaluates triggers
|
||||
```
|
||||
|
||||
The scheduler always processes the highest-priority pending item first. LLM and MCP actions therefore preempt scheduler actions.
|
||||
|
||||
---
|
||||
|
||||
## 18. Fallbacks & graceful degradation
|
||||
|
||||
| Condition | Behaviour |
|
||||
|-----------|-----------|
|
||||
| `llm_enabled = False` | `complete()` returns `None` immediately — zero overhead |
|
||||
| `llm_orchestrator_mode = "none"` | LLMOrchestrator not instantiated |
|
||||
| `mcp` not installed | `_build_mcp_server()` returns `None`, WARNING log |
|
||||
| `zeroconf` not installed | LaRuche discovery silently disabled, DEBUG log |
|
||||
| LaRuche node timeout | Exception caught, cascade to next backend |
|
||||
| Ollama not running | `URLError` caught, cascade to API |
|
||||
| API key missing | `_call_api()` returns `None`, cascade |
|
||||
| All backends fail | `complete()` returns `None` |
|
||||
| LLM returns `None` for EPD | `comment.py` uses `_pick_text()` (original behaviour) |
|
||||
| LLM advisor: invalid JSON | DEBUG log, returns `None`, next cycle |
|
||||
| LLM advisor: disallowed action | WARNING log, ignored |
|
||||
| LLM autonomous: no change | cycle skipped, zero API call |
|
||||
| LLM autonomous: ≥6 tool turns | returns partial text + warning |
|
||||
| Exception in LLM Bridge | `try/except` at every level, DEBUG log |
|
||||
|
||||
### Timeouts
|
||||
|
||||
```
|
||||
Chat / complete() → llm_timeout_s (default: 30s)
|
||||
EPD comments → 8s (hardcoded, short to avoid blocking render)
|
||||
Autonomous cycle → 90s (long: may chain multiple tool calls)
|
||||
Advisor → 20s (short prompt + JSON response)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 19. Call sequences
|
||||
|
||||
### Web chat with tool-calling
|
||||
|
||||
```
|
||||
Browser → POST /api/llm/chat {"message": "which hosts are vulnerable?"}
|
||||
└── LLMUtils.handle_chat(data)
|
||||
└── LLMBridge().chat(message, session_id)
|
||||
└── complete(messages, system, tools=_BJORN_TOOLS)
|
||||
└── _call_anthropic(messages, tools=[...])
|
||||
├── POST /v1/messages → stop_reason=tool_use
|
||||
│ └── tool: get_hosts(alive_only=true)
|
||||
│ → _execute_tool → _impl_get_hosts()
|
||||
│ → JSON of hosts
|
||||
├── POST /v1/messages [+ tool result] → end_turn
|
||||
└── returns "3 exposed SSH hosts: 192.168.1.10, ..."
|
||||
← {"status": "ok", "response": "3 exposed SSH hosts..."}
|
||||
```
|
||||
|
||||
### LLM autonomous cycle
|
||||
|
||||
```
|
||||
Thread "LLMOrchestrator" (daemon, interval=60s)
|
||||
└── _run_autonomous_cycle()
|
||||
├── fp = _compute_fingerprint() → (12, 3, 1, 47)
|
||||
├── _has_actionable_change(fp) → True (vuln_count 2→3)
|
||||
├── self._last_fingerprint = fp
|
||||
│
|
||||
└── LLMBridge().complete(prompt, system, tools=[read-only + run_action])
|
||||
└── _call_anthropic(tools=[...])
|
||||
├── POST → tool_use: get_hosts()
|
||||
│ → [{ip: "192.168.1.20", ports: "22,80,443"}]
|
||||
├── POST → tool_use: get_action_history()
|
||||
│ → [...]
|
||||
├── POST → tool_use: run_action("SSHBruteforce", "192.168.1.20")
|
||||
│ → _execute_tool → _impl_run_action()
|
||||
│ → db.queue_action(priority=82, trigger="llm_autonomous")
|
||||
│ → queue_event.set()
|
||||
└── POST → end_turn
|
||||
→ "Queued SSHBruteforce on 192.168.1.20 (Mjolnir strikes the unguarded gate)"
|
||||
→ [if log_reasoning=True] logger.info("[LLM_ORCH_REASONING]...")
|
||||
→ [if log_reasoning=True] _push_to_chat(bridge, prompt, response)
|
||||
```
|
||||
|
||||
### Reading reasoning from chat.html
|
||||
|
||||
```
|
||||
User clicks "Orch Log"
|
||||
└── fetch GET /api/llm/reasoning
|
||||
└── LLMUtils.get_llm_reasoning(handler)
|
||||
└── LLMBridge()._chat_histories["llm_orchestrator"]
|
||||
→ [{"role": "user", "content": "[Autonomous cycle]..."},
|
||||
{"role": "assistant", "content": "Queued SSHBruteforce..."}]
|
||||
← {"status": "ok", "messages": [...], "count": 2}
|
||||
→ Rendered as chat bubbles in #messages
|
||||
```
|
||||
|
||||
### MCP from external client (Claude Desktop)
|
||||
|
||||
```
|
||||
Claude Desktop → tool_call: run_action("NmapScan", "192.168.1.0/24")
|
||||
└── FastMCP dispatch
|
||||
└── mcp_server.run_action(action_name, target_ip)
|
||||
└── _impl_run_action()
|
||||
├── db.queue_action(priority=80, trigger="mcp")
|
||||
└── queue_event.set()
|
||||
← {"status": "queued", "action": "NmapScan", "target": "192.168.1.0/24", "priority": 80}
|
||||
```
|
||||
|
||||
### EPD comment with LLM
|
||||
|
||||
```
|
||||
display.py → CommentAI.get_comment("SSHBruteforce", params={...})
|
||||
└── delay elapsed OR status changed → proceed
|
||||
├── llm_comments_enabled = True ?
|
||||
│ └── LLMBridge().generate_comment("SSHBruteforce", params)
|
||||
│ └── complete([{role:user, content:"Status: SSHBruteforce..."}],
|
||||
│ max_tokens=80, timeout=8)
|
||||
│ ├── LaRuche → "Norse gods smell SSH credentials..." ✓
|
||||
│ └── [or timeout 8s] → None
|
||||
└── text = None → _pick_text("SSHBruteforce", lang, params)
|
||||
└── SELECT FROM comments WHERE status='SSHBruteforce'
|
||||
→ "Processing authentication attempts..."
|
||||
```
|
||||
@@ -133,19 +133,44 @@ class ActionScheduler:
|
||||
# Keep queue consistent with current enable/disable flags.
|
||||
self._cancel_queued_disabled_actions()
|
||||
|
||||
# 1) Promote scheduled actions that are due
|
||||
# 1) Promote scheduled actions that are due (always — queue hygiene)
|
||||
self._promote_scheduled_to_pending()
|
||||
|
||||
# 2) Publish next scheduled occurrences for interval actions
|
||||
self._publish_all_upcoming()
|
||||
# When LLM autonomous mode owns scheduling, skip trigger evaluation
|
||||
# so it doesn't compete with or duplicate LLM decisions.
|
||||
# BUT: if the queue is empty, the heuristic scheduler resumes as fallback
|
||||
# to prevent deadlock when the LLM fails to produce valid actions.
|
||||
_llm_wants_skip = bool(
|
||||
self.shared_data.config.get("llm_orchestrator_skip_scheduler", False)
|
||||
and self.shared_data.config.get("llm_orchestrator_mode") == "autonomous"
|
||||
and self.shared_data.config.get("llm_enabled", False)
|
||||
)
|
||||
_queue_empty = False
|
||||
if _llm_wants_skip:
|
||||
try:
|
||||
row = self.shared_data.db.query_one(
|
||||
"SELECT COUNT(*) AS cnt FROM action_queue WHERE status IN ('pending','running','scheduled')"
|
||||
)
|
||||
_queue_empty = (row and int(row["cnt"]) == 0)
|
||||
except Exception:
|
||||
pass
|
||||
_llm_skip = _llm_wants_skip and not _queue_empty
|
||||
|
||||
# 3) Evaluate global on_start actions
|
||||
self._evaluate_global_actions()
|
||||
if not _llm_skip:
|
||||
if _llm_wants_skip and _queue_empty:
|
||||
logger.info("Scheduler: LLM queue empty — heuristic fallback active")
|
||||
# 2) Publish next scheduled occurrences for interval actions
|
||||
self._publish_all_upcoming()
|
||||
|
||||
# 4) Evaluate per-host triggers
|
||||
self.evaluate_all_triggers()
|
||||
# 3) Evaluate global on_start actions
|
||||
self._evaluate_global_actions()
|
||||
|
||||
# 5) Queue maintenance
|
||||
# 4) Evaluate per-host triggers
|
||||
self.evaluate_all_triggers()
|
||||
else:
|
||||
logger.debug("Scheduler: trigger evaluation skipped (LLM autonomous owns scheduling)")
|
||||
|
||||
# 5) Queue maintenance (always — starvation prevention + cleanup)
|
||||
self.cleanup_queue()
|
||||
self.update_priorities()
|
||||
|
||||
|
||||
39
comment.py
39
comment.py
@@ -319,6 +319,9 @@ class CommentAI:
|
||||
"""
|
||||
Return a comment if status changed or delay expired.
|
||||
|
||||
When llm_comments_enabled=True in config, tries LLM first;
|
||||
falls back to the database/template system on any failure.
|
||||
|
||||
Args:
|
||||
status: logical status name (e.g., "IDLE", "SSHBruteforce", "NetworkScanner").
|
||||
lang: language override (e.g., "fr"); if None, auto priority is used.
|
||||
@@ -331,14 +334,36 @@ class CommentAI:
|
||||
status = status or "IDLE"
|
||||
|
||||
status_changed = (status != self.last_status)
|
||||
if status_changed or (current_time - self.last_comment_time >= self.comment_delay):
|
||||
if not status_changed and (current_time - self.last_comment_time < self.comment_delay):
|
||||
return None
|
||||
|
||||
# --- Try LLM if enabled ---
|
||||
text: Optional[str] = None
|
||||
llm_generated = False
|
||||
if getattr(self.shared_data, "llm_comments_enabled", False):
|
||||
try:
|
||||
from llm_bridge import LLMBridge
|
||||
text = LLMBridge().generate_comment(status, params)
|
||||
if text:
|
||||
llm_generated = True
|
||||
except Exception as e:
|
||||
logger.debug(f"LLM comment failed, using fallback: {e}")
|
||||
|
||||
# --- Fallback: database / template system (original behaviour) ---
|
||||
if not text:
|
||||
text = self._pick_text(status, lang, params)
|
||||
if text:
|
||||
self.last_status = status
|
||||
self.last_comment_time = current_time
|
||||
self.comment_delay = self._new_delay()
|
||||
logger.debug(f"Next comment delay: {self.comment_delay}s")
|
||||
return text
|
||||
|
||||
if text:
|
||||
self.last_status = status
|
||||
self.last_comment_time = current_time
|
||||
self.comment_delay = self._new_delay()
|
||||
logger.debug(f"Next comment delay: {self.comment_delay}s")
|
||||
# Log comments
|
||||
if llm_generated:
|
||||
logger.info(f"[LLM_COMMENT] ({status}) {text}")
|
||||
else:
|
||||
logger.info(f"[COMMENT] ({status}) {text}")
|
||||
return text
|
||||
return None
|
||||
|
||||
|
||||
|
||||
169
land_protocol.py
Normal file
169
land_protocol.py
Normal file
@@ -0,0 +1,169 @@
|
||||
# land_protocol.py
|
||||
# Python client for the LAND Protocol (Local AI Network Discovery).
|
||||
# https://github.com/infinition/land-protocol
|
||||
#
|
||||
# Replace this file to update LAND protocol compatibility.
|
||||
# Imported by llm_bridge.py — no other Bjorn code touches this.
|
||||
#
|
||||
# Protocol summary:
|
||||
# Discovery : mDNS service type _ai-inference._tcp.local. (port 5353)
|
||||
# Transport : TCP HTTP on port 8419 by default
|
||||
# Infer : POST /infer {"prompt": str, "capability": "llm", "max_tokens": int}
|
||||
# Response : {"response": str} or {"text": str}
|
||||
|
||||
import json
|
||||
import threading
|
||||
import time
|
||||
import urllib.request
|
||||
import urllib.error
|
||||
from typing import Optional, Callable
|
||||
|
||||
# mDNS service type broadcast by all LAND-compatible nodes (LaRuche, etc.)
|
||||
LAND_SERVICE_TYPE = "_ai-inference._tcp.local."
|
||||
|
||||
# Default inference port
|
||||
LAND_DEFAULT_PORT = 8419
|
||||
|
||||
|
||||
def discover_node(
    on_found: Callable[[str], None],
    stop_event: threading.Event,
    logger=None,
) -> None:
    """
    Background mDNS listener for LAND nodes.

    Calls on_found(url) whenever a new node is discovered or updated.
    Blocks until stop_event is set, then closes the Zeroconf handle.

    Requires: pip install zeroconf (degrades to a logged/printed no-op
    when the package is missing).
    """
    try:
        from zeroconf import Zeroconf, ServiceBrowser, ServiceListener
    except ImportError:
        if logger:
            logger.warning(
                "zeroconf not installed — LAND mDNS discovery disabled. "
                "Run: pip install zeroconf"
            )
        else:
            print("[LAND] zeroconf not installed — mDNS discovery disabled")
        return

    class _Listener(ServiceListener):
        def add_service(self, zc: Zeroconf, type_: str, name: str) -> None:  # type: ignore[override]
            info = zc.get_service_info(type_, name)
            if not info:
                return
            addresses = info.parsed_scoped_addresses()
            if not addresses:
                return
            port = info.port or LAND_DEFAULT_PORT
            host = addresses[0]
            # FIX: IPv6 literals (which contain ':') must be bracketed to
            # form a valid URL, e.g. http://[fe80::1%eth0]:8419 — the bare
            # form would be unparseable by urllib.
            if ":" in host:
                host = f"[{host}]"
            on_found(f"http://{host}:{port}")

        def remove_service(self, zc: Zeroconf, type_: str, name: str) -> None:  # type: ignore[override]
            # Nodes are never forgotten; the bridge keeps the last known URL.
            pass

        def update_service(self, zc: Zeroconf, type_: str, name: str) -> None:  # type: ignore[override]
            self.add_service(zc, type_, name)

    zc = Zeroconf()
    try:
        ServiceBrowser(zc, LAND_SERVICE_TYPE, _Listener())
        if logger:
            logger.info(f"LAND: mDNS discovery active ({LAND_SERVICE_TYPE})")
        # Event.wait() instead of time.sleep() so shutdown takes effect
        # immediately instead of up to 5 seconds later.
        while not stop_event.is_set():
            stop_event.wait(5)
    finally:
        zc.close()
|
||||
|
||||
|
||||
def infer(
    base_url: str,
    prompt: str,
    max_tokens: int = 500,
    capability: str = "llm",
    model: Optional[str] = None,
    timeout: int = 30,
) -> Optional[str]:
    """
    Send an inference request to a LAND node.

    POST {base_url}/infer
    Body: {"prompt": str, "capability": str, "max_tokens": int, "model": str|null}

    If model is None, the node uses its default model.
    Returns the response text, or None when the node replies without a
    recognised "response"/"text" key.

    NOTE: network errors (timeout, connection refused, HTTP errors) are
    deliberately NOT caught here — urllib exceptions propagate so the
    caller (llm_bridge) can log the failure and cascade to the next backend.
    """
    payload = {
        "prompt": prompt,
        "capability": capability,
        "max_tokens": max_tokens,
    }
    # Omit "model" entirely when unset so the node falls back to its default.
    if model:
        payload["model"] = model
    data = json.dumps(payload).encode()
    req = urllib.request.Request(
        f"{base_url.rstrip('/')}/infer",
        data=data,
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(req, timeout=timeout) as resp:
        body = json.loads(resp.read().decode())
        # LAND response may use "response" or "text" key
        return body.get("response") or body.get("text") or None
|
||||
|
||||
|
||||
def get_default_model(base_url: str, timeout: int = 10) -> Optional[str]:
    """
    Fetch a LAND node's current default model name.

    GET {base_url}/config/default_model
    Returns the model name string, or None on any failure (network error,
    malformed JSON, missing/empty key).
    """
    endpoint = f"{base_url.rstrip('/')}/config/default_model"
    try:
        request = urllib.request.Request(
            endpoint,
            headers={"Accept": "application/json"},
            method="GET",
        )
        with urllib.request.urlopen(request, timeout=timeout) as response:
            payload = json.loads(response.read().decode())
        return payload.get("default_model") or None
    except Exception:
        # Best-effort helper: callers treat None as "no default known".
        return None
|
||||
|
||||
|
||||
def list_models(base_url: str, timeout: int = 10) -> dict:
    """
    List the models available on a LAND node.

    GET {base_url}/models

    Returns a dict with:
        - "models": list of model dicts
        - "default_model": str or None (the node's current default model)

    Accepts both the LaRuche dict shape ({"models": [...], "default_model": ...})
    and a bare JSON list of models.
    Returns {"models": [], "default_model": None} on any failure.
    """
    try:
        request = urllib.request.Request(
            f"{base_url.rstrip('/')}/models",
            headers={"Accept": "application/json"},
            method="GET",
        )
        with urllib.request.urlopen(request, timeout=timeout) as response:
            decoded = json.loads(response.read().decode())
        if isinstance(decoded, list):
            # Flat-list variant: no default-model information available.
            return {"models": decoded, "default_model": None}
        if isinstance(decoded, dict):
            return {
                "models": decoded.get("models", []),
                "default_model": decoded.get("default_model") or None,
            }
        return {"models": [], "default_model": None}
    except Exception:
        return {"models": [], "default_model": None}
|
||||
629
llm_bridge.py
Normal file
629
llm_bridge.py
Normal file
@@ -0,0 +1,629 @@
|
||||
# llm_bridge.py
|
||||
# LLM backend cascade for Bjorn.
|
||||
# Priority: LaRuche (LAND/mDNS) → Ollama local → External API → None (template fallback)
|
||||
# All external deps are optional — graceful degradation at every level.
|
||||
|
||||
import json
|
||||
import threading
|
||||
import time
|
||||
import urllib.request
|
||||
import urllib.error
|
||||
from typing import Optional, List, Dict, Any
|
||||
|
||||
from logger import Logger
|
||||
import land_protocol
|
||||
|
||||
logger = Logger(name="llm_bridge.py", level=20) # INFO
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Tool definitions (Anthropic Messages API format).
|
||||
# Mirrors the tools exposed by mcp_server.py — add new tools here too.
|
||||
# ---------------------------------------------------------------------------
|
||||
# Tool schemas in Anthropic Messages API format, passed to complete() for
# tool-calling chat and the autonomous orchestrator. All tools except
# run_action are read-only. Keep in sync with mcp_server.py.
_BJORN_TOOLS: List[Dict] = [
    # Read-only: discovered network hosts.
    {
        "name": "get_hosts",
        "description": "Return all network hosts discovered by Bjorn's scanner.",
        "input_schema": {
            "type": "object",
            "properties": {
                "alive_only": {"type": "boolean", "description": "Only return alive hosts. Default: true."},
            },
        },
    },
    # Read-only: vulnerability findings, optionally per host.
    {
        "name": "get_vulnerabilities",
        "description": "Return discovered vulnerabilities, optionally filtered by host IP.",
        "input_schema": {
            "type": "object",
            "properties": {
                "host_ip": {"type": "string", "description": "Filter by IP address. Empty = all hosts."},
                "limit": {"type": "integer", "description": "Max results. Default: 100."},
            },
        },
    },
    # Read-only: harvested credentials, optionally per service.
    {
        "name": "get_credentials",
        "description": "Return captured credentials, optionally filtered by service name.",
        "input_schema": {
            "type": "object",
            "properties": {
                "service": {"type": "string", "description": "Service filter (ssh, ftp, smb…). Empty = all."},
                "limit": {"type": "integer", "description": "Max results. Default: 100."},
            },
        },
    },
    # Read-only: past action executions, newest first.
    {
        "name": "get_action_history",
        "description": "Return the history of executed Bjorn actions, most recent first.",
        "input_schema": {
            "type": "object",
            "properties": {
                "limit": {"type": "integer", "description": "Max results. Default: 50."},
                "action_name": {"type": "string", "description": "Filter by action name. Empty = all."},
            },
        },
    },
    # Read-only: live operational status snapshot; takes no arguments.
    {
        "name": "get_status",
        "description": "Return Bjorn's current operational status, scan counters, and active action.",
        "input_schema": {"type": "object", "properties": {}},
    },
    # WRITE: queues an action for execution — the only mutating tool here.
    {
        "name": "run_action",
        "description": "Queue a Bjorn action (e.g. port_scan, ssh_bruteforce) against a target IP address.",
        "input_schema": {
            "type": "object",
            "properties": {
                "action_name": {"type": "string", "description": "Action module name (e.g. port_scan)."},
                "target_ip": {"type": "string", "description": "Target IP address."},
                "target_mac": {"type": "string", "description": "Target MAC address (optional)."},
            },
            "required": ["action_name", "target_ip"],
        },
    },
    # Read-only by contract: SELECT-only access to the SQLite database.
    {
        "name": "query_db",
        "description": "Run a read-only SELECT query against Bjorn's SQLite database.",
        "input_schema": {
            "type": "object",
            "properties": {
                "sql": {"type": "string", "description": "SELECT SQL statement."},
                "params": {"type": "array", "items": {"type": "string"}, "description": "Bind parameters."},
            },
            "required": ["sql"],
        },
    },
]
|
||||
|
||||
|
||||
class LLMBridge:
|
||||
"""
|
||||
Unified LLM backend with automatic cascade:
|
||||
1. LaRuche node discovered via LAND protocol (mDNS _ai-inference._tcp.local.)
|
||||
2. Ollama running locally (http://localhost:11434)
|
||||
3. External API (Anthropic / OpenAI / OpenRouter)
|
||||
4. None → caller falls back to templates
|
||||
|
||||
Singleton — one instance per process, thread-safe.
|
||||
"""
|
||||
|
||||
_instance: Optional["LLMBridge"] = None
|
||||
_init_lock = threading.Lock()
|
||||
|
||||
def __new__(cls) -> "LLMBridge":
|
||||
with cls._init_lock:
|
||||
if cls._instance is None:
|
||||
inst = super().__new__(cls)
|
||||
inst._ready = False
|
||||
cls._instance = inst
|
||||
return cls._instance
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Init
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def __init__(self) -> None:
|
||||
if self._ready:
|
||||
return
|
||||
with self._init_lock:
|
||||
if self._ready:
|
||||
return
|
||||
from init_shared import shared_data
|
||||
self._sd = shared_data
|
||||
self._laruche_url: Optional[str] = None
|
||||
self._laruche_lock = threading.Lock()
|
||||
self._discovery_active = False
|
||||
self._chat_histories: Dict[str, List[Dict]] = {} # session_id → messages
|
||||
self._hist_lock = threading.Lock()
|
||||
self._ready = True
|
||||
|
||||
# Always start mDNS discovery — even if LLM is disabled.
|
||||
# This way LaRuche URL is ready the moment the user enables LLM.
|
||||
if self._cfg("llm_laruche_discovery", True):
|
||||
self._start_laruche_discovery()
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Config helpers
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _cfg(self, key: str, default=None):
|
||||
return self._sd.config.get(key, getattr(self._sd, key, default))
|
||||
|
||||
def _is_enabled(self) -> bool:
|
||||
return bool(self._cfg("llm_enabled", False))
|
||||
|
||||
def _lang_instruction(self) -> str:
|
||||
"""Return a prompt sentence that forces the LLM to reply in the configured language."""
|
||||
_LANG_NAMES = {
|
||||
"en": "English", "fr": "French", "es": "Spanish", "de": "German",
|
||||
"it": "Italian", "pt": "Portuguese", "nl": "Dutch", "ru": "Russian",
|
||||
"zh": "Chinese", "ja": "Japanese", "ko": "Korean", "ar": "Arabic",
|
||||
"pl": "Polish", "sv": "Swedish", "no": "Norwegian", "da": "Danish",
|
||||
"fi": "Finnish", "cs": "Czech", "tr": "Turkish",
|
||||
}
|
||||
code = self._cfg("lang", "en")
|
||||
name = _LANG_NAMES.get(code, code)
|
||||
if code == "en":
|
||||
return "" # No extra instruction needed for English (default)
|
||||
return f"Always respond in {name}."
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# LaRuche / LAND discovery
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _start_laruche_discovery(self) -> None:
|
||||
"""Launch background mDNS discovery for LaRuche/LAND nodes (non-blocking)."""
|
||||
manual_url = self._cfg("llm_laruche_url", "")
|
||||
if manual_url:
|
||||
with self._laruche_lock:
|
||||
self._laruche_url = manual_url.rstrip("/")
|
||||
logger.info(f"LaRuche: manual URL configured → {self._laruche_url}")
|
||||
return
|
||||
|
||||
stop_event = threading.Event()
|
||||
self._discovery_stop = stop_event
|
||||
|
||||
def _on_found(url: str) -> None:
|
||||
with self._laruche_lock:
|
||||
if self._laruche_url != url:
|
||||
self._laruche_url = url
|
||||
logger.info(f"LaRuche: discovered LAND node → {url}")
|
||||
self._discovery_active = True
|
||||
|
||||
def _run() -> None:
|
||||
try:
|
||||
land_protocol.discover_node(_on_found, stop_event, logger=logger)
|
||||
except Exception as e:
|
||||
logger.warning(f"LAND discovery error: {e}")
|
||||
|
||||
threading.Thread(target=_run, daemon=True, name="LANDDiscovery").start()
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Public API
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def complete(
|
||||
self,
|
||||
messages: List[Dict[str, str]],
|
||||
max_tokens: Optional[int] = None,
|
||||
system: Optional[str] = None,
|
||||
timeout: Optional[int] = None,
|
||||
tools: Optional[List[Dict]] = None,
|
||||
) -> Optional[str]:
|
||||
"""
|
||||
Send a chat completion request through the configured cascade.
|
||||
|
||||
Args:
|
||||
messages: List of {"role": "user"|"assistant", "content": "..."}
|
||||
max_tokens: Override llm_max_tokens config value
|
||||
system: System prompt (prepended if supported by backend)
|
||||
timeout: Override llm_timeout_s config value
|
||||
|
||||
Returns:
|
||||
str response, or None if all backends fail / LLM disabled
|
||||
"""
|
||||
if not self._is_enabled():
|
||||
return None
|
||||
|
||||
max_tok = max_tokens or int(self._cfg("llm_max_tokens", 500))
|
||||
tout = timeout or int(self._cfg("llm_timeout_s", 30))
|
||||
backend = self._cfg("llm_backend", "auto")
|
||||
|
||||
if backend == "auto":
|
||||
order = ["laruche", "ollama", "api"]
|
||||
else:
|
||||
order = [backend]
|
||||
|
||||
for b in order:
|
||||
try:
|
||||
result = self._dispatch(b, messages, max_tok, tout, system, tools)
|
||||
if result:
|
||||
logger.info(f"LLM response from [{b}] (len={len(result)})")
|
||||
return result
|
||||
else:
|
||||
logger.warning(f"LLM backend [{b}] returned empty response — skipping")
|
||||
except Exception as exc:
|
||||
logger.warning(f"LLM backend [{b}] failed: {exc}")
|
||||
|
||||
logger.debug("All LLM backends failed — returning None (template fallback)")
|
||||
return None
|
||||
|
||||
def generate_comment(
|
||||
self,
|
||||
status: str,
|
||||
params: Optional[Dict[str, Any]] = None,
|
||||
) -> Optional[str]:
|
||||
"""
|
||||
Generate a short EPD status comment (≤ ~12 words).
|
||||
Used by comment.py when llm_comments_enabled=True.
|
||||
"""
|
||||
if not self._is_enabled():
|
||||
return None
|
||||
|
||||
lang = self._lang_instruction()
|
||||
custom_comment = str(self._cfg("llm_system_prompt_comment", "") or "").strip()
|
||||
if custom_comment:
|
||||
system = custom_comment + (f" {lang}" if lang else "")
|
||||
else:
|
||||
system = (
|
||||
"You are Bjorn, a terse Norse-themed autonomous security AI. "
|
||||
"Reply with ONE sentence of at most 12 words as a status comment. "
|
||||
"Be cryptic, dark, and technical. No punctuation at the end."
|
||||
+ (f" {lang}" if lang else "")
|
||||
)
|
||||
params_str = f" Context: {json.dumps(params)}" if params else ""
|
||||
prompt = f"Current status: {status}.{params_str} Write a brief status comment."
|
||||
|
||||
return self.complete(
|
||||
[{"role": "user", "content": prompt}],
|
||||
max_tokens=int(self._cfg("llm_comment_max_tokens", 80)),
|
||||
system=system,
|
||||
timeout=8, # Short timeout for EPD — fall back fast
|
||||
)
|
||||
|
||||
def chat(
|
||||
self,
|
||||
user_message: str,
|
||||
session_id: str = "default",
|
||||
system: Optional[str] = None,
|
||||
) -> Optional[str]:
|
||||
"""
|
||||
Stateful chat with Bjorn — maintains conversation history per session.
|
||||
"""
|
||||
if not self._is_enabled():
|
||||
return "LLM is disabled. Enable it in Settings → LLM Bridge."
|
||||
|
||||
max_hist = int(self._cfg("llm_chat_history_size", 20))
|
||||
|
||||
if system is None:
|
||||
system = self._build_system_prompt()
|
||||
|
||||
with self._hist_lock:
|
||||
history = self._chat_histories.setdefault(session_id, [])
|
||||
history.append({"role": "user", "content": user_message})
|
||||
# Keep history bounded
|
||||
if len(history) > max_hist:
|
||||
history[:] = history[-max_hist:]
|
||||
messages = list(history)
|
||||
|
||||
tools = _BJORN_TOOLS if self._cfg("llm_chat_tools_enabled", False) else None
|
||||
response = self.complete(messages, system=system, tools=tools)
|
||||
|
||||
if response:
|
||||
with self._hist_lock:
|
||||
self._chat_histories[session_id].append(
|
||||
{"role": "assistant", "content": response}
|
||||
)
|
||||
|
||||
return response or "No LLM backend available. Check Settings → LLM Bridge."
|
||||
|
||||
def clear_history(self, session_id: str = "default") -> None:
|
||||
with self._hist_lock:
|
||||
self._chat_histories.pop(session_id, None)
|
||||
|
||||
def status(self) -> Dict[str, Any]:
|
||||
"""Return current bridge status for the web UI."""
|
||||
with self._laruche_lock:
|
||||
laruche = self._laruche_url
|
||||
|
||||
return {
|
||||
"enabled": self._is_enabled(),
|
||||
"backend": self._cfg("llm_backend", "auto"),
|
||||
"laruche_url": laruche,
|
||||
"laruche_discovery": self._discovery_active,
|
||||
"ollama_url": self._cfg("llm_ollama_url", "http://127.0.0.1:11434"),
|
||||
"ollama_model": self._cfg("llm_ollama_model", "phi3:mini"),
|
||||
"api_provider": self._cfg("llm_api_provider", "anthropic"),
|
||||
"api_model": self._cfg("llm_api_model", "claude-haiku-4-5-20251001"),
|
||||
"api_key_set": bool(self._cfg("llm_api_key", "")),
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Backend dispatcher
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _dispatch(
|
||||
self,
|
||||
backend: str,
|
||||
messages: List[Dict],
|
||||
max_tokens: int,
|
||||
timeout: int,
|
||||
system: Optional[str],
|
||||
tools: Optional[List[Dict]] = None,
|
||||
) -> Optional[str]:
|
||||
if backend == "laruche":
|
||||
return self._call_laruche(messages, max_tokens, timeout, system)
|
||||
if backend == "ollama":
|
||||
return self._call_ollama(messages, max_tokens, timeout, system)
|
||||
if backend == "api":
|
||||
return self._call_api(messages, max_tokens, timeout, system, tools)
|
||||
return None
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# LaRuche backend (LAND /infer endpoint)
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _call_laruche(
|
||||
self,
|
||||
messages: List[Dict],
|
||||
max_tokens: int,
|
||||
timeout: int,
|
||||
system: Optional[str],
|
||||
) -> Optional[str]:
|
||||
with self._laruche_lock:
|
||||
url = self._laruche_url
|
||||
if not url:
|
||||
return None
|
||||
|
||||
# Build flat prompt string (LAND /infer expects a single prompt)
|
||||
prompt_parts = []
|
||||
if system:
|
||||
prompt_parts.append(f"[System]: {system}")
|
||||
for m in messages:
|
||||
role = m.get("role", "user").capitalize()
|
||||
prompt_parts.append(f"[{role}]: {m.get('content', '')}")
|
||||
prompt = "\n".join(prompt_parts)
|
||||
|
||||
model = self._cfg("llm_laruche_model", "") or None
|
||||
return land_protocol.infer(url, prompt, max_tokens=max_tokens, capability="llm", model=model, timeout=timeout)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Ollama backend (/api/chat)
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _call_ollama(
|
||||
self,
|
||||
messages: List[Dict],
|
||||
max_tokens: int,
|
||||
timeout: int,
|
||||
system: Optional[str],
|
||||
) -> Optional[str]:
|
||||
base = self._cfg("llm_ollama_url", "http://127.0.0.1:11434").rstrip("/")
|
||||
model = self._cfg("llm_ollama_model", "phi3:mini")
|
||||
|
||||
# Ollama /api/chat supports system messages natively
|
||||
ollama_messages = []
|
||||
if system:
|
||||
ollama_messages.append({"role": "system", "content": system})
|
||||
ollama_messages.extend(messages)
|
||||
|
||||
payload = {
|
||||
"model": model,
|
||||
"messages": ollama_messages,
|
||||
"stream": False,
|
||||
"options": {"num_predict": max_tokens},
|
||||
}
|
||||
data = json.dumps(payload).encode()
|
||||
req = urllib.request.Request(
|
||||
f"{base}/api/chat",
|
||||
data=data,
|
||||
headers={"Content-Type": "application/json"},
|
||||
method="POST",
|
||||
)
|
||||
with urllib.request.urlopen(req, timeout=timeout) as resp:
|
||||
body = json.loads(resp.read().decode())
|
||||
return body.get("message", {}).get("content") or None
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# External API backend (Anthropic / OpenAI / OpenRouter)
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _call_api(
|
||||
self,
|
||||
messages: List[Dict],
|
||||
max_tokens: int,
|
||||
timeout: int,
|
||||
system: Optional[str],
|
||||
tools: Optional[List[Dict]] = None,
|
||||
) -> Optional[str]:
|
||||
provider = self._cfg("llm_api_provider", "anthropic")
|
||||
api_key = self._cfg("llm_api_key", "")
|
||||
if not api_key:
|
||||
return None
|
||||
|
||||
if provider == "anthropic":
|
||||
return self._call_anthropic(messages, max_tokens, timeout, system, api_key, tools)
|
||||
else:
|
||||
# OpenAI-compatible (openai / openrouter)
|
||||
return self._call_openai_compat(messages, max_tokens, timeout, system, api_key)
|
||||
|
||||
    def _call_anthropic(
        self,
        messages: List[Dict],
        max_tokens: int,
        timeout: int,
        system: Optional[str],
        api_key: str,
        tools: Optional[List[Dict]] = None,
    ) -> Optional[str]:
        """Call Anthropic Messages API with optional agentic tool-calling loop.

        When *tools* is given and the model stops with "tool_use", each
        requested tool is executed via _execute_tool and the results are fed
        back as the next user turn (Anthropic tool protocol). Returns the
        first text block of the final response, or None.
        """
        model = self._cfg("llm_api_model", "claude-haiku-4-5-20251001")
        base_url = self._cfg("llm_api_base_url", "") or "https://api.anthropic.com"
        api_url = f"{base_url.rstrip('/')}/v1/messages"
        headers = {
            "Content-Type": "application/json",
            "x-api-key": api_key,
            "anthropic-version": "2023-06-01",
        }

        # Work on a copy: tool rounds append assistant / tool_result turns.
        current_messages = list(messages)

        for _round in range(6):  # max 5 tool-call rounds + 1 final
            payload: Dict[str, Any] = {
                "model": model,
                "max_tokens": max_tokens,
                "messages": current_messages,
            }
            if system:
                payload["system"] = system
            if tools:
                payload["tools"] = tools

            data = json.dumps(payload).encode()
            req = urllib.request.Request(api_url, data=data, headers=headers, method="POST")
            with urllib.request.urlopen(req, timeout=timeout) as resp:
                body = json.loads(resp.read().decode())

            stop_reason = body.get("stop_reason")
            content = body.get("content", [])

            if stop_reason != "tool_use" or not tools:
                # Final text response — return the first text block, if any.
                for block in content:
                    if isinstance(block, dict) and block.get("type") == "text":
                        return block.get("text") or None
                return None

            # ---- tool_use round ----
            # Echo the assistant turn back verbatim, then execute every
            # requested tool and attach the results as a single user turn.
            current_messages.append({"role": "assistant", "content": content})
            tool_results = []
            for block in content:
                if isinstance(block, dict) and block.get("type") == "tool_use":
                    result_text = self._execute_tool(block["name"], block.get("input", {}))
                    logger.debug(f"Tool [{block['name']}] → {result_text[:200]}")
                    tool_results.append({
                        "type": "tool_result",
                        "tool_use_id": block["id"],
                        "content": result_text,
                    })
            if not tool_results:
                # Model claimed tool_use but produced no tool blocks — bail out.
                break
            current_messages.append({"role": "user", "content": tool_results})

        # Round budget exhausted without a final text answer.
        return None
|
||||
|
||||
def _execute_tool(self, name: str, inputs: Dict) -> str:
|
||||
"""Execute a Bjorn tool by name and return a JSON string result."""
|
||||
try:
|
||||
import mcp_server
|
||||
except Exception as e:
|
||||
return json.dumps({"error": f"mcp_server unavailable: {e}"})
|
||||
|
||||
allowed: List[str] = self._cfg("mcp_allowed_tools", [])
|
||||
if name not in allowed:
|
||||
return json.dumps({"error": f"Tool '{name}' is not enabled in Bjorn MCP config."})
|
||||
|
||||
try:
|
||||
if name == "get_hosts":
|
||||
return mcp_server._impl_get_hosts(inputs.get("alive_only", True))
|
||||
if name == "get_vulnerabilities":
|
||||
return mcp_server._impl_get_vulnerabilities(
|
||||
inputs.get("host_ip") or None, inputs.get("limit", 100)
|
||||
)
|
||||
if name == "get_credentials":
|
||||
return mcp_server._impl_get_credentials(
|
||||
inputs.get("service") or None, inputs.get("limit", 100)
|
||||
)
|
||||
if name == "get_action_history":
|
||||
return mcp_server._impl_get_action_history(
|
||||
inputs.get("limit", 50), inputs.get("action_name") or None
|
||||
)
|
||||
if name == "get_status":
|
||||
return mcp_server._impl_get_status()
|
||||
if name == "run_action":
|
||||
return mcp_server._impl_run_action(
|
||||
inputs["action_name"], inputs["target_ip"], inputs.get("target_mac", "")
|
||||
)
|
||||
if name == "query_db":
|
||||
return mcp_server._impl_query_db(inputs["sql"], inputs.get("params"))
|
||||
return json.dumps({"error": f"Unknown tool: {name}"})
|
||||
except Exception as e:
|
||||
return json.dumps({"error": str(e)})
|
||||
|
||||
def _call_openai_compat(
|
||||
self,
|
||||
messages: List[Dict],
|
||||
max_tokens: int,
|
||||
timeout: int,
|
||||
system: Optional[str],
|
||||
api_key: str,
|
||||
) -> Optional[str]:
|
||||
"""Call OpenAI-compatible API (OpenAI / OpenRouter / local)."""
|
||||
model = self._cfg("llm_api_model", "gpt-4o-mini")
|
||||
base_url = (
|
||||
self._cfg("llm_api_base_url", "")
|
||||
or "https://api.openai.com"
|
||||
)
|
||||
|
||||
oai_messages = []
|
||||
if system:
|
||||
oai_messages.append({"role": "system", "content": system})
|
||||
oai_messages.extend(messages)
|
||||
|
||||
payload = {
|
||||
"model": model,
|
||||
"messages": oai_messages,
|
||||
"max_tokens": max_tokens,
|
||||
}
|
||||
data = json.dumps(payload).encode()
|
||||
req = urllib.request.Request(
|
||||
f"{base_url.rstrip('/')}/v1/chat/completions",
|
||||
data=data,
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"Authorization": f"Bearer {api_key}",
|
||||
},
|
||||
method="POST",
|
||||
)
|
||||
with urllib.request.urlopen(req, timeout=timeout) as resp:
|
||||
body = json.loads(resp.read().decode())
|
||||
return body.get("choices", [{}])[0].get("message", {}).get("content") or None
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# System prompt builder
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _build_system_prompt(self) -> str:
|
||||
try:
|
||||
hosts = self._sd.target_count
|
||||
vulns = self._sd.vuln_count
|
||||
creds = self._sd.cred_count
|
||||
mode = self._sd.operation_mode
|
||||
status = getattr(self._sd, "bjorn_status_text", "IDLE")
|
||||
except Exception:
|
||||
hosts, vulns, creds, mode, status = "?", "?", "?", "?", "IDLE"
|
||||
|
||||
# Use custom prompt if configured, otherwise default
|
||||
custom = str(self._cfg("llm_system_prompt_chat", "") or "").strip()
|
||||
if custom:
|
||||
base = custom
|
||||
else:
|
||||
base = (
|
||||
f"You are Bjorn, an autonomous network security AI assistant running on a Raspberry Pi. "
|
||||
f"Current state: {hosts} hosts discovered, {vulns} vulnerabilities, {creds} credentials captured. "
|
||||
f"Operation mode: {mode}. Current action: {status}. "
|
||||
f"Answer security questions concisely and technically. "
|
||||
f"You can discuss network topology, vulnerabilities, and suggest next steps. "
|
||||
f"Use brief Norse references occasionally. Never break character."
|
||||
)
|
||||
|
||||
# Inject user profile if set
|
||||
user_name = str(self._cfg("llm_user_name", "") or "").strip()
|
||||
user_bio = str(self._cfg("llm_user_bio", "") or "").strip()
|
||||
if user_name:
|
||||
base += f"\nThe operator's name is {user_name}."
|
||||
if user_bio:
|
||||
base += f" {user_bio}"
|
||||
|
||||
lang = self._lang_instruction()
|
||||
return base + (f" {lang}" if lang else "")
|
||||
757
llm_orchestrator.py
Normal file
757
llm_orchestrator.py
Normal file
@@ -0,0 +1,757 @@
|
||||
# llm_orchestrator.py
|
||||
# LLM-based orchestration layer for Bjorn.
|
||||
#
|
||||
# Modes (llm_orchestrator_mode in config):
|
||||
# none — disabled (default); LLM has no role in scheduling
|
||||
# advisor — LLM reviews state periodically and injects ONE priority action
|
||||
# autonomous — LLM runs its own agentic cycle, observes via MCP tools, queues actions
|
||||
#
|
||||
# Prerequisites: llm_enabled=True, llm_orchestrator_mode != "none"
|
||||
#
|
||||
# Guard rails:
|
||||
# llm_orchestrator_allowed_actions — whitelist for run_action (empty = mcp_allowed_tools)
|
||||
# llm_orchestrator_max_actions — hard cap on actions per autonomous cycle
|
||||
# llm_orchestrator_interval_s — cooldown between autonomous cycles
|
||||
# Falls back silently when LLM unavailable (no crash, no spam)
|
||||
|
||||
import json
|
||||
import threading
|
||||
import time
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from logger import Logger
|
||||
|
||||
logger = Logger(name="llm_orchestrator.py", level=20)
|
||||
|
||||
# Priority levels (must stay above normal scheduler/queue to be useful)
|
||||
_ADVISOR_PRIORITY = 85 # advisor > MCP (80) > normal (50) > scheduler (40)
|
||||
_AUTONOMOUS_PRIORITY = 82
|
||||
|
||||
|
||||
class LLMOrchestrator:
|
||||
"""
|
||||
LLM-based orchestration layer.
|
||||
|
||||
advisor mode — called from orchestrator background tasks; LLM suggests one action.
|
||||
autonomous mode — runs its own thread; LLM loops with full tool-calling.
|
||||
"""
|
||||
|
||||
    def __init__(self, shared_data):
        """Bind shared state; the worker thread is only created by start()."""
        # Bjorn-wide shared state (config dict, DB handle, counters, queue event).
        self._sd = shared_data
        # Autonomous-mode worker thread; None while not running.
        self._thread: Optional[threading.Thread] = None
        # Set to ask the autonomous loop to exit.
        self._stop: threading.Event = threading.Event()
        # Last (hosts, vulns, creds, last_queue_id) snapshot; None before first cycle.
        self._last_fingerprint: Optional[tuple] = None
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Lifecycle
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def start(self) -> None:
|
||||
mode = self._mode()
|
||||
if mode == "autonomous":
|
||||
if self._thread and self._thread.is_alive():
|
||||
return
|
||||
self._stop.clear()
|
||||
self._thread = threading.Thread(
|
||||
target=self._autonomous_loop, daemon=True, name="LLMOrchestrator"
|
||||
)
|
||||
self._thread.start()
|
||||
logger.info("LLM Orchestrator started (autonomous)")
|
||||
elif mode == "advisor":
|
||||
logger.info("LLM Orchestrator ready (advisor — called from background tasks)")
|
||||
|
||||
def stop(self) -> None:
|
||||
self._stop.set()
|
||||
if self._thread and self._thread.is_alive():
|
||||
self._thread.join(timeout=15)
|
||||
self._thread = None
|
||||
|
||||
def restart_if_mode_changed(self) -> None:
|
||||
"""
|
||||
Call from the orchestrator main loop to react to runtime config changes.
|
||||
Starts/stops the autonomous thread when the mode changes.
|
||||
"""
|
||||
mode = self._mode()
|
||||
running = self._thread is not None and self._thread.is_alive()
|
||||
|
||||
if mode == "autonomous" and not running and self._is_llm_enabled():
|
||||
self.start()
|
||||
elif mode != "autonomous" and running:
|
||||
self.stop()
|
||||
|
||||
def is_active(self) -> bool:
|
||||
return self._thread is not None and self._thread.is_alive()
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Config helpers
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _mode(self) -> str:
|
||||
return str(self._sd.config.get("llm_orchestrator_mode", "none"))
|
||||
|
||||
def _is_llm_enabled(self) -> bool:
|
||||
return bool(self._sd.config.get("llm_enabled", False))
|
||||
|
||||
def _allowed_actions(self) -> List[str]:
|
||||
"""
|
||||
Bjorn action module names the LLM may queue via run_action.
|
||||
Falls back to all loaded action names if empty.
|
||||
NOTE: These are action MODULE names (e.g. 'NetworkScanner', 'SSHBruteforce'),
|
||||
NOT MCP tool names (get_hosts, run_action, etc.).
|
||||
"""
|
||||
custom = self._sd.config.get("llm_orchestrator_allowed_actions", [])
|
||||
if custom:
|
||||
return list(custom)
|
||||
# Auto-discover from loaded actions
|
||||
try:
|
||||
loaded = getattr(self._sd, 'loaded_action_names', None)
|
||||
if loaded:
|
||||
return list(loaded)
|
||||
except Exception:
|
||||
pass
|
||||
# Fallback: ask the DB for known action names
|
||||
try:
|
||||
rows = self._sd.db.query(
|
||||
"SELECT DISTINCT action_name FROM action_queue ORDER BY action_name"
|
||||
)
|
||||
if rows:
|
||||
return [r["action_name"] for r in rows]
|
||||
except Exception:
|
||||
pass
|
||||
return []
|
||||
|
||||
def _max_actions(self) -> int:
|
||||
return max(1, int(self._sd.config.get("llm_orchestrator_max_actions", 3)))
|
||||
|
||||
def _interval(self) -> int:
|
||||
return max(30, int(self._sd.config.get("llm_orchestrator_interval_s", 60)))
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Advisor mode (called externally from orchestrator background tasks)
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def advise(self) -> Optional[str]:
|
||||
"""
|
||||
Ask the LLM for ONE tactical action recommendation.
|
||||
Returns the action name if one was queued, else None.
|
||||
"""
|
||||
if not self._is_llm_enabled() or self._mode() != "advisor":
|
||||
return None
|
||||
|
||||
try:
|
||||
from llm_bridge import LLMBridge
|
||||
|
||||
allowed = self._allowed_actions()
|
||||
if not allowed:
|
||||
return None
|
||||
|
||||
snapshot = self._build_snapshot()
|
||||
real_ips = snapshot.get("VALID_TARGET_IPS", [])
|
||||
ip_list_str = ", ".join(real_ips) if real_ips else "(none)"
|
||||
|
||||
system = (
|
||||
"You are Bjorn's tactical advisor. Review the current network state "
|
||||
"and suggest ONE action to queue, or nothing if the queue is sufficient. "
|
||||
"Reply ONLY with valid JSON — no markdown, no commentary.\n"
|
||||
'Format when action needed: {"action": "ActionName", "target_ip": "1.2.3.4", "reason": "brief"}\n'
|
||||
'Format when nothing needed: {"action": null}\n'
|
||||
"action must be exactly one of: " + ", ".join(allowed) + "\n"
|
||||
f"target_ip MUST be one of these exact IPs: {ip_list_str}\n"
|
||||
"NEVER use placeholder IPs. Only use IPs from the hosts_alive list."
|
||||
)
|
||||
prompt = (
|
||||
f"Current Bjorn state:\n{json.dumps(snapshot, indent=2)}\n\n"
|
||||
"Suggest one action or null."
|
||||
)
|
||||
|
||||
raw = LLMBridge().complete(
|
||||
[{"role": "user", "content": prompt}],
|
||||
system=system,
|
||||
max_tokens=150,
|
||||
timeout=20,
|
||||
)
|
||||
if not raw:
|
||||
return None
|
||||
|
||||
return self._apply_advisor_response(raw, allowed)
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"LLM advisor error: {e}")
|
||||
return None
|
||||
|
||||
def _apply_advisor_response(self, raw: str, allowed: List[str]) -> Optional[str]:
|
||||
"""Parse advisor JSON and queue the suggested action. Returns action name or None."""
|
||||
try:
|
||||
text = raw.strip()
|
||||
# Strip markdown fences if the model added them
|
||||
if "```" in text:
|
||||
parts = text.split("```")
|
||||
text = parts[1] if len(parts) > 1 else parts[0]
|
||||
if text.startswith("json"):
|
||||
text = text[4:]
|
||||
|
||||
data = json.loads(text.strip())
|
||||
action = data.get("action")
|
||||
if not action:
|
||||
logger.debug("LLM advisor: no action suggested this cycle")
|
||||
return None
|
||||
|
||||
if action not in allowed:
|
||||
logger.warning(f"LLM advisor suggested disallowed action '{action}' — ignored")
|
||||
return None
|
||||
|
||||
target_ip = str(data.get("target_ip", "")).strip()
|
||||
reason = str(data.get("reason", "llm_advisor"))[:120]
|
||||
|
||||
mac = self._resolve_mac(target_ip)
|
||||
|
||||
self._sd.db.queue_action(
|
||||
action_name=action,
|
||||
mac=mac,
|
||||
ip=target_ip,
|
||||
priority=_ADVISOR_PRIORITY,
|
||||
trigger="llm_advisor",
|
||||
metadata={
|
||||
"decision_method": "llm_advisor",
|
||||
"decision_origin": "llm",
|
||||
"ai_reason": reason,
|
||||
},
|
||||
)
|
||||
try:
|
||||
self._sd.queue_event.set()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
logger.info(f"[LLM_ADVISOR] → {action} @ {target_ip}: {reason}")
|
||||
return action
|
||||
|
||||
except json.JSONDecodeError:
|
||||
logger.debug(f"LLM advisor: invalid JSON: {raw[:200]}")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.debug(f"LLM advisor apply error: {e}")
|
||||
return None
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Autonomous mode (own thread)
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _autonomous_loop(self) -> None:
|
||||
logger.info("LLM Orchestrator autonomous loop starting")
|
||||
while not self._stop.is_set():
|
||||
try:
|
||||
if self._is_llm_enabled() and self._mode() == "autonomous":
|
||||
self._run_autonomous_cycle()
|
||||
else:
|
||||
# Mode was switched off at runtime — stop thread
|
||||
break
|
||||
except Exception as e:
|
||||
logger.error(f"LLM autonomous cycle error: {e}")
|
||||
|
||||
self._stop.wait(self._interval())
|
||||
|
||||
logger.info("LLM Orchestrator autonomous loop stopped")
|
||||
|
||||
def _compute_fingerprint(self) -> tuple:
|
||||
"""
|
||||
Compact state fingerprint: (hosts, vulns, creds, last_completed_queue_id).
|
||||
Only increases are meaningful — a host going offline is not an opportunity.
|
||||
"""
|
||||
try:
|
||||
hosts = int(getattr(self._sd, "target_count", 0))
|
||||
vulns = int(getattr(self._sd, "vuln_count", 0))
|
||||
creds = int(getattr(self._sd, "cred_count", 0))
|
||||
row = self._sd.db.query_one(
|
||||
"SELECT MAX(id) AS mid FROM action_queue WHERE status IN ('success','failed')"
|
||||
)
|
||||
last_id = int(row["mid"]) if row and row["mid"] is not None else 0
|
||||
return (hosts, vulns, creds, last_id)
|
||||
except Exception:
|
||||
return (0, 0, 0, 0)
|
||||
|
||||
def _has_actionable_change(self, fp: tuple) -> bool:
|
||||
"""
|
||||
Return True only if something *increased* since the last cycle:
|
||||
- new host discovered (hosts ↑)
|
||||
- new vulnerability found (vulns ↑)
|
||||
- new credential captured (creds ↑)
|
||||
- an action completed (last_id ↑)
|
||||
A host going offline (hosts ↓) is not an actionable event.
|
||||
"""
|
||||
if self._last_fingerprint is None:
|
||||
return True # first cycle always runs
|
||||
return any(fp[i] > self._last_fingerprint[i] for i in range(len(fp)))
|
||||
|
||||
    def _run_autonomous_cycle(self) -> None:
        """
        One autonomous cycle.

        Two paths based on backend capability:
          A) API backend (Anthropic) → agentic tool-calling loop
          B) LaRuche / Ollama       → snapshot-based JSON prompt (no tool-calling)

        Path B injects the full network state into the prompt and asks the LLM
        to reply with a JSON array of actions. This works with any text-only LLM.
        """
        # Skip if nothing actionable changed (save tokens)
        if self._sd.config.get("llm_orchestrator_skip_if_no_change", True):
            fp = self._compute_fingerprint()
            if not self._has_actionable_change(fp):
                logger.debug("LLM autonomous: no actionable change, skipping cycle (no tokens used)")
                return
            self._last_fingerprint = fp

        try:
            from llm_bridge import LLMBridge, _BJORN_TOOLS
        except ImportError as e:
            logger.warning(f"LLM Orchestrator: cannot import llm_bridge: {e}")
            return

        bridge = LLMBridge()
        allowed = self._allowed_actions()
        max_act = self._max_actions()

        # Detect if the active backend supports tool-calling.
        # "auto" falls through laruche → ollama → api, so tools are only
        # available when neither local backend is reachable.
        # NOTE(review): reads bridge._laruche_url without its lock — presumably
        # fine for a boolean heuristic; confirm against llm_bridge.
        backend = self._sd.config.get("llm_backend", "auto")
        supports_tools = (backend == "api") or (
            backend == "auto" and not bridge._laruche_url
            and not self._ollama_reachable()
        )

        if supports_tools:
            response = self._cycle_with_tools(bridge, allowed, max_act)
        else:
            response = self._cycle_without_tools(bridge, allowed, max_act)

        if response:
            log_reasoning = self._sd.config.get("llm_orchestrator_log_reasoning", False)
            prompt_desc = f"Autonomous cycle (tools={'yes' if supports_tools else 'no'})"
            if log_reasoning:
                # Full reasoning goes to the log AND the review chat session.
                logger.info(f"[LLM_ORCH_REASONING]\n{response}")
                self._push_to_chat(bridge, prompt_desc, response)
            else:
                # Brief log only — first 300 chars of the cycle summary.
                logger.info(f"[LLM_AUTONOMOUS] {response[:300]}")
|
||||
|
||||
def _ollama_reachable(self) -> bool:
|
||||
"""Quick check if Ollama is up (for backend detection)."""
|
||||
try:
|
||||
base = self._sd.config.get("llm_ollama_url", "http://127.0.0.1:11434").rstrip("/")
|
||||
import urllib.request
|
||||
urllib.request.urlopen(f"{base}/api/tags", timeout=2)
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
# ------ Path A: agentic tool-calling (Anthropic API only) ------
|
||||
|
||||
def _cycle_with_tools(self, bridge, allowed: List[str], max_act: int) -> Optional[str]:
|
||||
"""Full agentic loop: LLM calls MCP tools and queues actions."""
|
||||
from llm_bridge import _BJORN_TOOLS
|
||||
|
||||
read_only = {"get_hosts", "get_vulnerabilities", "get_credentials",
|
||||
"get_action_history", "get_status", "query_db"}
|
||||
tools = [
|
||||
t for t in _BJORN_TOOLS
|
||||
if t["name"] in read_only or t["name"] == "run_action"
|
||||
]
|
||||
|
||||
system = self._build_autonomous_system_prompt(allowed, max_act)
|
||||
prompt = (
|
||||
"Start a new orchestration cycle. "
|
||||
"Use get_status and get_hosts to understand the current state. "
|
||||
f"Then queue up to {max_act} high-value action(s) via run_action. "
|
||||
"When done, summarise what you queued and why."
|
||||
)
|
||||
|
||||
return bridge.complete(
|
||||
[{"role": "user", "content": prompt}],
|
||||
system=system,
|
||||
tools=tools,
|
||||
max_tokens=1000,
|
||||
timeout=90,
|
||||
)
|
||||
|
||||
# ------ Path B: snapshot + JSON parsing (LaRuche / Ollama) ------
|
||||
|
||||
    def _cycle_without_tools(self, bridge, allowed: List[str], max_act: int) -> Optional[str]:
        """
        No tool-calling: inject state snapshot into prompt, ask LLM for JSON actions.
        Parse the response and queue actions ourselves.

        Returns the LLM's raw output with a queued-actions footer appended,
        or None when the LLM produced nothing.
        """
        snapshot = self._build_snapshot()
        allowed_str = ", ".join(allowed) if allowed else "none"

        # Extract the real IP list so we can stress it in the prompt
        real_ips = snapshot.get("VALID_TARGET_IPS", [])
        ip_list_str = ", ".join(real_ips) if real_ips else "(no hosts discovered yet)"

        # Short system prompt — small models forget long instructions
        system = (
            "You are a network security orchestrator. "
            "You receive network scan data and output a JSON array of actions. "
            "Output ONLY a JSON array. No explanations, no markdown, no commentary."
        )

        # Put the real instructions in the user message AFTER the data,
        # so the model sees them last (recency bias helps small models).
        prompt = (
            f"Network state:\n{json.dumps(snapshot, indent=2)}\n\n"
            "---\n"
            f"Pick up to {max_act} actions from: {allowed_str}\n"
            f"Target IPs MUST be from this list: {ip_list_str}\n"
            "Match actions to open ports. Skip hosts already in pending_queue.\n"
            "Output ONLY a JSON array like:\n"
            '[{"action":"ActionName","target_ip":"1.2.3.4","reason":"brief"}]\n'
            "or [] if nothing needed.\n"
            "JSON array:"
        )

        # Use an assistant prefix to force the model into JSON mode.
        # Many LLMs will continue from this prefix rather than describe.
        messages = [
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": "["},
        ]

        raw = bridge.complete(
            messages,
            system=system,
            max_tokens=500,
            timeout=60,
        )

        # Prepend the '[' prefix we forced if the model didn't include it
        if raw and not raw.strip().startswith("["):
            raw = "[" + raw

        if not raw:
            return None

        # Parse and queue actions
        queued = self._parse_and_queue_actions(raw, allowed, max_act)

        summary = raw.strip()
        if queued:
            summary += f"\n\n[Orchestrator queued {len(queued)} action(s): {', '.join(queued)}]"
        else:
            summary += "\n\n[Orchestrator: no valid actions parsed from LLM response]"

        return summary
|
||||
|
||||
@staticmethod
|
||||
def _is_valid_ip(ip: str) -> bool:
|
||||
"""Check that ip is a real IPv4 address (no placeholders like 192.168.1.x)."""
|
||||
parts = ip.split(".")
|
||||
if len(parts) != 4:
|
||||
return False
|
||||
for p in parts:
|
||||
try:
|
||||
n = int(p)
|
||||
if n < 0 or n > 255:
|
||||
return False
|
||||
except ValueError:
|
||||
return False # catches 'x', 'xx', etc.
|
||||
return True
|
||||
|
||||
    def _parse_and_queue_actions(self, raw: str, allowed: List[str], max_act: int) -> List[str]:
        """Parse JSON array from LLM response and queue valid actions. Returns list of queued action names.

        Validation pipeline per item: dict shape → action in *allowed* →
        non-empty, syntactically valid IPv4 target → MAC resolvable from the
        hosts table. Items failing any step are skipped with a log line; at
        most *max_act* items are considered. Never raises.
        """
        queued = []
        try:
            text = raw.strip()
            # Strip markdown fences
            if "```" in text:
                parts = text.split("```")
                text = parts[1] if len(parts) > 1 else parts[0]
                if text.startswith("json"):
                    text = text[4:]
            text = text.strip()

            # Try to find JSON array in the text
            start = text.find("[")
            end = text.rfind("]")
            if start == -1 or end == -1:
                # Check if the model wrote a text description instead of JSON
                if any(text.lower().startswith(w) for w in ("this ", "here", "the ", "based", "from ", "i ")):
                    logger.warning(
                        "LLM autonomous: model returned a text description instead of JSON array. "
                        "The model may not support structured output. First 120 chars: "
                        + text[:120]
                    )
                else:
                    logger.debug(f"LLM autonomous: no JSON array found in response: {text[:120]}")
                return []

            data = json.loads(text[start:end + 1])
            # Tolerate a single object instead of an array.
            if not isinstance(data, list):
                data = [data]

            for item in data[:max_act]:
                if not isinstance(item, dict):
                    continue
                # NOTE(review): assumes "action" is a string when present —
                # a non-string value makes .strip() raise, which the outer
                # except swallows, aborting the remaining items.
                action = item.get("action", "").strip()
                target_ip = str(item.get("target_ip", "")).strip()
                reason = str(item.get("reason", "llm_autonomous"))[:120]

                if not action or action not in allowed:
                    logger.debug(f"LLM autonomous: skipping invalid/disallowed action '{action}'")
                    continue
                if not target_ip:
                    logger.debug(f"LLM autonomous: skipping '{action}' — no target_ip")
                    continue
                if not self._is_valid_ip(target_ip):
                    logger.warning(
                        f"LLM autonomous: skipping '{action}' — invalid/placeholder IP '{target_ip}' "
                        f"(LLM must use exact IPs from alive_hosts)"
                    )
                    continue

                # MAC resolution doubles as "is this a known host" check.
                mac = self._resolve_mac(target_ip)
                if not mac:
                    logger.warning(
                        f"LLM autonomous: skipping '{action}' @ {target_ip} — "
                        f"IP not found in hosts table (LLM used an IP not in alive_hosts)"
                    )
                    continue

                self._sd.db.queue_action(
                    action_name=action,
                    mac=mac,
                    ip=target_ip,
                    priority=_AUTONOMOUS_PRIORITY,
                    trigger="llm_autonomous",
                    metadata={
                        "decision_method": "llm_autonomous",
                        "decision_origin": "llm",
                        "ai_reason": reason,
                    },
                )
                queued.append(f"{action}@{target_ip}")
                logger.info(f"[LLM_AUTONOMOUS] → {action} @ {target_ip} (mac={mac}): {reason}")

            # Best-effort wake-up of the queue consumer.
            if queued:
                try:
                    self._sd.queue_event.set()
                except Exception:
                    pass

        except json.JSONDecodeError as e:
            logger.debug(f"LLM autonomous: JSON parse error: {e} — raw: {raw[:200]}")
        except Exception as e:
            logger.debug(f"LLM autonomous: action queue error: {e}")

        return queued
|
||||
|
||||
def _build_autonomous_system_prompt(self, allowed: List[str], max_act: int) -> str:
|
||||
try:
|
||||
hosts = getattr(self._sd, "target_count", "?")
|
||||
vulns = getattr(self._sd, "vuln_count", "?")
|
||||
creds = getattr(self._sd, "cred_count", "?")
|
||||
mode = getattr(self._sd, "operation_mode", "?")
|
||||
except Exception:
|
||||
hosts = vulns = creds = mode = "?"
|
||||
|
||||
allowed_str = ", ".join(allowed) if allowed else "none"
|
||||
|
||||
lang = ""
|
||||
try:
|
||||
from llm_bridge import LLMBridge
|
||||
lang = LLMBridge()._lang_instruction()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return (
|
||||
"You are Bjorn's Cyberviking autonomous orchestrator, running on a Raspberry Pi network security tool. "
|
||||
f"Current state: {hosts} hosts discovered, {vulns} vulnerabilities, {creds} credentials. "
|
||||
f"Operation mode: {mode}. "
|
||||
"Your objective: observe the network state via tools, then queue the most valuable actions. "
|
||||
f"Hard limit: at most {max_act} run_action calls per cycle. "
|
||||
f"Only these action names may be queued: {allowed_str}. "
|
||||
"Strategy: prioritise unexplored services, hosts with high port counts, and hosts with no recent scans. "
|
||||
"Do not queue duplicate actions already pending or recently successful. "
|
||||
"Use Norse references occasionally. Be terse and tactical."
|
||||
+ (f" {lang}" if lang else "")
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Shared helpers
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _push_to_chat(self, bridge, user_prompt: str, assistant_response: str) -> None:
|
||||
"""
|
||||
Inject the LLM's reasoning into the 'llm_orchestrator' chat session
|
||||
so it can be reviewed in chat.html (load session 'llm_orchestrator').
|
||||
Keeps last 40 messages to avoid unbounded memory.
|
||||
"""
|
||||
try:
|
||||
with bridge._hist_lock:
|
||||
hist = bridge._chat_histories.setdefault("llm_orchestrator", [])
|
||||
hist.append({"role": "user", "content": f"[Autonomous cycle]\n{user_prompt}"})
|
||||
hist.append({"role": "assistant", "content": assistant_response})
|
||||
if len(hist) > 40:
|
||||
hist[:] = hist[-40:]
|
||||
except Exception as e:
|
||||
logger.debug(f"LLM reasoning push to chat failed: {e}")
|
||||
|
||||
def _resolve_mac(self, ip: str) -> str:
|
||||
"""Resolve IP → MAC from hosts table. Column is 'ips' (may hold multiple IPs)."""
|
||||
if not ip:
|
||||
return ""
|
||||
try:
|
||||
row = self._sd.db.query_one(
|
||||
"SELECT mac_address FROM hosts WHERE ips LIKE ? LIMIT 1", (f"%{ip}%",)
|
||||
)
|
||||
return row["mac_address"] if row else ""
|
||||
except Exception:
|
||||
return ""
|
||||
|
||||
def _build_snapshot(self) -> Dict[str, Any]:
    """
    Build a rich network-state snapshot for advisor / autonomous prompts.

    Result keys (empty sections are omitted where noted):
    - VALID_TARGET_IPS      : flat list of alive-host IPs (prompt emphasis)
    - hosts_alive           : host details (ip, mac, hostname, vendor, ports)
    - operation_mode        : current operation mode ('?' if unavailable)
    - services_detected     : open services per host, only if any found
    - vulnerabilities_found : active vulnerabilities, only if any found
    - credentials_captured  : captured credentials, only if any found
    - available_actions     : queueable actions catalog, only if any found
    - pending_queue         : actions already queued (always present)
    - recent_actions        : last completed actions, to avoid repeats
    - summary               : aggregate counts (hosts_alive, vulns, creds)

    Each DB section below is best-effort: a failure leaves that section
    empty rather than aborting the whole snapshot.
    NOTE(review): rows are assumed dict-like (they support .get) — this
    matches the project's DB wrapper usage throughout this class; confirm
    if the wrapper changes.
    """
    hosts, services, vulns, creds = [], [], [], []
    actions_catalog, pending, history = [], [], []

    # ── Alive hosts ──
    try:
        rows = self._sd.db.query(
            "SELECT mac_address, ips, hostnames, ports, vendor "
            "FROM hosts WHERE alive=1 LIMIT 30"
        )
        for r in (rows or []):
            # 'ips'/'hostnames' are semicolon-separated; take the first entry.
            ip = (r.get("ips") or "").split(";")[0].strip()
            if not ip:
                continue
            hosts.append({
                "ip": ip,
                "mac": r.get("mac_address", ""),
                "hostname": (r.get("hostnames") or "").split(";")[0].strip(),
                "vendor": r.get("vendor", ""),
                "ports": r.get("ports", ""),
            })
    except Exception:
        pass

    # ── Port services (identified services with product/version) ──
    try:
        rows = self._sd.db.query(
            "SELECT mac_address, ip, port, service, product, version "
            "FROM port_services WHERE is_current=1 AND state='open' "
            "ORDER BY mac_address, port LIMIT 100"
        )
        for r in (rows or []):
            # Only include optional fields when present, to keep the
            # serialized snapshot (and thus the LLM prompt) compact.
            svc = {"mac": r.get("mac_address", ""), "port": r.get("port")}
            if r.get("ip"):
                svc["ip"] = r["ip"]
            if r.get("service"):
                svc["service"] = r["service"]
            if r.get("product"):
                svc["product"] = r["product"]
            if r.get("version"):
                svc["version"] = r["version"]
            services.append(svc)
    except Exception:
        pass

    # ── Active vulnerabilities ──
    try:
        rows = self._sd.db.query(
            "SELECT ip, port, vuln_id, hostname "
            "FROM vulnerabilities WHERE is_active=1 LIMIT 30"
        )
        vulns = [{"ip": r.get("ip", ""), "port": r.get("port"),
                  "vuln_id": r.get("vuln_id", ""),
                  "hostname": r.get("hostname", "")}
                 for r in (rows or [])]
    except Exception:
        pass

    # ── Captured credentials ──
    try:
        # "user" is quoted because it can be a reserved word in SQL.
        rows = self._sd.db.query(
            "SELECT service, ip, hostname, port, \"user\" "
            "FROM creds LIMIT 30"
        )
        creds = [{"service": r.get("service", ""), "ip": r.get("ip", ""),
                  "hostname": r.get("hostname", ""), "port": r.get("port"),
                  "user": r.get("user", "")}
                 for r in (rows or [])]
    except Exception:
        pass

    # ── Available actions catalog (what the LLM can queue) ──
    allowed = self._allowed_actions()
    try:
        if allowed:
            # Parameterized IN (...) clause: one '?' per allowed action.
            placeholders = ",".join("?" * len(allowed))
            rows = self._sd.db.query(
                f"SELECT b_class, b_description, b_port, b_service "
                f"FROM actions WHERE b_class IN ({placeholders}) AND b_enabled=1",
                tuple(allowed)
            )
            for r in (rows or []):
                entry = {"name": r["b_class"]}
                if r.get("b_description"):
                    # Truncate to keep prompt size bounded.
                    entry["description"] = r["b_description"][:100]
                if r.get("b_port"):
                    entry["target_port"] = r["b_port"]
                if r.get("b_service"):
                    entry["target_service"] = r["b_service"]
                actions_catalog.append(entry)
    except Exception:
        pass

    # ── Pending queue ──
    try:
        rows = self._sd.db.query(
            "SELECT action_name, ip, priority FROM action_queue "
            "WHERE status='pending' ORDER BY priority DESC LIMIT 15"
        )
        pending = [{"action": r["action_name"], "ip": r["ip"]} for r in (rows or [])]
    except Exception:
        pass

    # ── Recent action history ──
    try:
        rows = self._sd.db.query(
            "SELECT action_name, ip, status FROM action_queue "
            "WHERE status IN ('success','failed') ORDER BY completed_at DESC LIMIT 15"
        )
        history = [{"action": r["action_name"], "ip": r["ip"], "result": r["status"]}
                   for r in (rows or [])]
    except Exception:
        pass

    # Build explicit IP list for emphasis
    ip_list = [h["ip"] for h in hosts if h.get("ip")]

    result = {
        "VALID_TARGET_IPS": ip_list,
        "hosts_alive": hosts,
        "operation_mode": getattr(self._sd, "operation_mode", "?"),
    }
    if services:
        result["services_detected"] = services
    if vulns:
        result["vulnerabilities_found"] = vulns
    if creds:
        result["credentials_captured"] = creds
    if actions_catalog:
        result["available_actions"] = actions_catalog
    result["pending_queue"] = pending
    result["recent_actions"] = history
    result["summary"] = {
        "hosts_alive": len(ip_list),
        "vulns": getattr(self._sd, "vuln_count", 0),
        "creds": getattr(self._sd, "cred_count", 0),
    }

    return result
|
||||
422
loki/layouts/de.json
Normal file
422
loki/layouts/de.json
Normal file
@@ -0,0 +1,422 @@
|
||||
{
|
||||
"a": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"b": [
|
||||
0,
|
||||
5
|
||||
],
|
||||
"c": [
|
||||
0,
|
||||
6
|
||||
],
|
||||
"d": [
|
||||
0,
|
||||
7
|
||||
],
|
||||
"e": [
|
||||
0,
|
||||
8
|
||||
],
|
||||
"f": [
|
||||
0,
|
||||
9
|
||||
],
|
||||
"g": [
|
||||
0,
|
||||
10
|
||||
],
|
||||
"h": [
|
||||
0,
|
||||
11
|
||||
],
|
||||
"i": [
|
||||
0,
|
||||
12
|
||||
],
|
||||
"j": [
|
||||
0,
|
||||
13
|
||||
],
|
||||
"k": [
|
||||
0,
|
||||
14
|
||||
],
|
||||
"l": [
|
||||
0,
|
||||
15
|
||||
],
|
||||
"m": [
|
||||
0,
|
||||
16
|
||||
],
|
||||
"n": [
|
||||
0,
|
||||
17
|
||||
],
|
||||
"o": [
|
||||
0,
|
||||
18
|
||||
],
|
||||
"p": [
|
||||
0,
|
||||
19
|
||||
],
|
||||
"q": [
|
||||
0,
|
||||
20
|
||||
],
|
||||
"r": [
|
||||
0,
|
||||
21
|
||||
],
|
||||
"s": [
|
||||
0,
|
||||
22
|
||||
],
|
||||
"t": [
|
||||
0,
|
||||
23
|
||||
],
|
||||
"u": [
|
||||
0,
|
||||
24
|
||||
],
|
||||
"v": [
|
||||
0,
|
||||
25
|
||||
],
|
||||
"w": [
|
||||
0,
|
||||
26
|
||||
],
|
||||
"x": [
|
||||
0,
|
||||
27
|
||||
],
|
||||
"y": [
|
||||
0,
|
||||
29
|
||||
],
|
||||
"z": [
|
||||
0,
|
||||
28
|
||||
],
|
||||
"A": [
|
||||
2,
|
||||
4
|
||||
],
|
||||
"B": [
|
||||
2,
|
||||
5
|
||||
],
|
||||
"C": [
|
||||
2,
|
||||
6
|
||||
],
|
||||
"D": [
|
||||
2,
|
||||
7
|
||||
],
|
||||
"E": [
|
||||
2,
|
||||
8
|
||||
],
|
||||
"F": [
|
||||
2,
|
||||
9
|
||||
],
|
||||
"G": [
|
||||
2,
|
||||
10
|
||||
],
|
||||
"H": [
|
||||
2,
|
||||
11
|
||||
],
|
||||
"I": [
|
||||
2,
|
||||
12
|
||||
],
|
||||
"J": [
|
||||
2,
|
||||
13
|
||||
],
|
||||
"K": [
|
||||
2,
|
||||
14
|
||||
],
|
||||
"L": [
|
||||
2,
|
||||
15
|
||||
],
|
||||
"M": [
|
||||
2,
|
||||
16
|
||||
],
|
||||
"N": [
|
||||
2,
|
||||
17
|
||||
],
|
||||
"O": [
|
||||
2,
|
||||
18
|
||||
],
|
||||
"P": [
|
||||
2,
|
||||
19
|
||||
],
|
||||
"Q": [
|
||||
2,
|
||||
20
|
||||
],
|
||||
"R": [
|
||||
2,
|
||||
21
|
||||
],
|
||||
"S": [
|
||||
2,
|
||||
22
|
||||
],
|
||||
"T": [
|
||||
2,
|
||||
23
|
||||
],
|
||||
"U": [
|
||||
2,
|
||||
24
|
||||
],
|
||||
"V": [
|
||||
2,
|
||||
25
|
||||
],
|
||||
"W": [
|
||||
2,
|
||||
26
|
||||
],
|
||||
"X": [
|
||||
2,
|
||||
27
|
||||
],
|
||||
"Y": [
|
||||
2,
|
||||
29
|
||||
],
|
||||
"Z": [
|
||||
2,
|
||||
28
|
||||
],
|
||||
"1": [
|
||||
0,
|
||||
30
|
||||
],
|
||||
"2": [
|
||||
0,
|
||||
31
|
||||
],
|
||||
"3": [
|
||||
0,
|
||||
32
|
||||
],
|
||||
"4": [
|
||||
0,
|
||||
33
|
||||
],
|
||||
"5": [
|
||||
0,
|
||||
34
|
||||
],
|
||||
"6": [
|
||||
0,
|
||||
35
|
||||
],
|
||||
"7": [
|
||||
0,
|
||||
36
|
||||
],
|
||||
"8": [
|
||||
0,
|
||||
37
|
||||
],
|
||||
"9": [
|
||||
0,
|
||||
38
|
||||
],
|
||||
"0": [
|
||||
0,
|
||||
39
|
||||
],
|
||||
"!": [
|
||||
2,
|
||||
30
|
||||
],
|
||||
"@": [
|
||||
2,
|
||||
31
|
||||
],
|
||||
"#": [
|
||||
0,
|
||||
49
|
||||
],
|
||||
"$": [
|
||||
2,
|
||||
33
|
||||
],
|
||||
"%": [
|
||||
2,
|
||||
34
|
||||
],
|
||||
"^": [
|
||||
2,
|
||||
35
|
||||
],
|
||||
"&": [
|
||||
2,
|
||||
35
|
||||
],
|
||||
"*": [
|
||||
2,
|
||||
48
|
||||
],
|
||||
"(": [
|
||||
2,
|
||||
37
|
||||
],
|
||||
")": [
|
||||
2,
|
||||
38
|
||||
],
|
||||
"\n": [
|
||||
0,
|
||||
40
|
||||
],
|
||||
"\r": [
|
||||
0,
|
||||
40
|
||||
],
|
||||
"\t": [
|
||||
0,
|
||||
43
|
||||
],
|
||||
" ": [
|
||||
0,
|
||||
44
|
||||
],
|
||||
"-": [
|
||||
0,
|
||||
45
|
||||
],
|
||||
"_": [
|
||||
2,
|
||||
45
|
||||
],
|
||||
"=": [
|
||||
2,
|
||||
39
|
||||
],
|
||||
"+": [
|
||||
0,
|
||||
48
|
||||
],
|
||||
"[": [
|
||||
0,
|
||||
47
|
||||
],
|
||||
"{": [
|
||||
2,
|
||||
47
|
||||
],
|
||||
"]": [
|
||||
0,
|
||||
48
|
||||
],
|
||||
"}": [
|
||||
2,
|
||||
48
|
||||
],
|
||||
"\\": [
|
||||
0,
|
||||
49
|
||||
],
|
||||
"|": [
|
||||
2,
|
||||
49
|
||||
],
|
||||
";": [
|
||||
0,
|
||||
51
|
||||
],
|
||||
":": [
|
||||
2,
|
||||
51
|
||||
],
|
||||
"'": [
|
||||
2,
|
||||
49
|
||||
],
|
||||
"\"": [
|
||||
2,
|
||||
52
|
||||
],
|
||||
"`": [
|
||||
0,
|
||||
53
|
||||
],
|
||||
"~": [
|
||||
2,
|
||||
53
|
||||
],
|
||||
",": [
|
||||
0,
|
||||
54
|
||||
],
|
||||
"<": [
|
||||
0,
|
||||
100
|
||||
],
|
||||
".": [
|
||||
0,
|
||||
55
|
||||
],
|
||||
">": [
|
||||
2,
|
||||
100
|
||||
],
|
||||
"/": [
|
||||
2,
|
||||
36
|
||||
],
|
||||
"?": [
|
||||
2,
|
||||
45
|
||||
],
|
||||
"ß": [
|
||||
0,
|
||||
45
|
||||
],
|
||||
"ü": [
|
||||
0,
|
||||
47
|
||||
],
|
||||
"Ü": [
|
||||
2,
|
||||
47
|
||||
],
|
||||
"ö": [
|
||||
0,
|
||||
51
|
||||
],
|
||||
"Ö": [
|
||||
2,
|
||||
51
|
||||
],
|
||||
"ä": [
|
||||
0,
|
||||
52
|
||||
],
|
||||
"Ä": [
|
||||
2,
|
||||
52
|
||||
]
|
||||
}
|
||||
426
loki/layouts/es.json
Normal file
426
loki/layouts/es.json
Normal file
@@ -0,0 +1,426 @@
|
||||
{
|
||||
"a": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"b": [
|
||||
0,
|
||||
5
|
||||
],
|
||||
"c": [
|
||||
0,
|
||||
6
|
||||
],
|
||||
"d": [
|
||||
0,
|
||||
7
|
||||
],
|
||||
"e": [
|
||||
0,
|
||||
8
|
||||
],
|
||||
"f": [
|
||||
0,
|
||||
9
|
||||
],
|
||||
"g": [
|
||||
0,
|
||||
10
|
||||
],
|
||||
"h": [
|
||||
0,
|
||||
11
|
||||
],
|
||||
"i": [
|
||||
0,
|
||||
12
|
||||
],
|
||||
"j": [
|
||||
0,
|
||||
13
|
||||
],
|
||||
"k": [
|
||||
0,
|
||||
14
|
||||
],
|
||||
"l": [
|
||||
0,
|
||||
15
|
||||
],
|
||||
"m": [
|
||||
0,
|
||||
16
|
||||
],
|
||||
"n": [
|
||||
0,
|
||||
17
|
||||
],
|
||||
"o": [
|
||||
0,
|
||||
18
|
||||
],
|
||||
"p": [
|
||||
0,
|
||||
19
|
||||
],
|
||||
"q": [
|
||||
0,
|
||||
20
|
||||
],
|
||||
"r": [
|
||||
0,
|
||||
21
|
||||
],
|
||||
"s": [
|
||||
0,
|
||||
22
|
||||
],
|
||||
"t": [
|
||||
0,
|
||||
23
|
||||
],
|
||||
"u": [
|
||||
0,
|
||||
24
|
||||
],
|
||||
"v": [
|
||||
0,
|
||||
25
|
||||
],
|
||||
"w": [
|
||||
0,
|
||||
26
|
||||
],
|
||||
"x": [
|
||||
0,
|
||||
27
|
||||
],
|
||||
"y": [
|
||||
0,
|
||||
28
|
||||
],
|
||||
"z": [
|
||||
0,
|
||||
29
|
||||
],
|
||||
"A": [
|
||||
2,
|
||||
4
|
||||
],
|
||||
"B": [
|
||||
2,
|
||||
5
|
||||
],
|
||||
"C": [
|
||||
2,
|
||||
6
|
||||
],
|
||||
"D": [
|
||||
2,
|
||||
7
|
||||
],
|
||||
"E": [
|
||||
2,
|
||||
8
|
||||
],
|
||||
"F": [
|
||||
2,
|
||||
9
|
||||
],
|
||||
"G": [
|
||||
2,
|
||||
10
|
||||
],
|
||||
"H": [
|
||||
2,
|
||||
11
|
||||
],
|
||||
"I": [
|
||||
2,
|
||||
12
|
||||
],
|
||||
"J": [
|
||||
2,
|
||||
13
|
||||
],
|
||||
"K": [
|
||||
2,
|
||||
14
|
||||
],
|
||||
"L": [
|
||||
2,
|
||||
15
|
||||
],
|
||||
"M": [
|
||||
2,
|
||||
16
|
||||
],
|
||||
"N": [
|
||||
2,
|
||||
17
|
||||
],
|
||||
"O": [
|
||||
2,
|
||||
18
|
||||
],
|
||||
"P": [
|
||||
2,
|
||||
19
|
||||
],
|
||||
"Q": [
|
||||
2,
|
||||
20
|
||||
],
|
||||
"R": [
|
||||
2,
|
||||
21
|
||||
],
|
||||
"S": [
|
||||
2,
|
||||
22
|
||||
],
|
||||
"T": [
|
||||
2,
|
||||
23
|
||||
],
|
||||
"U": [
|
||||
2,
|
||||
24
|
||||
],
|
||||
"V": [
|
||||
2,
|
||||
25
|
||||
],
|
||||
"W": [
|
||||
2,
|
||||
26
|
||||
],
|
||||
"X": [
|
||||
2,
|
||||
27
|
||||
],
|
||||
"Y": [
|
||||
2,
|
||||
28
|
||||
],
|
||||
"Z": [
|
||||
2,
|
||||
29
|
||||
],
|
||||
"1": [
|
||||
0,
|
||||
30
|
||||
],
|
||||
"2": [
|
||||
0,
|
||||
31
|
||||
],
|
||||
"3": [
|
||||
0,
|
||||
32
|
||||
],
|
||||
"4": [
|
||||
0,
|
||||
33
|
||||
],
|
||||
"5": [
|
||||
0,
|
||||
34
|
||||
],
|
||||
"6": [
|
||||
0,
|
||||
35
|
||||
],
|
||||
"7": [
|
||||
0,
|
||||
36
|
||||
],
|
||||
"8": [
|
||||
0,
|
||||
37
|
||||
],
|
||||
"9": [
|
||||
0,
|
||||
38
|
||||
],
|
||||
"0": [
|
||||
0,
|
||||
39
|
||||
],
|
||||
"!": [
|
||||
2,
|
||||
30
|
||||
],
|
||||
"@": [
|
||||
2,
|
||||
31
|
||||
],
|
||||
"#": [
|
||||
2,
|
||||
32
|
||||
],
|
||||
"$": [
|
||||
2,
|
||||
33
|
||||
],
|
||||
"%": [
|
||||
2,
|
||||
34
|
||||
],
|
||||
"^": [
|
||||
2,
|
||||
35
|
||||
],
|
||||
"&": [
|
||||
2,
|
||||
36
|
||||
],
|
||||
"*": [
|
||||
2,
|
||||
48
|
||||
],
|
||||
"(": [
|
||||
2,
|
||||
38
|
||||
],
|
||||
")": [
|
||||
2,
|
||||
39
|
||||
],
|
||||
"\n": [
|
||||
0,
|
||||
40
|
||||
],
|
||||
"\r": [
|
||||
0,
|
||||
40
|
||||
],
|
||||
"\t": [
|
||||
0,
|
||||
43
|
||||
],
|
||||
" ": [
|
||||
0,
|
||||
44
|
||||
],
|
||||
"-": [
|
||||
0,
|
||||
56
|
||||
],
|
||||
"_": [
|
||||
2,
|
||||
56
|
||||
],
|
||||
"=": [
|
||||
0,
|
||||
46
|
||||
],
|
||||
"+": [
|
||||
0,
|
||||
48
|
||||
],
|
||||
"[": [
|
||||
0,
|
||||
47
|
||||
],
|
||||
"{": [
|
||||
2,
|
||||
47
|
||||
],
|
||||
"]": [
|
||||
0,
|
||||
48
|
||||
],
|
||||
"}": [
|
||||
2,
|
||||
48
|
||||
],
|
||||
"\\": [
|
||||
0,
|
||||
49
|
||||
],
|
||||
"|": [
|
||||
2,
|
||||
49
|
||||
],
|
||||
";": [
|
||||
0,
|
||||
51
|
||||
],
|
||||
":": [
|
||||
2,
|
||||
51
|
||||
],
|
||||
"'": [
|
||||
0,
|
||||
45
|
||||
],
|
||||
"\"": [
|
||||
2,
|
||||
52
|
||||
],
|
||||
"`": [
|
||||
0,
|
||||
53
|
||||
],
|
||||
"~": [
|
||||
2,
|
||||
53
|
||||
],
|
||||
",": [
|
||||
0,
|
||||
54
|
||||
],
|
||||
"<": [
|
||||
0,
|
||||
100
|
||||
],
|
||||
".": [
|
||||
0,
|
||||
55
|
||||
],
|
||||
">": [
|
||||
2,
|
||||
100
|
||||
],
|
||||
"/": [
|
||||
0,
|
||||
56
|
||||
],
|
||||
"?": [
|
||||
2,
|
||||
45
|
||||
],
|
||||
"ñ": [
|
||||
0,
|
||||
51
|
||||
],
|
||||
"Ñ": [
|
||||
2,
|
||||
51
|
||||
],
|
||||
"ç": [
|
||||
0,
|
||||
49
|
||||
],
|
||||
"Ç": [
|
||||
2,
|
||||
49
|
||||
],
|
||||
"¡": [
|
||||
0,
|
||||
46
|
||||
],
|
||||
"¿": [
|
||||
2,
|
||||
46
|
||||
],
|
||||
"´": [
|
||||
0,
|
||||
47
|
||||
],
|
||||
"¨": [
|
||||
2,
|
||||
47
|
||||
]
|
||||
}
|
||||
446
loki/layouts/fr.json
Normal file
446
loki/layouts/fr.json
Normal file
@@ -0,0 +1,446 @@
|
||||
{
|
||||
"a": [
|
||||
0,
|
||||
20
|
||||
],
|
||||
"b": [
|
||||
0,
|
||||
5
|
||||
],
|
||||
"c": [
|
||||
0,
|
||||
6
|
||||
],
|
||||
"d": [
|
||||
0,
|
||||
7
|
||||
],
|
||||
"e": [
|
||||
0,
|
||||
8
|
||||
],
|
||||
"f": [
|
||||
0,
|
||||
9
|
||||
],
|
||||
"g": [
|
||||
0,
|
||||
10
|
||||
],
|
||||
"h": [
|
||||
0,
|
||||
11
|
||||
],
|
||||
"i": [
|
||||
0,
|
||||
12
|
||||
],
|
||||
"j": [
|
||||
0,
|
||||
13
|
||||
],
|
||||
"k": [
|
||||
0,
|
||||
14
|
||||
],
|
||||
"l": [
|
||||
0,
|
||||
15
|
||||
],
|
||||
"m": [
|
||||
0,
|
||||
51
|
||||
],
|
||||
"n": [
|
||||
0,
|
||||
17
|
||||
],
|
||||
"o": [
|
||||
0,
|
||||
18
|
||||
],
|
||||
"p": [
|
||||
0,
|
||||
19
|
||||
],
|
||||
"q": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"r": [
|
||||
0,
|
||||
21
|
||||
],
|
||||
"s": [
|
||||
0,
|
||||
22
|
||||
],
|
||||
"t": [
|
||||
0,
|
||||
23
|
||||
],
|
||||
"u": [
|
||||
0,
|
||||
24
|
||||
],
|
||||
"v": [
|
||||
0,
|
||||
25
|
||||
],
|
||||
"w": [
|
||||
0,
|
||||
29
|
||||
],
|
||||
"x": [
|
||||
0,
|
||||
27
|
||||
],
|
||||
"y": [
|
||||
0,
|
||||
28
|
||||
],
|
||||
"z": [
|
||||
0,
|
||||
26
|
||||
],
|
||||
"A": [
|
||||
2,
|
||||
20
|
||||
],
|
||||
"B": [
|
||||
2,
|
||||
5
|
||||
],
|
||||
"C": [
|
||||
2,
|
||||
6
|
||||
],
|
||||
"D": [
|
||||
2,
|
||||
7
|
||||
],
|
||||
"E": [
|
||||
2,
|
||||
8
|
||||
],
|
||||
"F": [
|
||||
2,
|
||||
9
|
||||
],
|
||||
"G": [
|
||||
2,
|
||||
10
|
||||
],
|
||||
"H": [
|
||||
2,
|
||||
11
|
||||
],
|
||||
"I": [
|
||||
2,
|
||||
12
|
||||
],
|
||||
"J": [
|
||||
2,
|
||||
13
|
||||
],
|
||||
"K": [
|
||||
2,
|
||||
14
|
||||
],
|
||||
"L": [
|
||||
2,
|
||||
15
|
||||
],
|
||||
"M": [
|
||||
2,
|
||||
51
|
||||
],
|
||||
"N": [
|
||||
2,
|
||||
17
|
||||
],
|
||||
"O": [
|
||||
2,
|
||||
18
|
||||
],
|
||||
"P": [
|
||||
2,
|
||||
19
|
||||
],
|
||||
"Q": [
|
||||
2,
|
||||
4
|
||||
],
|
||||
"R": [
|
||||
2,
|
||||
21
|
||||
],
|
||||
"S": [
|
||||
2,
|
||||
22
|
||||
],
|
||||
"T": [
|
||||
2,
|
||||
23
|
||||
],
|
||||
"U": [
|
||||
2,
|
||||
24
|
||||
],
|
||||
"V": [
|
||||
2,
|
||||
25
|
||||
],
|
||||
"W": [
|
||||
2,
|
||||
29
|
||||
],
|
||||
"X": [
|
||||
2,
|
||||
27
|
||||
],
|
||||
"Y": [
|
||||
2,
|
||||
28
|
||||
],
|
||||
"Z": [
|
||||
2,
|
||||
26
|
||||
],
|
||||
"1": [
|
||||
2,
|
||||
30
|
||||
],
|
||||
"2": [
|
||||
2,
|
||||
31
|
||||
],
|
||||
"3": [
|
||||
2,
|
||||
32
|
||||
],
|
||||
"4": [
|
||||
2,
|
||||
33
|
||||
],
|
||||
"5": [
|
||||
2,
|
||||
34
|
||||
],
|
||||
"6": [
|
||||
2,
|
||||
35
|
||||
],
|
||||
"7": [
|
||||
2,
|
||||
36
|
||||
],
|
||||
"8": [
|
||||
2,
|
||||
37
|
||||
],
|
||||
"9": [
|
||||
2,
|
||||
38
|
||||
],
|
||||
"0": [
|
||||
2,
|
||||
39
|
||||
],
|
||||
"!": [
|
||||
0,
|
||||
56
|
||||
],
|
||||
"@": [
|
||||
64,
|
||||
39
|
||||
],
|
||||
"#": [
|
||||
64,
|
||||
32
|
||||
],
|
||||
"$": [
|
||||
0,
|
||||
48
|
||||
],
|
||||
"%": [
|
||||
2,
|
||||
52
|
||||
],
|
||||
"^": [
|
||||
0,
|
||||
47
|
||||
],
|
||||
"&": [
|
||||
0,
|
||||
30
|
||||
],
|
||||
"*": [
|
||||
0,
|
||||
49
|
||||
],
|
||||
"(": [
|
||||
0,
|
||||
34
|
||||
],
|
||||
")": [
|
||||
0,
|
||||
45
|
||||
],
|
||||
"\n": [
|
||||
0,
|
||||
40
|
||||
],
|
||||
"\r": [
|
||||
0,
|
||||
40
|
||||
],
|
||||
"\t": [
|
||||
0,
|
||||
43
|
||||
],
|
||||
" ": [
|
||||
0,
|
||||
44
|
||||
],
|
||||
"-": [
|
||||
0,
|
||||
35
|
||||
],
|
||||
"_": [
|
||||
0,
|
||||
37
|
||||
],
|
||||
"=": [
|
||||
0,
|
||||
46
|
||||
],
|
||||
"+": [
|
||||
2,
|
||||
46
|
||||
],
|
||||
"[": [
|
||||
64,
|
||||
34
|
||||
],
|
||||
"{": [
|
||||
64,
|
||||
33
|
||||
],
|
||||
"]": [
|
||||
64,
|
||||
45
|
||||
],
|
||||
"}": [
|
||||
64,
|
||||
46
|
||||
],
|
||||
"\\": [
|
||||
64,
|
||||
37
|
||||
],
|
||||
"|": [
|
||||
64,
|
||||
35
|
||||
],
|
||||
";": [
|
||||
0,
|
||||
54
|
||||
],
|
||||
":": [
|
||||
0,
|
||||
55
|
||||
],
|
||||
"'": [
|
||||
0,
|
||||
33
|
||||
],
|
||||
"\"": [
|
||||
0,
|
||||
32
|
||||
],
|
||||
"`": [
|
||||
64,
|
||||
36
|
||||
],
|
||||
"~": [
|
||||
64,
|
||||
31
|
||||
],
|
||||
",": [
|
||||
0,
|
||||
16
|
||||
],
|
||||
"<": [
|
||||
0,
|
||||
100
|
||||
],
|
||||
".": [
|
||||
2,
|
||||
54
|
||||
],
|
||||
">": [
|
||||
2,
|
||||
100
|
||||
],
|
||||
"/": [
|
||||
2,
|
||||
55
|
||||
],
|
||||
"?": [
|
||||
2,
|
||||
16
|
||||
],
|
||||
"é": [
|
||||
0,
|
||||
31
|
||||
],
|
||||
"è": [
|
||||
0,
|
||||
36
|
||||
],
|
||||
"ç": [
|
||||
0,
|
||||
38
|
||||
],
|
||||
"à": [
|
||||
0,
|
||||
39
|
||||
],
|
||||
"§": [
|
||||
2,
|
||||
56
|
||||
],
|
||||
"€": [
|
||||
64,
|
||||
8
|
||||
],
|
||||
"°": [
|
||||
2,
|
||||
45
|
||||
],
|
||||
"¨": [
|
||||
2,
|
||||
47
|
||||
],
|
||||
"£": [
|
||||
2,
|
||||
48
|
||||
],
|
||||
"¤": [
|
||||
64,
|
||||
48
|
||||
],
|
||||
"µ": [
|
||||
2,
|
||||
49
|
||||
],
|
||||
"ù": [
|
||||
0,
|
||||
52
|
||||
],
|
||||
"²": [
|
||||
0,
|
||||
53
|
||||
]
|
||||
}
|
||||
78
loki/layouts/generate_layouts.py
Normal file
78
loki/layouts/generate_layouts.py
Normal file
@@ -0,0 +1,78 @@
|
||||
import json
import os

# Load the existing US base layout: a map of character -> [modifier, HID keycode].
with open("us.json", "r", encoding="utf-8") as f:
    US_BASE = json.load(f)

# Per-layout differences relative to the US keyboard.
# Modifier byte: 0 = none, 2 = Shift, 64 = AltGr (Right Alt).
LAYOUT_DIFFS = {
    "fr": {
        "a": [0, 20], "A": [2, 20], "q": [0, 4], "Q": [2, 4],
        "z": [0, 26], "Z": [2, 26], "w": [0, 29], "W": [2, 29],
        "m": [0, 51], "M": [2, 51],
        "1": [2, 30], "2": [2, 31], "3": [2, 32], "4": [2, 33], "5": [2, 34],
        "6": [2, 35], "7": [2, 36], "8": [2, 37], "9": [2, 38], "0": [2, 39],
        "&": [0, 30], "é": [0, 31], "\"": [0, 32], "'": [0, 33], "(": [0, 34],
        "-": [0, 35], "è": [0, 36], "_": [0, 37], "ç": [0, 38], "à": [0, 39],
        "~": [64, 31], "#": [64, 32], "{": [64, 33], "[": [64, 34], "|": [64, 35],
        "`": [64, 36], "\\": [64, 37], "^": [0, 47], "@": [64, 39], "]": [64, 45],
        "}": [64, 46], "!": [0, 56], "§": [2, 56], "€": [64, 8], ")": [0, 45],
        "°": [2, 45], "=": [0, 46], "+": [2, 46], "¨": [2, 47], "$": [0, 48],
        "£": [2, 48], "¤": [64, 48], "*": [0, 49], "µ": [2, 49], "ù": [0, 52],
        "%": [2, 52], "²": [0, 53], ",": [0, 16], "?": [2, 16], ";": [0, 54],
        ".": [2, 54], ":": [0, 55], "/": [2, 55], "<": [0, 100], ">": [2, 100]
    },
    "uk": {
        "\"": [2, 31], "@": [2, 52], "£": [2, 32], "~": [0, 50],
        "#": [0, 49], "\\": [0, 100], "|": [2, 100]
    },
    "de": {
        "y": [0, 29], "Y": [2, 29], "z": [0, 28], "Z": [2, 28],
        "ß": [0, 45], "?": [2, 45], "ü": [0, 47], "Ü": [2, 47],
        "+": [0, 48], "*": [2, 48], "ö": [0, 51], "Ö": [2, 51],
        "ä": [0, 52], "Ä": [2, 52], "#": [0, 49], "'": [2, 49],
        "&": [2, 35], "/": [2, 36], "(": [2, 37], ")": [2, 38],
        "=": [2, 39], "<": [0, 100], ">": [2, 100]
    },
    "es": {
        "ñ": [0, 51], "Ñ": [2, 51], "ç": [0, 49], "Ç": [2, 49],
        "'": [0, 45], "?": [2, 45], "¡": [0, 46], "¿": [2, 46],
        "´": [0, 47], "¨": [2, 47], "+": [0, 48], "*": [2, 48],
        "<": [0, 100], ">": [2, 100], "-": [0, 56], "_": [2, 56]
    },
    "it": {
        "ò": [0, 51], "ç": [2, 51], "à": [0, 52], "°": [2, 52],
        "ù": [0, 49], "§": [2, 49], "è": [0, 47], "é": [2, 47],
        "ì": [0, 46], "^": [2, 46], "'": [0, 45], "?": [2, 45],
        "+": [0, 48], "*": [2, 48], "<": [0, 100], ">": [2, 100],
        "-": [0, 56], "_": [2, 56]
    },
    "ru": {
        # NOTE(review): "а" duplicates "ф" at [0, 4]; on a ЙЦУКЕН layout
        # "а" sits on the F key ([0, 9]) — verify against a real keyboard
        # before changing the data.
        "й": [0, 20], "ц": [0, 26], "у": [0, 8], "к": [0, 21], "е": [0, 23],
        "н": [0, 28], "г": [0, 24], "ш": [0, 12], "щ": [0, 18], "з": [0, 19],
        "х": [0, 47], "ъ": [0, 48], "ф": [0, 4], "ы": [0, 22], "в": [0, 7],
        "а": [0, 4], "п": [0, 10], "р": [0, 11], "о": [0, 13], "л": [0, 14],
        "д": [0, 15], "ж": [0, 51], "э": [0, 52], "я": [0, 29], "ч": [0, 27],
        "с": [0, 6], "м": [0, 25], "и": [0, 5], "т": [0, 17], "ь": [0, 16],
        "б": [0, 54], "ю": [0, 55], "ё": [0, 53], ".": [0, 56], ",": [2, 56],
        "№": [2, 32], ";": [2, 33], ":": [2, 35], "?": [2, 36]
    },
    "zh": {}  # ZH uses exactly the US layout
}


def generate_layouts():
    """Write one ``<lang>.json`` file per entry in LAYOUT_DIFFS.

    Each output layout is the US base with the language-specific key
    differences overlaid; files are written next to this script with
    ``ensure_ascii=False`` so non-ASCII characters stay readable.
    """
    for lang, diff in LAYOUT_DIFFS.items():
        # Copy the US base, then overlay the per-language deltas.
        new_layout = dict(US_BASE)
        new_layout.update(diff)

        filename = f"{lang}.json"
        with open(filename, "w", encoding="utf-8") as f:
            json.dump(new_layout, f, indent=4, ensure_ascii=False)
        # Bug fix: previously printed a literal "(unknown)" instead of
        # the generated filename.
        print(f"Généré : {filename} ({len(new_layout)} touches)")


if __name__ == "__main__":
    generate_layouts()
|
||||
430
loki/layouts/it.json
Normal file
430
loki/layouts/it.json
Normal file
@@ -0,0 +1,430 @@
|
||||
{
|
||||
"a": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"b": [
|
||||
0,
|
||||
5
|
||||
],
|
||||
"c": [
|
||||
0,
|
||||
6
|
||||
],
|
||||
"d": [
|
||||
0,
|
||||
7
|
||||
],
|
||||
"e": [
|
||||
0,
|
||||
8
|
||||
],
|
||||
"f": [
|
||||
0,
|
||||
9
|
||||
],
|
||||
"g": [
|
||||
0,
|
||||
10
|
||||
],
|
||||
"h": [
|
||||
0,
|
||||
11
|
||||
],
|
||||
"i": [
|
||||
0,
|
||||
12
|
||||
],
|
||||
"j": [
|
||||
0,
|
||||
13
|
||||
],
|
||||
"k": [
|
||||
0,
|
||||
14
|
||||
],
|
||||
"l": [
|
||||
0,
|
||||
15
|
||||
],
|
||||
"m": [
|
||||
0,
|
||||
16
|
||||
],
|
||||
"n": [
|
||||
0,
|
||||
17
|
||||
],
|
||||
"o": [
|
||||
0,
|
||||
18
|
||||
],
|
||||
"p": [
|
||||
0,
|
||||
19
|
||||
],
|
||||
"q": [
|
||||
0,
|
||||
20
|
||||
],
|
||||
"r": [
|
||||
0,
|
||||
21
|
||||
],
|
||||
"s": [
|
||||
0,
|
||||
22
|
||||
],
|
||||
"t": [
|
||||
0,
|
||||
23
|
||||
],
|
||||
"u": [
|
||||
0,
|
||||
24
|
||||
],
|
||||
"v": [
|
||||
0,
|
||||
25
|
||||
],
|
||||
"w": [
|
||||
0,
|
||||
26
|
||||
],
|
||||
"x": [
|
||||
0,
|
||||
27
|
||||
],
|
||||
"y": [
|
||||
0,
|
||||
28
|
||||
],
|
||||
"z": [
|
||||
0,
|
||||
29
|
||||
],
|
||||
"A": [
|
||||
2,
|
||||
4
|
||||
],
|
||||
"B": [
|
||||
2,
|
||||
5
|
||||
],
|
||||
"C": [
|
||||
2,
|
||||
6
|
||||
],
|
||||
"D": [
|
||||
2,
|
||||
7
|
||||
],
|
||||
"E": [
|
||||
2,
|
||||
8
|
||||
],
|
||||
"F": [
|
||||
2,
|
||||
9
|
||||
],
|
||||
"G": [
|
||||
2,
|
||||
10
|
||||
],
|
||||
"H": [
|
||||
2,
|
||||
11
|
||||
],
|
||||
"I": [
|
||||
2,
|
||||
12
|
||||
],
|
||||
"J": [
|
||||
2,
|
||||
13
|
||||
],
|
||||
"K": [
|
||||
2,
|
||||
14
|
||||
],
|
||||
"L": [
|
||||
2,
|
||||
15
|
||||
],
|
||||
"M": [
|
||||
2,
|
||||
16
|
||||
],
|
||||
"N": [
|
||||
2,
|
||||
17
|
||||
],
|
||||
"O": [
|
||||
2,
|
||||
18
|
||||
],
|
||||
"P": [
|
||||
2,
|
||||
19
|
||||
],
|
||||
"Q": [
|
||||
2,
|
||||
20
|
||||
],
|
||||
"R": [
|
||||
2,
|
||||
21
|
||||
],
|
||||
"S": [
|
||||
2,
|
||||
22
|
||||
],
|
||||
"T": [
|
||||
2,
|
||||
23
|
||||
],
|
||||
"U": [
|
||||
2,
|
||||
24
|
||||
],
|
||||
"V": [
|
||||
2,
|
||||
25
|
||||
],
|
||||
"W": [
|
||||
2,
|
||||
26
|
||||
],
|
||||
"X": [
|
||||
2,
|
||||
27
|
||||
],
|
||||
"Y": [
|
||||
2,
|
||||
28
|
||||
],
|
||||
"Z": [
|
||||
2,
|
||||
29
|
||||
],
|
||||
"1": [
|
||||
0,
|
||||
30
|
||||
],
|
||||
"2": [
|
||||
0,
|
||||
31
|
||||
],
|
||||
"3": [
|
||||
0,
|
||||
32
|
||||
],
|
||||
"4": [
|
||||
0,
|
||||
33
|
||||
],
|
||||
"5": [
|
||||
0,
|
||||
34
|
||||
],
|
||||
"6": [
|
||||
0,
|
||||
35
|
||||
],
|
||||
"7": [
|
||||
0,
|
||||
36
|
||||
],
|
||||
"8": [
|
||||
0,
|
||||
37
|
||||
],
|
||||
"9": [
|
||||
0,
|
||||
38
|
||||
],
|
||||
"0": [
|
||||
0,
|
||||
39
|
||||
],
|
||||
"!": [
|
||||
2,
|
||||
30
|
||||
],
|
||||
"@": [
|
||||
2,
|
||||
31
|
||||
],
|
||||
"#": [
|
||||
2,
|
||||
32
|
||||
],
|
||||
"$": [
|
||||
2,
|
||||
33
|
||||
],
|
||||
"%": [
|
||||
2,
|
||||
34
|
||||
],
|
||||
"^": [
|
||||
2,
|
||||
46
|
||||
],
|
||||
"&": [
|
||||
2,
|
||||
36
|
||||
],
|
||||
"*": [
|
||||
2,
|
||||
48
|
||||
],
|
||||
"(": [
|
||||
2,
|
||||
38
|
||||
],
|
||||
")": [
|
||||
2,
|
||||
39
|
||||
],
|
||||
"\n": [
|
||||
0,
|
||||
40
|
||||
],
|
||||
"\r": [
|
||||
0,
|
||||
40
|
||||
],
|
||||
"\t": [
|
||||
0,
|
||||
43
|
||||
],
|
||||
" ": [
|
||||
0,
|
||||
44
|
||||
],
|
||||
"-": [
|
||||
0,
|
||||
56
|
||||
],
|
||||
"_": [
|
||||
2,
|
||||
56
|
||||
],
|
||||
"=": [
|
||||
0,
|
||||
46
|
||||
],
|
||||
"+": [
|
||||
0,
|
||||
48
|
||||
],
|
||||
"[": [
|
||||
0,
|
||||
47
|
||||
],
|
||||
"{": [
|
||||
2,
|
||||
47
|
||||
],
|
||||
"]": [
|
||||
0,
|
||||
48
|
||||
],
|
||||
"}": [
|
||||
2,
|
||||
48
|
||||
],
|
||||
"\\": [
|
||||
0,
|
||||
49
|
||||
],
|
||||
"|": [
|
||||
2,
|
||||
49
|
||||
],
|
||||
";": [
|
||||
0,
|
||||
51
|
||||
],
|
||||
":": [
|
||||
2,
|
||||
51
|
||||
],
|
||||
"'": [
|
||||
0,
|
||||
45
|
||||
],
|
||||
"\"": [
|
||||
2,
|
||||
52
|
||||
],
|
||||
"`": [
|
||||
0,
|
||||
53
|
||||
],
|
||||
"~": [
|
||||
2,
|
||||
53
|
||||
],
|
||||
",": [
|
||||
0,
|
||||
54
|
||||
],
|
||||
"<": [
|
||||
0,
|
||||
100
|
||||
],
|
||||
".": [
|
||||
0,
|
||||
55
|
||||
],
|
||||
">": [
|
||||
2,
|
||||
100
|
||||
],
|
||||
"/": [
|
||||
0,
|
||||
56
|
||||
],
|
||||
"?": [
|
||||
2,
|
||||
45
|
||||
],
|
||||
"ò": [
|
||||
0,
|
||||
51
|
||||
],
|
||||
"ç": [
|
||||
2,
|
||||
51
|
||||
],
|
||||
"à": [
|
||||
0,
|
||||
52
|
||||
],
|
||||
"°": [
|
||||
2,
|
||||
52
|
||||
],
|
||||
"ù": [
|
||||
0,
|
||||
49
|
||||
],
|
||||
"§": [
|
||||
2,
|
||||
49
|
||||
],
|
||||
"è": [
|
||||
0,
|
||||
47
|
||||
],
|
||||
"é": [
|
||||
2,
|
||||
47
|
||||
],
|
||||
"ì": [
|
||||
0,
|
||||
46
|
||||
]
|
||||
}
|
||||
530
loki/layouts/ru.json
Normal file
530
loki/layouts/ru.json
Normal file
@@ -0,0 +1,530 @@
|
||||
{
|
||||
"a": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"b": [
|
||||
0,
|
||||
5
|
||||
],
|
||||
"c": [
|
||||
0,
|
||||
6
|
||||
],
|
||||
"d": [
|
||||
0,
|
||||
7
|
||||
],
|
||||
"e": [
|
||||
0,
|
||||
8
|
||||
],
|
||||
"f": [
|
||||
0,
|
||||
9
|
||||
],
|
||||
"g": [
|
||||
0,
|
||||
10
|
||||
],
|
||||
"h": [
|
||||
0,
|
||||
11
|
||||
],
|
||||
"i": [
|
||||
0,
|
||||
12
|
||||
],
|
||||
"j": [
|
||||
0,
|
||||
13
|
||||
],
|
||||
"k": [
|
||||
0,
|
||||
14
|
||||
],
|
||||
"l": [
|
||||
0,
|
||||
15
|
||||
],
|
||||
"m": [
|
||||
0,
|
||||
16
|
||||
],
|
||||
"n": [
|
||||
0,
|
||||
17
|
||||
],
|
||||
"o": [
|
||||
0,
|
||||
18
|
||||
],
|
||||
"p": [
|
||||
0,
|
||||
19
|
||||
],
|
||||
"q": [
|
||||
0,
|
||||
20
|
||||
],
|
||||
"r": [
|
||||
0,
|
||||
21
|
||||
],
|
||||
"s": [
|
||||
0,
|
||||
22
|
||||
],
|
||||
"t": [
|
||||
0,
|
||||
23
|
||||
],
|
||||
"u": [
|
||||
0,
|
||||
24
|
||||
],
|
||||
"v": [
|
||||
0,
|
||||
25
|
||||
],
|
||||
"w": [
|
||||
0,
|
||||
26
|
||||
],
|
||||
"x": [
|
||||
0,
|
||||
27
|
||||
],
|
||||
"y": [
|
||||
0,
|
||||
28
|
||||
],
|
||||
"z": [
|
||||
0,
|
||||
29
|
||||
],
|
||||
"A": [
|
||||
2,
|
||||
4
|
||||
],
|
||||
"B": [
|
||||
2,
|
||||
5
|
||||
],
|
||||
"C": [
|
||||
2,
|
||||
6
|
||||
],
|
||||
"D": [
|
||||
2,
|
||||
7
|
||||
],
|
||||
"E": [
|
||||
2,
|
||||
8
|
||||
],
|
||||
"F": [
|
||||
2,
|
||||
9
|
||||
],
|
||||
"G": [
|
||||
2,
|
||||
10
|
||||
],
|
||||
"H": [
|
||||
2,
|
||||
11
|
||||
],
|
||||
"I": [
|
||||
2,
|
||||
12
|
||||
],
|
||||
"J": [
|
||||
2,
|
||||
13
|
||||
],
|
||||
"K": [
|
||||
2,
|
||||
14
|
||||
],
|
||||
"L": [
|
||||
2,
|
||||
15
|
||||
],
|
||||
"M": [
|
||||
2,
|
||||
16
|
||||
],
|
||||
"N": [
|
||||
2,
|
||||
17
|
||||
],
|
||||
"O": [
|
||||
2,
|
||||
18
|
||||
],
|
||||
"P": [
|
||||
2,
|
||||
19
|
||||
],
|
||||
"Q": [
|
||||
2,
|
||||
20
|
||||
],
|
||||
"R": [
|
||||
2,
|
||||
21
|
||||
],
|
||||
"S": [
|
||||
2,
|
||||
22
|
||||
],
|
||||
"T": [
|
||||
2,
|
||||
23
|
||||
],
|
||||
"U": [
|
||||
2,
|
||||
24
|
||||
],
|
||||
"V": [
|
||||
2,
|
||||
25
|
||||
],
|
||||
"W": [
|
||||
2,
|
||||
26
|
||||
],
|
||||
"X": [
|
||||
2,
|
||||
27
|
||||
],
|
||||
"Y": [
|
||||
2,
|
||||
28
|
||||
],
|
||||
"Z": [
|
||||
2,
|
||||
29
|
||||
],
|
||||
"1": [
|
||||
0,
|
||||
30
|
||||
],
|
||||
"2": [
|
||||
0,
|
||||
31
|
||||
],
|
||||
"3": [
|
||||
0,
|
||||
32
|
||||
],
|
||||
"4": [
|
||||
0,
|
||||
33
|
||||
],
|
||||
"5": [
|
||||
0,
|
||||
34
|
||||
],
|
||||
"6": [
|
||||
0,
|
||||
35
|
||||
],
|
||||
"7": [
|
||||
0,
|
||||
36
|
||||
],
|
||||
"8": [
|
||||
0,
|
||||
37
|
||||
],
|
||||
"9": [
|
||||
0,
|
||||
38
|
||||
],
|
||||
"0": [
|
||||
0,
|
||||
39
|
||||
],
|
||||
"!": [
|
||||
2,
|
||||
30
|
||||
],
|
||||
"@": [
|
||||
2,
|
||||
31
|
||||
],
|
||||
"#": [
|
||||
2,
|
||||
32
|
||||
],
|
||||
"$": [
|
||||
2,
|
||||
33
|
||||
],
|
||||
"%": [
|
||||
2,
|
||||
34
|
||||
],
|
||||
"^": [
|
||||
2,
|
||||
35
|
||||
],
|
||||
"&": [
|
||||
2,
|
||||
36
|
||||
],
|
||||
"*": [
|
||||
2,
|
||||
37
|
||||
],
|
||||
"(": [
|
||||
2,
|
||||
38
|
||||
],
|
||||
")": [
|
||||
2,
|
||||
39
|
||||
],
|
||||
"\n": [
|
||||
0,
|
||||
40
|
||||
],
|
||||
"\r": [
|
||||
0,
|
||||
40
|
||||
],
|
||||
"\t": [
|
||||
0,
|
||||
43
|
||||
],
|
||||
" ": [
|
||||
0,
|
||||
44
|
||||
],
|
||||
"-": [
|
||||
0,
|
||||
45
|
||||
],
|
||||
"_": [
|
||||
2,
|
||||
45
|
||||
],
|
||||
"=": [
|
||||
0,
|
||||
46
|
||||
],
|
||||
"+": [
|
||||
2,
|
||||
46
|
||||
],
|
||||
"[": [
|
||||
0,
|
||||
47
|
||||
],
|
||||
"{": [
|
||||
2,
|
||||
47
|
||||
],
|
||||
"]": [
|
||||
0,
|
||||
48
|
||||
],
|
||||
"}": [
|
||||
2,
|
||||
48
|
||||
],
|
||||
"\\": [
|
||||
0,
|
||||
49
|
||||
],
|
||||
"|": [
|
||||
2,
|
||||
49
|
||||
],
|
||||
";": [
|
||||
2,
|
||||
33
|
||||
],
|
||||
":": [
|
||||
2,
|
||||
35
|
||||
],
|
||||
"'": [
|
||||
0,
|
||||
52
|
||||
],
|
||||
"\"": [
|
||||
2,
|
||||
52
|
||||
],
|
||||
"`": [
|
||||
0,
|
||||
53
|
||||
],
|
||||
"~": [
|
||||
2,
|
||||
53
|
||||
],
|
||||
",": [
|
||||
2,
|
||||
56
|
||||
],
|
||||
"<": [
|
||||
2,
|
||||
54
|
||||
],
|
||||
".": [
|
||||
0,
|
||||
56
|
||||
],
|
||||
">": [
|
||||
2,
|
||||
55
|
||||
],
|
||||
"/": [
|
||||
0,
|
||||
56
|
||||
],
|
||||
"?": [
|
||||
2,
|
||||
36
|
||||
],
|
||||
"й": [
|
||||
0,
|
||||
20
|
||||
],
|
||||
"ц": [
|
||||
0,
|
||||
26
|
||||
],
|
||||
"у": [
|
||||
0,
|
||||
8
|
||||
],
|
||||
"к": [
|
||||
0,
|
||||
21
|
||||
],
|
||||
"е": [
|
||||
0,
|
||||
23
|
||||
],
|
||||
"н": [
|
||||
0,
|
||||
28
|
||||
],
|
||||
"г": [
|
||||
0,
|
||||
24
|
||||
],
|
||||
"ш": [
|
||||
0,
|
||||
12
|
||||
],
|
||||
"щ": [
|
||||
0,
|
||||
18
|
||||
],
|
||||
"з": [
|
||||
0,
|
||||
19
|
||||
],
|
||||
"х": [
|
||||
0,
|
||||
47
|
||||
],
|
||||
"ъ": [
|
||||
0,
|
||||
48
|
||||
],
|
||||
"ф": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"ы": [
|
||||
0,
|
||||
22
|
||||
],
|
||||
"в": [
|
||||
0,
|
||||
7
|
||||
],
|
||||
"а": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"п": [
|
||||
0,
|
||||
10
|
||||
],
|
||||
"р": [
|
||||
0,
|
||||
11
|
||||
],
|
||||
"о": [
|
||||
0,
|
||||
13
|
||||
],
|
||||
"л": [
|
||||
0,
|
||||
14
|
||||
],
|
||||
"д": [
|
||||
0,
|
||||
15
|
||||
],
|
||||
"ж": [
|
||||
0,
|
||||
51
|
||||
],
|
||||
"э": [
|
||||
0,
|
||||
52
|
||||
],
|
||||
"я": [
|
||||
0,
|
||||
29
|
||||
],
|
||||
"ч": [
|
||||
0,
|
||||
27
|
||||
],
|
||||
"с": [
|
||||
0,
|
||||
6
|
||||
],
|
||||
"м": [
|
||||
0,
|
||||
25
|
||||
],
|
||||
"и": [
|
||||
0,
|
||||
5
|
||||
],
|
||||
"т": [
|
||||
0,
|
||||
17
|
||||
],
|
||||
"ь": [
|
||||
0,
|
||||
16
|
||||
],
|
||||
"б": [
|
||||
0,
|
||||
54
|
||||
],
|
||||
"ю": [
|
||||
0,
|
||||
55
|
||||
],
|
||||
"ё": [
|
||||
0,
|
||||
53
|
||||
],
|
||||
"№": [
|
||||
2,
|
||||
32
|
||||
]
|
||||
}
|
||||
398
loki/layouts/uk.json
Normal file
398
loki/layouts/uk.json
Normal file
@@ -0,0 +1,398 @@
|
||||
{
|
||||
"a": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"b": [
|
||||
0,
|
||||
5
|
||||
],
|
||||
"c": [
|
||||
0,
|
||||
6
|
||||
],
|
||||
"d": [
|
||||
0,
|
||||
7
|
||||
],
|
||||
"e": [
|
||||
0,
|
||||
8
|
||||
],
|
||||
"f": [
|
||||
0,
|
||||
9
|
||||
],
|
||||
"g": [
|
||||
0,
|
||||
10
|
||||
],
|
||||
"h": [
|
||||
0,
|
||||
11
|
||||
],
|
||||
"i": [
|
||||
0,
|
||||
12
|
||||
],
|
||||
"j": [
|
||||
0,
|
||||
13
|
||||
],
|
||||
"k": [
|
||||
0,
|
||||
14
|
||||
],
|
||||
"l": [
|
||||
0,
|
||||
15
|
||||
],
|
||||
"m": [
|
||||
0,
|
||||
16
|
||||
],
|
||||
"n": [
|
||||
0,
|
||||
17
|
||||
],
|
||||
"o": [
|
||||
0,
|
||||
18
|
||||
],
|
||||
"p": [
|
||||
0,
|
||||
19
|
||||
],
|
||||
"q": [
|
||||
0,
|
||||
20
|
||||
],
|
||||
"r": [
|
||||
0,
|
||||
21
|
||||
],
|
||||
"s": [
|
||||
0,
|
||||
22
|
||||
],
|
||||
"t": [
|
||||
0,
|
||||
23
|
||||
],
|
||||
"u": [
|
||||
0,
|
||||
24
|
||||
],
|
||||
"v": [
|
||||
0,
|
||||
25
|
||||
],
|
||||
"w": [
|
||||
0,
|
||||
26
|
||||
],
|
||||
"x": [
|
||||
0,
|
||||
27
|
||||
],
|
||||
"y": [
|
||||
0,
|
||||
28
|
||||
],
|
||||
"z": [
|
||||
0,
|
||||
29
|
||||
],
|
||||
"A": [
|
||||
2,
|
||||
4
|
||||
],
|
||||
"B": [
|
||||
2,
|
||||
5
|
||||
],
|
||||
"C": [
|
||||
2,
|
||||
6
|
||||
],
|
||||
"D": [
|
||||
2,
|
||||
7
|
||||
],
|
||||
"E": [
|
||||
2,
|
||||
8
|
||||
],
|
||||
"F": [
|
||||
2,
|
||||
9
|
||||
],
|
||||
"G": [
|
||||
2,
|
||||
10
|
||||
],
|
||||
"H": [
|
||||
2,
|
||||
11
|
||||
],
|
||||
"I": [
|
||||
2,
|
||||
12
|
||||
],
|
||||
"J": [
|
||||
2,
|
||||
13
|
||||
],
|
||||
"K": [
|
||||
2,
|
||||
14
|
||||
],
|
||||
"L": [
|
||||
2,
|
||||
15
|
||||
],
|
||||
"M": [
|
||||
2,
|
||||
16
|
||||
],
|
||||
"N": [
|
||||
2,
|
||||
17
|
||||
],
|
||||
"O": [
|
||||
2,
|
||||
18
|
||||
],
|
||||
"P": [
|
||||
2,
|
||||
19
|
||||
],
|
||||
"Q": [
|
||||
2,
|
||||
20
|
||||
],
|
||||
"R": [
|
||||
2,
|
||||
21
|
||||
],
|
||||
"S": [
|
||||
2,
|
||||
22
|
||||
],
|
||||
"T": [
|
||||
2,
|
||||
23
|
||||
],
|
||||
"U": [
|
||||
2,
|
||||
24
|
||||
],
|
||||
"V": [
|
||||
2,
|
||||
25
|
||||
],
|
||||
"W": [
|
||||
2,
|
||||
26
|
||||
],
|
||||
"X": [
|
||||
2,
|
||||
27
|
||||
],
|
||||
"Y": [
|
||||
2,
|
||||
28
|
||||
],
|
||||
"Z": [
|
||||
2,
|
||||
29
|
||||
],
|
||||
"1": [
|
||||
0,
|
||||
30
|
||||
],
|
||||
"2": [
|
||||
0,
|
||||
31
|
||||
],
|
||||
"3": [
|
||||
0,
|
||||
32
|
||||
],
|
||||
"4": [
|
||||
0,
|
||||
33
|
||||
],
|
||||
"5": [
|
||||
0,
|
||||
34
|
||||
],
|
||||
"6": [
|
||||
0,
|
||||
35
|
||||
],
|
||||
"7": [
|
||||
0,
|
||||
36
|
||||
],
|
||||
"8": [
|
||||
0,
|
||||
37
|
||||
],
|
||||
"9": [
|
||||
0,
|
||||
38
|
||||
],
|
||||
"0": [
|
||||
0,
|
||||
39
|
||||
],
|
||||
"!": [
|
||||
2,
|
||||
30
|
||||
],
|
||||
"@": [
|
||||
2,
|
||||
52
|
||||
],
|
||||
"#": [
|
||||
0,
|
||||
49
|
||||
],
|
||||
"$": [
|
||||
2,
|
||||
33
|
||||
],
|
||||
"%": [
|
||||
2,
|
||||
34
|
||||
],
|
||||
"^": [
|
||||
2,
|
||||
35
|
||||
],
|
||||
"&": [
|
||||
2,
|
||||
36
|
||||
],
|
||||
"*": [
|
||||
2,
|
||||
37
|
||||
],
|
||||
"(": [
|
||||
2,
|
||||
38
|
||||
],
|
||||
")": [
|
||||
2,
|
||||
39
|
||||
],
|
||||
"\n": [
|
||||
0,
|
||||
40
|
||||
],
|
||||
"\r": [
|
||||
0,
|
||||
40
|
||||
],
|
||||
"\t": [
|
||||
0,
|
||||
43
|
||||
],
|
||||
" ": [
|
||||
0,
|
||||
44
|
||||
],
|
||||
"-": [
|
||||
0,
|
||||
45
|
||||
],
|
||||
"_": [
|
||||
2,
|
||||
45
|
||||
],
|
||||
"=": [
|
||||
0,
|
||||
46
|
||||
],
|
||||
"+": [
|
||||
2,
|
||||
46
|
||||
],
|
||||
"[": [
|
||||
0,
|
||||
47
|
||||
],
|
||||
"{": [
|
||||
2,
|
||||
47
|
||||
],
|
||||
"]": [
|
||||
0,
|
||||
48
|
||||
],
|
||||
"}": [
|
||||
2,
|
||||
48
|
||||
],
|
||||
"\\": [
|
||||
0,
|
||||
100
|
||||
],
|
||||
"|": [
|
||||
2,
|
||||
100
|
||||
],
|
||||
";": [
|
||||
0,
|
||||
51
|
||||
],
|
||||
":": [
|
||||
2,
|
||||
51
|
||||
],
|
||||
"'": [
|
||||
0,
|
||||
52
|
||||
],
|
||||
"\"": [
|
||||
2,
|
||||
31
|
||||
],
|
||||
"`": [
|
||||
0,
|
||||
53
|
||||
],
|
||||
"~": [
|
||||
0,
|
||||
50
|
||||
],
|
||||
",": [
|
||||
0,
|
||||
54
|
||||
],
|
||||
"<": [
|
||||
2,
|
||||
54
|
||||
],
|
||||
".": [
|
||||
0,
|
||||
55
|
||||
],
|
||||
">": [
|
||||
2,
|
||||
55
|
||||
],
|
||||
"/": [
|
||||
0,
|
||||
56
|
||||
],
|
||||
"?": [
|
||||
2,
|
||||
56
|
||||
],
|
||||
"£": [
|
||||
2,
|
||||
32
|
||||
]
|
||||
}
|
||||
394
loki/layouts/zh.json
Normal file
394
loki/layouts/zh.json
Normal file
@@ -0,0 +1,394 @@
|
||||
{
|
||||
"a": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"b": [
|
||||
0,
|
||||
5
|
||||
],
|
||||
"c": [
|
||||
0,
|
||||
6
|
||||
],
|
||||
"d": [
|
||||
0,
|
||||
7
|
||||
],
|
||||
"e": [
|
||||
0,
|
||||
8
|
||||
],
|
||||
"f": [
|
||||
0,
|
||||
9
|
||||
],
|
||||
"g": [
|
||||
0,
|
||||
10
|
||||
],
|
||||
"h": [
|
||||
0,
|
||||
11
|
||||
],
|
||||
"i": [
|
||||
0,
|
||||
12
|
||||
],
|
||||
"j": [
|
||||
0,
|
||||
13
|
||||
],
|
||||
"k": [
|
||||
0,
|
||||
14
|
||||
],
|
||||
"l": [
|
||||
0,
|
||||
15
|
||||
],
|
||||
"m": [
|
||||
0,
|
||||
16
|
||||
],
|
||||
"n": [
|
||||
0,
|
||||
17
|
||||
],
|
||||
"o": [
|
||||
0,
|
||||
18
|
||||
],
|
||||
"p": [
|
||||
0,
|
||||
19
|
||||
],
|
||||
"q": [
|
||||
0,
|
||||
20
|
||||
],
|
||||
"r": [
|
||||
0,
|
||||
21
|
||||
],
|
||||
"s": [
|
||||
0,
|
||||
22
|
||||
],
|
||||
"t": [
|
||||
0,
|
||||
23
|
||||
],
|
||||
"u": [
|
||||
0,
|
||||
24
|
||||
],
|
||||
"v": [
|
||||
0,
|
||||
25
|
||||
],
|
||||
"w": [
|
||||
0,
|
||||
26
|
||||
],
|
||||
"x": [
|
||||
0,
|
||||
27
|
||||
],
|
||||
"y": [
|
||||
0,
|
||||
28
|
||||
],
|
||||
"z": [
|
||||
0,
|
||||
29
|
||||
],
|
||||
"A": [
|
||||
2,
|
||||
4
|
||||
],
|
||||
"B": [
|
||||
2,
|
||||
5
|
||||
],
|
||||
"C": [
|
||||
2,
|
||||
6
|
||||
],
|
||||
"D": [
|
||||
2,
|
||||
7
|
||||
],
|
||||
"E": [
|
||||
2,
|
||||
8
|
||||
],
|
||||
"F": [
|
||||
2,
|
||||
9
|
||||
],
|
||||
"G": [
|
||||
2,
|
||||
10
|
||||
],
|
||||
"H": [
|
||||
2,
|
||||
11
|
||||
],
|
||||
"I": [
|
||||
2,
|
||||
12
|
||||
],
|
||||
"J": [
|
||||
2,
|
||||
13
|
||||
],
|
||||
"K": [
|
||||
2,
|
||||
14
|
||||
],
|
||||
"L": [
|
||||
2,
|
||||
15
|
||||
],
|
||||
"M": [
|
||||
2,
|
||||
16
|
||||
],
|
||||
"N": [
|
||||
2,
|
||||
17
|
||||
],
|
||||
"O": [
|
||||
2,
|
||||
18
|
||||
],
|
||||
"P": [
|
||||
2,
|
||||
19
|
||||
],
|
||||
"Q": [
|
||||
2,
|
||||
20
|
||||
],
|
||||
"R": [
|
||||
2,
|
||||
21
|
||||
],
|
||||
"S": [
|
||||
2,
|
||||
22
|
||||
],
|
||||
"T": [
|
||||
2,
|
||||
23
|
||||
],
|
||||
"U": [
|
||||
2,
|
||||
24
|
||||
],
|
||||
"V": [
|
||||
2,
|
||||
25
|
||||
],
|
||||
"W": [
|
||||
2,
|
||||
26
|
||||
],
|
||||
"X": [
|
||||
2,
|
||||
27
|
||||
],
|
||||
"Y": [
|
||||
2,
|
||||
28
|
||||
],
|
||||
"Z": [
|
||||
2,
|
||||
29
|
||||
],
|
||||
"1": [
|
||||
0,
|
||||
30
|
||||
],
|
||||
"2": [
|
||||
0,
|
||||
31
|
||||
],
|
||||
"3": [
|
||||
0,
|
||||
32
|
||||
],
|
||||
"4": [
|
||||
0,
|
||||
33
|
||||
],
|
||||
"5": [
|
||||
0,
|
||||
34
|
||||
],
|
||||
"6": [
|
||||
0,
|
||||
35
|
||||
],
|
||||
"7": [
|
||||
0,
|
||||
36
|
||||
],
|
||||
"8": [
|
||||
0,
|
||||
37
|
||||
],
|
||||
"9": [
|
||||
0,
|
||||
38
|
||||
],
|
||||
"0": [
|
||||
0,
|
||||
39
|
||||
],
|
||||
"!": [
|
||||
2,
|
||||
30
|
||||
],
|
||||
"@": [
|
||||
2,
|
||||
31
|
||||
],
|
||||
"#": [
|
||||
2,
|
||||
32
|
||||
],
|
||||
"$": [
|
||||
2,
|
||||
33
|
||||
],
|
||||
"%": [
|
||||
2,
|
||||
34
|
||||
],
|
||||
"^": [
|
||||
2,
|
||||
35
|
||||
],
|
||||
"&": [
|
||||
2,
|
||||
36
|
||||
],
|
||||
"*": [
|
||||
2,
|
||||
37
|
||||
],
|
||||
"(": [
|
||||
2,
|
||||
38
|
||||
],
|
||||
")": [
|
||||
2,
|
||||
39
|
||||
],
|
||||
"\n": [
|
||||
0,
|
||||
40
|
||||
],
|
||||
"\r": [
|
||||
0,
|
||||
40
|
||||
],
|
||||
"\t": [
|
||||
0,
|
||||
43
|
||||
],
|
||||
" ": [
|
||||
0,
|
||||
44
|
||||
],
|
||||
"-": [
|
||||
0,
|
||||
45
|
||||
],
|
||||
"_": [
|
||||
2,
|
||||
45
|
||||
],
|
||||
"=": [
|
||||
0,
|
||||
46
|
||||
],
|
||||
"+": [
|
||||
2,
|
||||
46
|
||||
],
|
||||
"[": [
|
||||
0,
|
||||
47
|
||||
],
|
||||
"{": [
|
||||
2,
|
||||
47
|
||||
],
|
||||
"]": [
|
||||
0,
|
||||
48
|
||||
],
|
||||
"}": [
|
||||
2,
|
||||
48
|
||||
],
|
||||
"\\": [
|
||||
0,
|
||||
49
|
||||
],
|
||||
"|": [
|
||||
2,
|
||||
49
|
||||
],
|
||||
";": [
|
||||
0,
|
||||
51
|
||||
],
|
||||
":": [
|
||||
2,
|
||||
51
|
||||
],
|
||||
"'": [
|
||||
0,
|
||||
52
|
||||
],
|
||||
"\"": [
|
||||
2,
|
||||
52
|
||||
],
|
||||
"`": [
|
||||
0,
|
||||
53
|
||||
],
|
||||
"~": [
|
||||
2,
|
||||
53
|
||||
],
|
||||
",": [
|
||||
0,
|
||||
54
|
||||
],
|
||||
"<": [
|
||||
2,
|
||||
54
|
||||
],
|
||||
".": [
|
||||
0,
|
||||
55
|
||||
],
|
||||
">": [
|
||||
2,
|
||||
55
|
||||
],
|
||||
"/": [
|
||||
0,
|
||||
56
|
||||
],
|
||||
"?": [
|
||||
2,
|
||||
56
|
||||
]
|
||||
}
|
||||
333
mcp_server.py
Normal file
333
mcp_server.py
Normal file
@@ -0,0 +1,333 @@
|
||||
# mcp_server.py
|
||||
# Model Context Protocol server for Bjorn.
|
||||
# Exposes Bjorn's database and actions as MCP tools consumable by any MCP client
|
||||
# (Claude Desktop, custom agents, etc.).
|
||||
#
|
||||
# Transport: HTTP SSE (default, port configurable) or stdio.
|
||||
# Requires: pip install mcp
|
||||
# Gracefully no-ops if mcp is not installed.
|
||||
|
||||
import json
|
||||
import threading
|
||||
import time
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from logger import Logger
|
||||
|
||||
logger = Logger(name="mcp_server.py", level=20)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Lazy shared_data import (avoids circular imports at module level)
|
||||
# ---------------------------------------------------------------------------
|
||||
_shared_data = None
|
||||
|
||||
def _sd():
|
||||
global _shared_data
|
||||
if _shared_data is None:
|
||||
from init_shared import shared_data
|
||||
_shared_data = shared_data
|
||||
return _shared_data
|
||||
|
||||
|
||||
def _tool_allowed(name: str) -> bool:
|
||||
allowed = _sd().config.get("mcp_allowed_tools", [])
|
||||
return name in allowed
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Tool implementations (pure functions, no MCP deps)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _impl_get_hosts(alive_only: bool = True) -> str:
|
||||
try:
|
||||
sql = "SELECT ip, mac, hostname, os, alive, ports_open FROM hosts"
|
||||
if alive_only:
|
||||
sql += " WHERE alive=1"
|
||||
sql += " ORDER BY ip"
|
||||
rows = _sd().db.query(sql, ())
|
||||
result = [dict(r) for r in rows] if rows else []
|
||||
return json.dumps(result, default=str)
|
||||
except Exception as e:
|
||||
return json.dumps({"error": str(e)})
|
||||
|
||||
|
||||
def _impl_get_vulnerabilities(host_ip: Optional[str] = None, limit: int = 100) -> str:
|
||||
try:
|
||||
if host_ip:
|
||||
sql = ("SELECT v.ip, v.port, v.cve_id, v.severity, v.description "
|
||||
"FROM vulnerabilities v WHERE v.ip=? ORDER BY v.severity DESC LIMIT ?")
|
||||
rows = _sd().db.query(sql, (host_ip, limit))
|
||||
else:
|
||||
sql = ("SELECT v.ip, v.port, v.cve_id, v.severity, v.description "
|
||||
"FROM vulnerabilities v ORDER BY v.severity DESC LIMIT ?")
|
||||
rows = _sd().db.query(sql, (limit,))
|
||||
return json.dumps([dict(r) for r in rows] if rows else [], default=str)
|
||||
except Exception as e:
|
||||
return json.dumps({"error": str(e)})
|
||||
|
||||
|
||||
def _impl_get_credentials(service: Optional[str] = None, limit: int = 100) -> str:
|
||||
try:
|
||||
if service:
|
||||
sql = ("SELECT ip, port, service, username, password, found_at "
|
||||
"FROM credentials WHERE service=? ORDER BY found_at DESC LIMIT ?")
|
||||
rows = _sd().db.query(sql, (service, limit))
|
||||
else:
|
||||
sql = ("SELECT ip, port, service, username, password, found_at "
|
||||
"FROM credentials ORDER BY found_at DESC LIMIT ?")
|
||||
rows = _sd().db.query(sql, (limit,))
|
||||
return json.dumps([dict(r) for r in rows] if rows else [], default=str)
|
||||
except Exception as e:
|
||||
return json.dumps({"error": str(e)})
|
||||
|
||||
|
||||
def _impl_get_action_history(limit: int = 50, action_name: Optional[str] = None) -> str:
|
||||
try:
|
||||
if action_name:
|
||||
sql = ("SELECT action_name, target_ip, status, result, started_at, finished_at "
|
||||
"FROM action_history WHERE action_name=? ORDER BY started_at DESC LIMIT ?")
|
||||
rows = _sd().db.query(sql, (action_name, limit))
|
||||
else:
|
||||
sql = ("SELECT action_name, target_ip, status, result, started_at, finished_at "
|
||||
"FROM action_history ORDER BY started_at DESC LIMIT ?")
|
||||
rows = _sd().db.query(sql, (limit,))
|
||||
return json.dumps([dict(r) for r in rows] if rows else [], default=str)
|
||||
except Exception as e:
|
||||
return json.dumps({"error": str(e)})
|
||||
|
||||
|
||||
def _impl_get_status() -> str:
|
||||
try:
|
||||
sd = _sd()
|
||||
return json.dumps({
|
||||
"operation_mode": sd.operation_mode,
|
||||
"active_action": getattr(sd, "active_action", None),
|
||||
"bjorn_status": getattr(sd, "bjorn_status_text", "IDLE"),
|
||||
"bjorn_says": getattr(sd, "bjorn_says", ""),
|
||||
"hosts_discovered": getattr(sd, "target_count", 0),
|
||||
"vulnerabilities": getattr(sd, "vuln_count", 0),
|
||||
"credentials": getattr(sd, "cred_count", 0),
|
||||
"current_ip": getattr(sd, "current_ip", ""),
|
||||
"current_ssid": getattr(sd, "current_ssid", ""),
|
||||
})
|
||||
except Exception as e:
|
||||
return json.dumps({"error": str(e)})
|
||||
|
||||
|
||||
_MCP_PRIORITY = 80 # Higher than scheduler default (40) and queue_action default (50)
|
||||
|
||||
|
||||
def _impl_run_action(action_name: str, target_ip: str, target_mac: str = "") -> str:
|
||||
"""Queue a Bjorn action with MCP priority boost. Returns queue confirmation."""
|
||||
try:
|
||||
sd = _sd()
|
||||
|
||||
# Resolve MAC from IP if not supplied
|
||||
mac = target_mac or ""
|
||||
if not mac and target_ip:
|
||||
try:
|
||||
row = sd.db.query_one(
|
||||
"SELECT mac_address FROM hosts WHERE ip=? LIMIT 1", (target_ip,)
|
||||
)
|
||||
if row:
|
||||
mac = row["mac_address"]
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
sd.db.queue_action(
|
||||
action_name=action_name,
|
||||
mac=mac,
|
||||
ip=target_ip,
|
||||
priority=_MCP_PRIORITY,
|
||||
trigger="mcp",
|
||||
metadata={"decision_method": "mcp", "decision_origin": "mcp"},
|
||||
)
|
||||
|
||||
# Wake the orchestrator immediately (it sleeps up to 5 s when idle)
|
||||
try:
|
||||
sd.queue_event.set()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return json.dumps({
|
||||
"status": "queued",
|
||||
"action": action_name,
|
||||
"target": target_ip,
|
||||
"priority": _MCP_PRIORITY,
|
||||
})
|
||||
except Exception as e:
|
||||
return json.dumps({"error": str(e)})
|
||||
|
||||
|
||||
def _impl_query_db(sql: str, params: Optional[List] = None) -> str:
|
||||
"""Run a read-only SELECT query. Non-SELECT statements are rejected."""
|
||||
try:
|
||||
stripped = sql.strip().upper()
|
||||
if not stripped.startswith("SELECT"):
|
||||
return json.dumps({"error": "Only SELECT queries are allowed."})
|
||||
rows = _sd().db.query(sql, tuple(params or []))
|
||||
return json.dumps([dict(r) for r in rows] if rows else [], default=str)
|
||||
except Exception as e:
|
||||
return json.dumps({"error": str(e)})
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# MCP Server setup (requires `pip install mcp`)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _build_mcp_server():
|
||||
"""Build and return a FastMCP server instance, or None if mcp not available."""
|
||||
try:
|
||||
from mcp.server.fastmcp import FastMCP
|
||||
except ImportError:
|
||||
logger.warning("mcp package not installed — MCP server disabled. "
|
||||
"Run: pip install mcp")
|
||||
return None
|
||||
|
||||
mcp = FastMCP(
|
||||
name="bjorn",
|
||||
version="1.0.0",
|
||||
instructions=(
|
||||
"Bjorn is a Raspberry Pi network security tool. "
|
||||
"Use these tools to query discovered hosts, vulnerabilities, credentials, "
|
||||
"and action history, or to queue new actions."
|
||||
),
|
||||
)
|
||||
|
||||
# ---- Tool registrations ----------------------------------------
|
||||
|
||||
@mcp.tool()
|
||||
def get_hosts(alive_only: bool = True) -> str:
|
||||
"""Return all network hosts discovered by Bjorn's scanner.
|
||||
Set alive_only=false to include hosts that are currently offline."""
|
||||
if not _tool_allowed("get_hosts"):
|
||||
return json.dumps({"error": "Tool disabled in Bjorn MCP config."})
|
||||
return _impl_get_hosts(alive_only)
|
||||
|
||||
@mcp.tool()
|
||||
def get_vulnerabilities(host_ip: str = "", limit: int = 100) -> str:
|
||||
"""Return discovered vulnerabilities. Optionally filter by host_ip."""
|
||||
if not _tool_allowed("get_vulnerabilities"):
|
||||
return json.dumps({"error": "Tool disabled in Bjorn MCP config."})
|
||||
return _impl_get_vulnerabilities(host_ip or None, limit)
|
||||
|
||||
@mcp.tool()
|
||||
def get_credentials(service: str = "", limit: int = 100) -> str:
|
||||
"""Return captured credentials. Optionally filter by service (ssh, ftp, smb…)."""
|
||||
if not _tool_allowed("get_credentials"):
|
||||
return json.dumps({"error": "Tool disabled in Bjorn MCP config."})
|
||||
return _impl_get_credentials(service or None, limit)
|
||||
|
||||
@mcp.tool()
|
||||
def get_action_history(limit: int = 50, action_name: str = "") -> str:
|
||||
"""Return the history of executed actions, most recent first."""
|
||||
if not _tool_allowed("get_action_history"):
|
||||
return json.dumps({"error": "Tool disabled in Bjorn MCP config."})
|
||||
return _impl_get_action_history(limit, action_name or None)
|
||||
|
||||
@mcp.tool()
|
||||
def get_status() -> str:
|
||||
"""Return Bjorn's current operational status, counters, and active action."""
|
||||
if not _tool_allowed("get_status"):
|
||||
return json.dumps({"error": "Tool disabled in Bjorn MCP config."})
|
||||
return _impl_get_status()
|
||||
|
||||
@mcp.tool()
|
||||
def run_action(action_name: str, target_ip: str, target_mac: str = "") -> str:
|
||||
"""Queue a Bjorn action (e.g. ssh_bruteforce) against target_ip.
|
||||
The action will be executed by Bjorn's orchestrator."""
|
||||
if not _tool_allowed("run_action"):
|
||||
return json.dumps({"error": "Tool disabled in Bjorn MCP config."})
|
||||
return _impl_run_action(action_name, target_ip, target_mac)
|
||||
|
||||
@mcp.tool()
|
||||
def query_db(sql: str, params: str = "[]") -> str:
|
||||
"""Run a read-only SELECT query against Bjorn's SQLite database.
|
||||
params must be a JSON array of bind parameters."""
|
||||
if not _tool_allowed("query_db"):
|
||||
return json.dumps({"error": "Tool disabled in Bjorn MCP config."})
|
||||
try:
|
||||
p = json.loads(params)
|
||||
except Exception:
|
||||
p = []
|
||||
return _impl_query_db(sql, p)
|
||||
|
||||
return mcp
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Server lifecycle
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_server_thread: Optional[threading.Thread] = None
|
||||
_mcp_instance = None
|
||||
|
||||
|
||||
def start(block: bool = False) -> bool:
|
||||
"""
|
||||
Start the MCP server in a daemon thread.
|
||||
|
||||
Args:
|
||||
block: If True, run in the calling thread (for stdio mode).
|
||||
|
||||
Returns:
|
||||
True if started successfully, False otherwise.
|
||||
"""
|
||||
global _server_thread, _mcp_instance
|
||||
|
||||
sd = _sd()
|
||||
if not sd.config.get("mcp_enabled", False):
|
||||
logger.debug("MCP server disabled in config (mcp_enabled=False)")
|
||||
return False
|
||||
|
||||
mcp = _build_mcp_server()
|
||||
if mcp is None:
|
||||
return False
|
||||
|
||||
_mcp_instance = mcp
|
||||
transport = sd.config.get("mcp_transport", "http")
|
||||
port = int(sd.config.get("mcp_port", 8765))
|
||||
|
||||
def _run():
|
||||
try:
|
||||
if transport == "stdio":
|
||||
logger.info("MCP server starting (stdio transport)")
|
||||
mcp.run(transport="stdio")
|
||||
else:
|
||||
logger.info(f"MCP server starting (HTTP SSE transport, port {port})")
|
||||
# FastMCP HTTP SSE — runs uvicorn internally
|
||||
mcp.run(transport="sse", port=port)
|
||||
except Exception as e:
|
||||
logger.error(f"MCP server error: {e}")
|
||||
|
||||
if block:
|
||||
_run()
|
||||
return True
|
||||
|
||||
_server_thread = threading.Thread(target=_run, daemon=True, name="MCPServer")
|
||||
_server_thread.start()
|
||||
logger.info(f"MCP server thread started (transport={transport})")
|
||||
return True
|
||||
|
||||
|
||||
def stop() -> None:
|
||||
"""Signal MCP server to stop (best-effort — FastMCP handles cleanup)."""
|
||||
global _server_thread
|
||||
if _server_thread and _server_thread.is_alive():
|
||||
logger.info("MCP server thread stopping (daemon — will exit with process)")
|
||||
_server_thread = None
|
||||
|
||||
|
||||
def is_running() -> bool:
|
||||
return _server_thread is not None and _server_thread.is_alive()
|
||||
|
||||
|
||||
def server_status() -> Dict[str, Any]:
|
||||
sd = _sd()
|
||||
return {
|
||||
"enabled": sd.config.get("mcp_enabled", False),
|
||||
"running": is_running(),
|
||||
"transport": sd.config.get("mcp_transport", "http"),
|
||||
"port": sd.config.get("mcp_port", 8765),
|
||||
"allowed_tools": sd.config.get("mcp_allowed_tools", []),
|
||||
}
|
||||
@@ -70,9 +70,17 @@ class Orchestrator:
|
||||
self.data_consolidator = None
|
||||
self.ai_enabled = False
|
||||
|
||||
# ┌─────────────────────────────────────────────────────────┐
|
||||
# │ LLM Orchestrator (advisor / autonomous) │
|
||||
# └─────────────────────────────────────────────────────────┘
|
||||
self.llm_orchestrator = None
|
||||
self._init_llm_orchestrator()
|
||||
|
||||
# Load all available actions
|
||||
self.load_actions()
|
||||
logger.info(f"Actions loaded: {list(self.actions.keys())}")
|
||||
# Expose loaded action names so LLM orchestrator can discover them
|
||||
self.shared_data.loaded_action_names = list(self.actions.keys())
|
||||
|
||||
def _is_enabled_value(self, value: Any) -> bool:
|
||||
"""Robust parser for b_enabled values coming from DB."""
|
||||
@@ -218,6 +226,35 @@ class Orchestrator:
|
||||
interval_s=300.0,
|
||||
)
|
||||
|
||||
def _init_llm_orchestrator(self) -> None:
|
||||
"""Initialise LLMOrchestrator if a mode is configured and LLM is enabled."""
|
||||
try:
|
||||
mode = self.shared_data.config.get("llm_orchestrator_mode", "none")
|
||||
enabled = self.shared_data.config.get("llm_enabled", False)
|
||||
if mode == "none" or not enabled:
|
||||
return
|
||||
from llm_orchestrator import LLMOrchestrator
|
||||
self.llm_orchestrator = LLMOrchestrator(self.shared_data)
|
||||
self.llm_orchestrator.start()
|
||||
except Exception as e:
|
||||
logger.debug(f"LLM Orchestrator init skipped: {e}")
|
||||
|
||||
def _sync_llm_orchestrator(self) -> None:
|
||||
"""React to runtime changes of llm_orchestrator_mode / llm_enabled."""
|
||||
mode = self.shared_data.config.get("llm_orchestrator_mode", "none")
|
||||
enabled = self.shared_data.config.get("llm_enabled", False)
|
||||
|
||||
if mode == "none" or not enabled:
|
||||
if self.llm_orchestrator:
|
||||
self.llm_orchestrator.stop()
|
||||
self.llm_orchestrator = None
|
||||
return
|
||||
|
||||
if self.llm_orchestrator is None:
|
||||
self._init_llm_orchestrator()
|
||||
else:
|
||||
self.llm_orchestrator.restart_if_mode_changed()
|
||||
|
||||
def _disable_ai_components(self) -> None:
|
||||
"""Drop AI-specific helpers when leaving AI mode.
|
||||
FeatureLogger is kept alive so AUTO mode still collects data."""
|
||||
@@ -765,6 +802,7 @@ class Orchestrator:
|
||||
try:
|
||||
# Allow live mode switching from the UI without restarting the process.
|
||||
self._sync_ai_components()
|
||||
self._sync_llm_orchestrator()
|
||||
|
||||
# Get next action from queue
|
||||
next_action = self.get_next_action()
|
||||
@@ -827,6 +865,9 @@ class Orchestrator:
|
||||
self.shared_data.update_status("IDLE", "")
|
||||
|
||||
# Cleanup on exit (OUTSIDE while loop)
|
||||
if self.llm_orchestrator:
|
||||
self.llm_orchestrator.stop()
|
||||
|
||||
if self.scheduler:
|
||||
self.scheduler.stop()
|
||||
self.shared_data.queue_event.set()
|
||||
@@ -839,6 +880,13 @@ class Orchestrator:
|
||||
|
||||
def _process_background_tasks(self):
|
||||
"""Run periodic tasks like consolidation, upload retries, and model updates (AI mode only)."""
|
||||
# LLM advisor mode — runs regardless of AI mode
|
||||
if self.llm_orchestrator and self.shared_data.config.get("llm_orchestrator_mode") == "advisor":
|
||||
try:
|
||||
self.llm_orchestrator.advise()
|
||||
except Exception as e:
|
||||
logger.debug(f"LLM advisor background call error: {e}")
|
||||
|
||||
if not (self.ai_enabled and self.shared_data.operation_mode == "AI"):
|
||||
return
|
||||
|
||||
|
||||
@@ -9,3 +9,9 @@ pysmb==1.2.10
|
||||
pymysql==1.1.1
|
||||
sqlalchemy==2.0.36
|
||||
python-nmap==0.7.1
|
||||
|
||||
# ── LLM / MCP / Discovery ─────────────────────────────────────────────
|
||||
zeroconf>=0.131.0 # LaRuche/LAND auto-discovery via mDNS
|
||||
#
|
||||
# Optional — install to unlock extra features:
|
||||
# mcp[cli]>=1.0.0 # MCP server (pip install "mcp[cli]")
|
||||
|
||||
50
shared.py
50
shared.py
@@ -488,6 +488,56 @@ class SharedData:
|
||||
"loki_typing_speed_max": 0,
|
||||
"loki_scripts_path": "/root/loki/scripts",
|
||||
"loki_auto_run": "",
|
||||
|
||||
# LLM Bridge
|
||||
"__title_llm__": "LLM Bridge",
|
||||
"llm_enabled": False,
|
||||
"llm_comments_enabled": False, # Use LLM to generate EPD comments (fallback to DB if disabled/fails)
|
||||
"llm_comments_log": False, # Log LLM-generated EPD comments to logger.info
|
||||
"llm_chat_enabled": True, # Enable /chat.html interface
|
||||
"llm_backend": "auto", # auto | laruche | ollama | api
|
||||
"llm_laruche_discovery": True, # Auto-discover LaRuche nodes via mDNS
|
||||
"llm_laruche_url": "", # Manual LaRuche node URL (overrides discovery)
|
||||
"llm_laruche_model": "", # Model to use on LaRuche (empty = node default)
|
||||
"llm_ollama_url": "http://127.0.0.1:11434",
|
||||
"llm_ollama_model": "phi3:mini",
|
||||
"llm_api_provider": "anthropic", # anthropic | openai | openrouter
|
||||
"llm_api_key": "",
|
||||
"llm_api_model": "claude-haiku-4-5-20251001",
|
||||
"llm_api_base_url": "", # Custom base URL (OpenRouter / local proxy)
|
||||
"llm_timeout_s": 30,
|
||||
"llm_max_tokens": 500,
|
||||
"llm_comment_max_tokens": 80, # Keep short for EPD display
|
||||
"llm_chat_history_size": 20,
|
||||
"llm_chat_tools_enabled": False, # Enable MCP tool-calling from chat UI
|
||||
|
||||
# LLM Orchestrator
|
||||
"__title_llm_orch__": "LLM Orchestrator",
|
||||
"llm_orchestrator_mode": "none", # none | advisor | autonomous
|
||||
"llm_orchestrator_interval_s": 60, # Seconds between autonomous cycles
|
||||
"llm_orchestrator_max_actions": 3, # Max actions queued per autonomous cycle
|
||||
"llm_orchestrator_allowed_actions": [], # Whitelist (empty = inherit mcp_allowed_tools)
|
||||
"llm_orchestrator_skip_scheduler": False, # True = disable scheduler trigger eval (LLM-only mode)
|
||||
"llm_orchestrator_skip_if_no_change": True, # True = skip LLM cycle when nothing new (save tokens)
|
||||
"llm_orchestrator_log_reasoning": False, # True = log full LLM reasoning + push to chat history
|
||||
|
||||
# MCP Server
|
||||
"__title_mcp__": "MCP Server",
|
||||
"mcp_enabled": False,
|
||||
"mcp_transport": "http", # http | stdio
|
||||
"mcp_port": 8765,
|
||||
"mcp_allowed_tools": [
|
||||
"get_hosts", "get_vulnerabilities", "get_credentials",
|
||||
"get_action_history", "get_status", "run_action", "query_db"
|
||||
],
|
||||
|
||||
# EPD Buttons (disabled by default — not all users have buttons)
|
||||
"__title_epd_buttons__": "EPD Buttons",
|
||||
"epd_buttons_enabled": False,
|
||||
"epd_button_a_pin": 5,
|
||||
"epd_button_b_pin": 6,
|
||||
"epd_button_c_pin": 13,
|
||||
"epd_button_d_pin": 19,
|
||||
}
|
||||
|
||||
@property
|
||||
|
||||
1
utils.py
1
utils.py
@@ -25,6 +25,7 @@ class WebUtils:
|
||||
"sentinel": ("web_utils.sentinel_utils", "SentinelUtils"),
|
||||
"bifrost": ("web_utils.bifrost_utils", "BifrostUtils"),
|
||||
"loki": ("web_utils.loki_utils", "LokiUtils"),
|
||||
"llm_utils": ("web_utils.llm_utils", "LLMUtils"),
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -17,3 +17,4 @@
|
||||
@import url("./pages/sentinel.css");
|
||||
@import url("./pages/bifrost.css");
|
||||
@import url("./pages/loki.css");
|
||||
@import url("./pages/llm.css");
|
||||
|
||||
425
web/css/pages/llm.css
Normal file
425
web/css/pages/llm.css
Normal file
@@ -0,0 +1,425 @@
|
||||
/* ==========================================================================
|
||||
llm.css — LLM Chat & LLM Config SPA pages
|
||||
========================================================================== */
|
||||
|
||||
/* ── LLM Chat ─────────────────────────────────────────────────────────── */
|
||||
|
||||
.llmc-page {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
height: calc(100vh - var(--h-topbar, 56px) - var(--h-bottombar, 56px));
|
||||
font-family: 'Courier New', Courier, monospace;
|
||||
background: var(--bg);
|
||||
color: var(--ink);
|
||||
min-height: 0;
|
||||
}
|
||||
|
||||
.llmc-header {
|
||||
background: var(--c-panel);
|
||||
border-bottom: 1px solid var(--c-border);
|
||||
padding: 10px 16px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 10px;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
.llmc-dot {
|
||||
width: 8px;
|
||||
height: 8px;
|
||||
border-radius: 50%;
|
||||
background: var(--muted-off);
|
||||
flex-shrink: 0;
|
||||
}
|
||||
.llmc-dot.online { background: var(--ok); }
|
||||
.llmc-dot.offline { background: var(--danger); }
|
||||
|
||||
.llmc-title {
|
||||
font-size: 13px;
|
||||
color: var(--acid);
|
||||
letter-spacing: 2px;
|
||||
font-weight: 700;
|
||||
}
|
||||
|
||||
.llmc-status {
|
||||
font-size: 11px;
|
||||
color: var(--muted);
|
||||
}
|
||||
|
||||
.llmc-btn-ghost {
|
||||
background: transparent;
|
||||
border: 1px solid var(--c-border);
|
||||
color: var(--muted);
|
||||
padding: 3px 10px;
|
||||
font-size: 11px;
|
||||
cursor: pointer;
|
||||
font-family: inherit;
|
||||
transition: border-color .15s, color .15s;
|
||||
}
|
||||
.llmc-btn-ghost:hover { border-color: var(--acid); color: var(--ink); }
|
||||
.llmc-btn-ghost.active { border-color: var(--accent-2); color: var(--accent-2); background: color-mix(in oklab, var(--accent-2) 8%, transparent); }
|
||||
|
||||
.llmc-clear-btn { margin-left: auto; }
|
||||
|
||||
.llmc-messages {
|
||||
flex: 1;
|
||||
overflow-y: auto;
|
||||
padding: 14px 16px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 10px;
|
||||
min-height: 0;
|
||||
}
|
||||
|
||||
.llmc-msg {
|
||||
max-width: 88%;
|
||||
padding: 9px 13px;
|
||||
border-radius: 4px;
|
||||
font-size: 12px;
|
||||
line-height: 1.55;
|
||||
white-space: pre-wrap;
|
||||
word-break: break-word;
|
||||
}
|
||||
.llmc-msg.user {
|
||||
background: color-mix(in oklab, var(--ok) 8%, transparent);
|
||||
border: 1px solid color-mix(in oklab, var(--ok) 25%, transparent);
|
||||
align-self: flex-end;
|
||||
color: color-mix(in oklab, var(--ok) 85%, var(--ink) 15%);
|
||||
}
|
||||
.llmc-msg.assistant {
|
||||
background: color-mix(in oklab, var(--accent-2) 6%, transparent);
|
||||
border: 1px solid color-mix(in oklab, var(--accent-2) 20%, transparent);
|
||||
align-self: flex-start;
|
||||
}
|
||||
.llmc-msg.system {
|
||||
background: transparent;
|
||||
border: 1px dashed var(--c-border);
|
||||
align-self: center;
|
||||
color: var(--muted);
|
||||
font-size: 11px;
|
||||
padding: 5px 12px;
|
||||
}
|
||||
|
||||
.llmc-msg-role {
|
||||
font-size: 10px;
|
||||
color: var(--muted);
|
||||
margin-bottom: 3px;
|
||||
letter-spacing: 1px;
|
||||
}
|
||||
.llmc-msg.user .llmc-msg-role { color: color-mix(in oklab, var(--ok) 70%, var(--muted)); }
|
||||
.llmc-msg.assistant .llmc-msg-role { color: var(--acid); }
|
||||
|
||||
.llmc-thinking {
|
||||
align-self: flex-start;
|
||||
color: var(--muted);
|
||||
font-size: 12px;
|
||||
padding: 6px 16px;
|
||||
border-left: 2px solid var(--danger);
|
||||
font-family: 'Courier New', monospace;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
.llmc-disabled-msg {
|
||||
text-align: center;
|
||||
padding: 20px;
|
||||
color: var(--muted);
|
||||
font-size: 12px;
|
||||
}
|
||||
.llmc-disabled-msg a { color: var(--acid); }
|
||||
|
||||
.llmc-input-row {
|
||||
background: var(--c-panel);
|
||||
border-top: 1px solid var(--c-border);
|
||||
padding: 10px 12px;
|
||||
display: flex;
|
||||
gap: 8px;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
.llmc-input {
|
||||
flex: 1;
|
||||
background: var(--bg);
|
||||
border: 1px solid var(--c-border);
|
||||
color: var(--ink);
|
||||
padding: 9px 12px;
|
||||
font-family: 'Courier New', Courier, monospace;
|
||||
font-size: 12px;
|
||||
resize: none;
|
||||
outline: none;
|
||||
height: 44px;
|
||||
max-height: 120px;
|
||||
overflow-y: auto;
|
||||
}
|
||||
.llmc-input:focus { border-color: var(--acid); }
|
||||
|
||||
.llmc-send-btn {
|
||||
background: var(--danger);
|
||||
border: none;
|
||||
color: white;
|
||||
padding: 0 16px;
|
||||
font-size: 13px;
|
||||
cursor: pointer;
|
||||
font-family: inherit;
|
||||
letter-spacing: 1px;
|
||||
transition: background .15s;
|
||||
}
|
||||
.llmc-send-btn:hover { background: color-mix(in oklab, var(--danger) 80%, white 20%); }
|
||||
.llmc-send-btn:disabled { background: var(--muted-off); cursor: not-allowed; }
|
||||
|
||||
/* ── LLM Config ───────────────────────────────────────────────────────── */
|
||||
|
||||
.llmcfg-page {
|
||||
font-family: 'Courier New', Courier, monospace;
|
||||
color: var(--ink);
|
||||
background: var(--bg);
|
||||
min-height: calc(100vh - var(--h-topbar, 56px) - var(--h-bottombar, 56px));
|
||||
}
|
||||
|
||||
.llmcfg-header {
|
||||
background: var(--c-panel);
|
||||
border-bottom: 1px solid var(--c-border);
|
||||
padding: 12px 20px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 12px;
|
||||
}
|
||||
|
||||
.llmcfg-title {
|
||||
font-size: 13px;
|
||||
color: var(--acid);
|
||||
letter-spacing: 2px;
|
||||
font-weight: 700;
|
||||
}
|
||||
|
||||
.llmcfg-nav-link {
|
||||
margin-left: auto;
|
||||
color: var(--muted);
|
||||
text-decoration: none;
|
||||
font-size: 11px;
|
||||
}
|
||||
.llmcfg-nav-link:hover { color: var(--ink); }
|
||||
|
||||
.llmcfg-container {
|
||||
max-width: 780px;
|
||||
margin: 0 auto;
|
||||
padding: 20px 16px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 18px;
|
||||
}
|
||||
|
||||
.llmcfg-section {
|
||||
background: var(--c-panel);
|
||||
border: 1px solid var(--c-border);
|
||||
border-radius: 4px;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.llmcfg-section-title {
|
||||
padding: 10px 14px;
|
||||
background: var(--c-panel-2);
|
||||
border-bottom: 1px solid var(--c-border);
|
||||
font-size: 11px;
|
||||
letter-spacing: 2px;
|
||||
color: var(--acid);
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 10px;
|
||||
}
|
||||
|
||||
.llmcfg-badge {
|
||||
font-size: 10px;
|
||||
padding: 1px 8px;
|
||||
border-radius: 2px;
|
||||
}
|
||||
.llmcfg-badge.on { background: var(--ok); color: #000; }
|
||||
.llmcfg-badge.off { background: var(--c-border); color: var(--muted); }
|
||||
|
||||
.llmcfg-body {
|
||||
padding: 14px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 12px;
|
||||
}
|
||||
|
||||
.llmcfg-subsection-title {
|
||||
font-size: 10px;
|
||||
letter-spacing: 2px;
|
||||
color: var(--muted);
|
||||
border-top: 1px solid var(--c-border);
|
||||
padding-top: 10px;
|
||||
margin-top: 2px;
|
||||
}
|
||||
|
||||
.llmcfg-field {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 4px;
|
||||
}
|
||||
|
||||
.llmcfg-label {
|
||||
font-size: 10px;
|
||||
color: var(--muted);
|
||||
letter-spacing: 1px;
|
||||
}
|
||||
|
||||
.llmcfg-input,
|
||||
.llmcfg-select {
|
||||
background: var(--bg);
|
||||
border: 1px solid var(--c-border);
|
||||
color: var(--ink);
|
||||
padding: 7px 10px;
|
||||
font-family: inherit;
|
||||
font-size: 11px;
|
||||
outline: none;
|
||||
width: 100%;
|
||||
}
|
||||
.llmcfg-input:focus,
|
||||
.llmcfg-select:focus { border-color: var(--acid); }
|
||||
.llmcfg-input[type="password"] { letter-spacing: 2px; }
|
||||
|
||||
.llmcfg-row {
|
||||
display: flex;
|
||||
gap: 10px;
|
||||
}
|
||||
.llmcfg-row .llmcfg-field { flex: 1; }
|
||||
|
||||
.llmcfg-toggle-row {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
gap: 10px;
|
||||
}
|
||||
.llmcfg-toggle-label {
|
||||
font-size: 12px;
|
||||
color: var(--ink);
|
||||
}
|
||||
|
||||
.llmcfg-toggle {
|
||||
position: relative;
|
||||
width: 44px;
|
||||
height: 24px;
|
||||
flex-shrink: 0;
|
||||
cursor: pointer;
|
||||
}
|
||||
.llmcfg-toggle input { opacity: 0; width: 0; height: 0; position: absolute; }
|
||||
.llmcfg-slider {
|
||||
position: absolute;
|
||||
inset: 0;
|
||||
background: var(--c-border-strong);
|
||||
transition: .2s;
|
||||
cursor: pointer;
|
||||
}
|
||||
.llmcfg-slider::before {
|
||||
content: '';
|
||||
position: absolute;
|
||||
height: 18px;
|
||||
width: 18px;
|
||||
left: 3px;
|
||||
top: 3px;
|
||||
background: var(--muted-off);
|
||||
transition: .2s;
|
||||
}
|
||||
.llmcfg-toggle input:checked + .llmcfg-slider { background: var(--danger); }
|
||||
.llmcfg-toggle input:checked + .llmcfg-slider::before { transform: translateX(20px); background: white; }
|
||||
|
||||
.llmcfg-tools-grid {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(auto-fill, minmax(180px, 1fr));
|
||||
gap: 7px;
|
||||
padding-top: 4px;
|
||||
}
|
||||
.llmcfg-tool-item {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 7px;
|
||||
font-size: 11px;
|
||||
cursor: pointer;
|
||||
color: var(--ink);
|
||||
}
|
||||
.llmcfg-tool-item input[type="checkbox"] { accent-color: var(--acid); }
|
||||
|
||||
.llmcfg-status-row {
|
||||
font-size: 11px;
|
||||
color: var(--muted);
|
||||
border-top: 1px solid var(--c-border);
|
||||
padding-top: 8px;
|
||||
margin-top: 2px;
|
||||
}
|
||||
|
||||
.llmcfg-actions {
|
||||
display: flex;
|
||||
gap: 8px;
|
||||
flex-wrap: wrap;
|
||||
}
|
||||
|
||||
.llmcfg-btn {
|
||||
padding: 7px 16px;
|
||||
font-family: inherit;
|
||||
font-size: 11px;
|
||||
cursor: pointer;
|
||||
border: 1px solid var(--c-border);
|
||||
background: transparent;
|
||||
color: var(--ink);
|
||||
letter-spacing: 1px;
|
||||
transition: border-color .15s, color .15s;
|
||||
}
|
||||
.llmcfg-btn:hover { border-color: var(--acid); color: var(--acid); }
|
||||
.llmcfg-btn.primary { background: var(--danger); border-color: var(--danger); color: white; }
|
||||
.llmcfg-btn.primary:hover { background: color-mix(in oklab, var(--danger) 80%, white 20%); border-color: inherit; color: white; }
|
||||
.llmcfg-btn.compact { padding: 5px 10px; flex-shrink: 0; }
|
||||
|
||||
/* LaRuche discovery row */
|
||||
.llmcfg-url-row { display: flex; gap: 6px; align-items: center; }
|
||||
.llmcfg-url-row .llmcfg-input { flex: 1; }
|
||||
|
||||
/* Model selector row */
|
||||
.llmcfg-model-row { display: flex; gap: 6px; align-items: center; }
|
||||
.llmcfg-model-row .llmcfg-select { flex: 1; }
|
||||
|
||||
.llmcfg-discovery-row {
|
||||
font-size: 11px;
|
||||
padding: 4px 0;
|
||||
min-height: 18px;
|
||||
}
|
||||
.llmcfg-disc-found { color: var(--ok); }
|
||||
.llmcfg-disc-searching { color: var(--muted); }
|
||||
.llmcfg-disc-off { color: var(--danger); opacity: .7; }
|
||||
|
||||
/* LaRuche default model info */
|
||||
.llmcfg-laruche-default {
|
||||
font-size: 11px;
|
||||
padding: 2px 0 0;
|
||||
min-height: 14px;
|
||||
}
|
||||
.llmcfg-laruche-default-label {
|
||||
color: var(--muted);
|
||||
}
|
||||
.llmcfg-laruche-default-value {
|
||||
color: #facc15;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
/* Personality & Prompts textarea */
|
||||
.llmcfg-textarea {
|
||||
width: 100%;
|
||||
min-height: 70px;
|
||||
resize: vertical;
|
||||
background: var(--bg);
|
||||
border: 1px solid var(--c-border);
|
||||
border-radius: 4px;
|
||||
color: var(--ink);
|
||||
padding: 7px 10px;
|
||||
font-family: 'Courier New', Courier, monospace;
|
||||
font-size: 0.75rem;
|
||||
line-height: 1.4;
|
||||
}
|
||||
.llmcfg-textarea:focus { border-color: var(--acid); }
|
||||
.llmcfg-textarea::placeholder { color: var(--muted); opacity: 0.5; font-size: 0.65rem; }
|
||||
.llmcfg-reset-btn {
|
||||
margin-top: 4px;
|
||||
font-size: 0.6rem;
|
||||
padding: 2px 8px;
|
||||
opacity: 0.6;
|
||||
}
|
||||
.llmcfg-reset-btn:hover { opacity: 1; }
|
||||
@@ -291,6 +291,42 @@
|
||||
background: var(--c-cancel);
|
||||
}
|
||||
|
||||
/* Origin badge — who queued this action */
|
||||
.scheduler-container .originBadge {
|
||||
display: inline-block;
|
||||
font-size: .68rem;
|
||||
letter-spacing: .5px;
|
||||
padding: .1rem .5rem;
|
||||
border-radius: 2px;
|
||||
font-weight: 600;
|
||||
margin-bottom: .2rem;
|
||||
}
|
||||
.scheduler-container .origin-llm {
|
||||
background: color-mix(in oklab, var(--danger) 18%, transparent);
|
||||
color: var(--danger);
|
||||
border: 1px solid color-mix(in oklab, var(--danger) 35%, transparent);
|
||||
}
|
||||
.scheduler-container .origin-ai {
|
||||
background: color-mix(in oklab, var(--accent-2, #a78bfa) 15%, transparent);
|
||||
color: var(--accent-2, #a78bfa);
|
||||
border: 1px solid color-mix(in oklab, var(--accent-2, #a78bfa) 30%, transparent);
|
||||
}
|
||||
.scheduler-container .origin-mcp {
|
||||
background: color-mix(in oklab, var(--acid) 12%, transparent);
|
||||
color: var(--acid);
|
||||
border: 1px solid color-mix(in oklab, var(--acid) 25%, transparent);
|
||||
}
|
||||
.scheduler-container .origin-manual {
|
||||
background: color-mix(in oklab, var(--ok) 12%, transparent);
|
||||
color: var(--ok);
|
||||
border: 1px solid color-mix(in oklab, var(--ok) 25%, transparent);
|
||||
}
|
||||
.scheduler-container .origin-heuristic {
|
||||
background: color-mix(in oklab, var(--muted) 12%, transparent);
|
||||
color: var(--muted);
|
||||
border: 1px solid color-mix(in oklab, var(--muted) 25%, transparent);
|
||||
}
|
||||
|
||||
/* Collapsed */
|
||||
.scheduler-container .card.collapsed .kv,
|
||||
.scheduler-container .card.collapsed .tags,
|
||||
@@ -334,8 +370,7 @@
|
||||
height: 80px;
|
||||
object-fit: contain;
|
||||
border-radius: 6px;
|
||||
background: var(--panel);
|
||||
border: 1px solid var(--c-border);
|
||||
|
||||
}
|
||||
|
||||
.scheduler-container .card.status-running .actionIcon {
|
||||
|
||||
@@ -364,3 +364,32 @@
|
||||
align-items: flex-start;
|
||||
}
|
||||
}
|
||||
|
||||
/* ── AI Sentinel elements ─────────────────────────────── */
|
||||
.sentinel-ai-btn {
|
||||
background: rgba(168, 85, 247, .1) !important;
|
||||
border-color: rgba(168, 85, 247, .3) !important;
|
||||
color: #c084fc !important;
|
||||
}
|
||||
.sentinel-ai-btn:hover {
|
||||
background: rgba(168, 85, 247, .2) !important;
|
||||
}
|
||||
.sentinel-ai-result {
|
||||
margin: 4px 0 0;
|
||||
padding: 6px 8px;
|
||||
background: rgba(168, 85, 247, .06);
|
||||
border: 1px solid rgba(168, 85, 247, .15);
|
||||
border-radius: 4px;
|
||||
font-size: 0.7rem;
|
||||
white-space: pre-wrap;
|
||||
line-height: 1.4;
|
||||
display: none;
|
||||
}
|
||||
.sentinel-ai-result.active { display: block; }
|
||||
.sentinel-ai-summary {
|
||||
padding: 8px;
|
||||
background: rgba(59, 130, 246, .06);
|
||||
border: 1px solid rgba(59, 130, 246, .15);
|
||||
border-radius: 6px;
|
||||
margin-bottom: 8px;
|
||||
}
|
||||
|
||||
@@ -404,6 +404,41 @@ body.console-docked .app-container {
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
/* Bjorn comment lines */
|
||||
.comment-line {
|
||||
color: #4ade80;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 5px;
|
||||
flex-wrap: wrap;
|
||||
}
|
||||
.comment-icon {
|
||||
display: inline-block;
|
||||
width: 1.15em;
|
||||
height: 1.15em;
|
||||
flex-shrink: 0;
|
||||
opacity: .85;
|
||||
vertical-align: middle;
|
||||
object-fit: contain;
|
||||
}
|
||||
.comment-status {
|
||||
opacity: .55;
|
||||
font-size: .9em;
|
||||
}
|
||||
.comment-llm-badge {
|
||||
display: inline-block;
|
||||
font-size: .7em;
|
||||
font-weight: 700;
|
||||
letter-spacing: .5px;
|
||||
padding: 1px 5px;
|
||||
border-radius: 4px;
|
||||
background: linear-gradient(180deg, rgba(168,85,247,.35), rgba(168,85,247,.2));
|
||||
border: 1px solid rgba(168,85,247,.5);
|
||||
color: #c4b5fd;
|
||||
vertical-align: middle;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
/* Console font slider row */
|
||||
.console-fontrow {
|
||||
flex-basis: 100%;
|
||||
@@ -723,17 +758,121 @@ body.console-docked .app-container {
|
||||
transform: scale(.96);
|
||||
}
|
||||
|
||||
/* QuickPanel rows & signal */
|
||||
/* ---- QuickPanel Tab Bar ---- */
|
||||
.qp-tabs {
|
||||
display: flex;
|
||||
gap: 0;
|
||||
margin: 0 16px 12px;
|
||||
border-bottom: 1px solid var(--c-border);
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.qp-tab {
|
||||
padding: 8px 20px;
|
||||
font-size: 13px;
|
||||
font-weight: 600;
|
||||
color: var(--muted);
|
||||
cursor: pointer;
|
||||
border-bottom: 2px solid transparent;
|
||||
transition: color .2s ease, border-color .2s ease;
|
||||
user-select: none;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 6px;
|
||||
}
|
||||
|
||||
.qp-tab:hover {
|
||||
color: var(--ink);
|
||||
}
|
||||
|
||||
.qp-tab.active {
|
||||
color: var(--acid);
|
||||
border-bottom-color: var(--acid);
|
||||
}
|
||||
|
||||
.qp-tab-icon {
|
||||
font-size: 15px;
|
||||
opacity: .7;
|
||||
}
|
||||
|
||||
.qp-tab.active .qp-tab-icon {
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
/* ---- QuickPanel rows & cards ---- */
|
||||
.qprow {
|
||||
display: grid;
|
||||
gap: 10px;
|
||||
padding: 10px;
|
||||
padding: 12px;
|
||||
border: 1px solid var(--c-border);
|
||||
border-radius: 10px;
|
||||
background: var(--grad-qprow);
|
||||
border-radius: 12px;
|
||||
background: var(--grad-qprow, color-mix(in oklab, var(--c-panel, #0d1520) 80%, transparent));
|
||||
margin-bottom: 6px;
|
||||
transition: transform .15s ease, box-shadow .15s ease, opacity .15s ease, border-color .15s ease;
|
||||
animation: qpSlideIn .25s ease forwards;
|
||||
animation-delay: calc(var(--i, 0) * 40ms);
|
||||
opacity: 0;
|
||||
}
|
||||
|
||||
.qprow:hover {
|
||||
border-color: var(--c-border-strong, var(--c-border));
|
||||
box-shadow: 0 2px 8px rgba(0,0,0,.2);
|
||||
}
|
||||
|
||||
.qprow.connected {
|
||||
border-left: 3px solid var(--acid);
|
||||
}
|
||||
|
||||
@keyframes qpSlideIn {
|
||||
from { transform: translateY(8px); opacity: 0; }
|
||||
to { transform: translateY(0); opacity: 1; }
|
||||
}
|
||||
|
||||
/* ---- WiFi card layout ---- */
|
||||
.wifi-card {
|
||||
grid-template-columns: 1fr auto;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.wifi-card-info {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 2px;
|
||||
overflow: hidden;
|
||||
min-width: 0;
|
||||
}
|
||||
|
||||
.wifi-card-ssid {
|
||||
font-weight: 600;
|
||||
font-size: 14px;
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
.wifi-card-meta {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 8px;
|
||||
font-size: 12px;
|
||||
}
|
||||
|
||||
.wifi-card-actions {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 6px;
|
||||
}
|
||||
|
||||
.wifi-connected-chip {
|
||||
font-size: 11px;
|
||||
color: var(--acid);
|
||||
font-weight: 600;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 4px;
|
||||
}
|
||||
|
||||
/* ---- Signal bars ---- */
|
||||
.sig {
|
||||
display: inline-grid;
|
||||
grid-auto-flow: column;
|
||||
@@ -742,19 +881,124 @@ body.console-docked .app-container {
|
||||
}
|
||||
|
||||
.sig i {
|
||||
width: 4px;
|
||||
width: 5px;
|
||||
height: 6px;
|
||||
display: block;
|
||||
background: var(--c-slot);
|
||||
border: 1px solid var(--c-border);
|
||||
border-bottom: none;
|
||||
border-radius: 2px 2px 0 0;
|
||||
transition: background .2s ease;
|
||||
}
|
||||
|
||||
.sig i.on {
|
||||
background: var(--acid);
|
||||
}
|
||||
|
||||
/* ---- Known network cards ---- */
|
||||
.known-card {
|
||||
grid-template-columns: auto 1fr auto;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.known-card-grip {
|
||||
cursor: grab;
|
||||
color: var(--muted);
|
||||
font-size: 16px;
|
||||
padding: 4px 2px;
|
||||
user-select: none;
|
||||
touch-action: none;
|
||||
line-height: 1;
|
||||
}
|
||||
|
||||
.known-card-grip:active {
|
||||
cursor: grabbing;
|
||||
}
|
||||
|
||||
.known-card-info {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 2px;
|
||||
overflow: hidden;
|
||||
min-width: 0;
|
||||
}
|
||||
|
||||
.known-card-ssid {
|
||||
font-weight: 600;
|
||||
font-size: 14px;
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
.known-card-priority {
|
||||
font-size: 11px;
|
||||
color: var(--muted);
|
||||
}
|
||||
|
||||
.known-card-actions {
|
||||
display: flex;
|
||||
gap: 4px;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
/* ---- Drag & Drop ---- */
|
||||
.qprow.dragging {
|
||||
opacity: .4;
|
||||
transform: scale(.97);
|
||||
box-shadow: none;
|
||||
}
|
||||
|
||||
.drag-placeholder {
|
||||
height: 3px;
|
||||
background: var(--acid);
|
||||
border-radius: 99px;
|
||||
margin: 2px 0;
|
||||
box-shadow: 0 0 8px color-mix(in oklab, var(--acid) 50%, transparent);
|
||||
animation: qpSlideIn .15s ease forwards;
|
||||
}
|
||||
|
||||
/* ---- Multi-select ---- */
|
||||
.known-select-cb {
|
||||
width: 18px;
|
||||
height: 18px;
|
||||
accent-color: var(--acid);
|
||||
cursor: pointer;
|
||||
flex-shrink: 0;
|
||||
display: none;
|
||||
}
|
||||
|
||||
.edit-mode .known-select-cb {
|
||||
display: block;
|
||||
}
|
||||
|
||||
.edit-mode .known-card-grip {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.qp-batch-bar {
|
||||
position: sticky;
|
||||
bottom: 0;
|
||||
left: 0;
|
||||
right: 0;
|
||||
padding: 10px 16px;
|
||||
background: color-mix(in oklab, var(--c-panel, #0d1520) 95%, transparent);
|
||||
backdrop-filter: blur(8px);
|
||||
border-top: 1px solid var(--c-border-strong);
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
gap: 8px;
|
||||
animation: qpSlideUp .2s ease forwards;
|
||||
z-index: 5;
|
||||
}
|
||||
|
||||
@keyframes qpSlideUp {
|
||||
from { transform: translateY(100%); opacity: 0; }
|
||||
to { transform: translateY(0); opacity: 1; }
|
||||
}
|
||||
|
||||
/* ---- Bluetooth cards ---- */
|
||||
.btlist .qprow {
|
||||
grid-template-columns: 1fr auto;
|
||||
}
|
||||
@@ -763,6 +1007,78 @@ body.console-docked .app-container {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 10px;
|
||||
min-width: 0;
|
||||
}
|
||||
|
||||
.bt-icon {
|
||||
font-size: 20px;
|
||||
flex-shrink: 0;
|
||||
width: 32px;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.bt-device-info {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 2px;
|
||||
overflow: hidden;
|
||||
min-width: 0;
|
||||
}
|
||||
|
||||
.bt-device-name {
|
||||
font-weight: 600;
|
||||
font-size: 14px;
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
.bt-device-mac {
|
||||
font-size: 11px;
|
||||
color: var(--muted);
|
||||
font-family: monospace;
|
||||
}
|
||||
|
||||
.bt-state-chips {
|
||||
display: flex;
|
||||
gap: 4px;
|
||||
flex-wrap: wrap;
|
||||
margin-top: 2px;
|
||||
}
|
||||
|
||||
.bt-chip {
|
||||
font-size: 10px;
|
||||
padding: 1px 6px;
|
||||
border-radius: 99px;
|
||||
font-weight: 600;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 3px;
|
||||
}
|
||||
|
||||
.bt-chip-paired {
|
||||
background: color-mix(in oklab, var(--acid-2, #18f0ff) 14%, transparent);
|
||||
border: 1px solid color-mix(in oklab, var(--acid-2, #18f0ff) 40%, transparent);
|
||||
color: var(--acid-2, #18f0ff);
|
||||
}
|
||||
|
||||
.bt-chip-trusted {
|
||||
background: color-mix(in oklab, var(--ok, #2cff7e) 14%, transparent);
|
||||
border: 1px solid color-mix(in oklab, var(--ok, #2cff7e) 40%, transparent);
|
||||
color: var(--ok, #2cff7e);
|
||||
}
|
||||
|
||||
.bt-chip-connected {
|
||||
background: color-mix(in oklab, var(--acid) 14%, transparent);
|
||||
border: 1px solid color-mix(in oklab, var(--acid) 40%, transparent);
|
||||
color: var(--acid);
|
||||
}
|
||||
|
||||
.bt-actions {
|
||||
display: flex;
|
||||
gap: 4px;
|
||||
align-items: center;
|
||||
flex-wrap: wrap;
|
||||
}
|
||||
|
||||
.bt-type {
|
||||
@@ -770,6 +1086,105 @@ body.console-docked .app-container {
|
||||
font-size: 12px;
|
||||
}
|
||||
|
||||
/* ---- State dot ---- */
|
||||
.state-dot {
|
||||
width: 8px;
|
||||
height: 8px;
|
||||
border-radius: 50%;
|
||||
display: inline-block;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
.state-on {
|
||||
background: var(--acid);
|
||||
box-shadow: 0 0 6px color-mix(in oklab, var(--acid) 60%, transparent);
|
||||
}
|
||||
|
||||
.state-off {
|
||||
background: var(--muted);
|
||||
}
|
||||
|
||||
/* ---- QP Section headers ---- */
|
||||
.qp-section-header {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
padding: 8px 16px 4px;
|
||||
font-weight: 700;
|
||||
font-size: 13px;
|
||||
color: var(--muted);
|
||||
}
|
||||
|
||||
.qp-section-actions {
|
||||
display: flex;
|
||||
gap: 4px;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
/* ---- QP icon buttons ---- */
|
||||
.qp-icon-btn {
|
||||
width: 28px;
|
||||
height: 28px;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
border-radius: 6px;
|
||||
cursor: pointer;
|
||||
background: transparent;
|
||||
border: 1px solid transparent;
|
||||
color: var(--muted);
|
||||
font-size: 14px;
|
||||
transition: background .15s ease, color .15s ease, border-color .15s ease;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
.qp-icon-btn:hover {
|
||||
background: var(--white-06);
|
||||
border-color: var(--white-12);
|
||||
color: var(--ink);
|
||||
}
|
||||
|
||||
.qp-icon-btn.danger:hover {
|
||||
color: var(--danger, #ff3b3b);
|
||||
border-color: color-mix(in oklab, var(--danger, #ff3b3b) 30%, transparent);
|
||||
}
|
||||
|
||||
/* ---- QP toolbar ---- */
|
||||
.qp-toolbar {
|
||||
display: flex;
|
||||
gap: 6px;
|
||||
align-items: center;
|
||||
flex-wrap: wrap;
|
||||
padding: 0 16px 8px;
|
||||
}
|
||||
|
||||
.qp-toolbar-spacer {
|
||||
flex: 1;
|
||||
}
|
||||
|
||||
/* ---- QP loading spinner on buttons ---- */
|
||||
.qp-btn-loading {
|
||||
pointer-events: none;
|
||||
opacity: .6;
|
||||
}
|
||||
|
||||
.qp-btn-loading::after {
|
||||
content: "";
|
||||
display: inline-block;
|
||||
width: 12px;
|
||||
height: 12px;
|
||||
border: 2px solid currentColor;
|
||||
border-top-color: transparent;
|
||||
border-radius: 50%;
|
||||
animation: qpSpin .6s linear infinite;
|
||||
margin-left: 6px;
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
@keyframes qpSpin {
|
||||
to { transform: rotate(360deg); }
|
||||
}
|
||||
|
||||
/* ---- Actions Dropdown ---- */
|
||||
.actions {
|
||||
position: relative;
|
||||
@@ -2248,3 +2663,69 @@ input[type="color"].theme-input {
|
||||
grid-template-columns: repeat(3, 1fr);
|
||||
}
|
||||
}
|
||||
|
||||
/* ── Console footer (chat input) ────────────────────────── */
|
||||
.console-footer {
|
||||
display: flex;
|
||||
gap: 4px;
|
||||
padding: 4px 8px;
|
||||
border-top: 1px solid var(--c-border);
|
||||
flex-shrink: 0;
|
||||
background: var(--c-panel, #1a1a1e);
|
||||
}
|
||||
.console-input {
|
||||
flex: 1;
|
||||
background: var(--bg, #09090b);
|
||||
border: 1px solid var(--c-border, #2a2a2e);
|
||||
border-radius: 4px;
|
||||
color: var(--ink, #fafafa);
|
||||
padding: 4px 8px;
|
||||
resize: none;
|
||||
font-family: var(--font-mono, 'Courier New', monospace);
|
||||
font-size: var(--console-font, 11px);
|
||||
line-height: 1.4;
|
||||
}
|
||||
.console-input:focus { border-color: var(--acid, #22c55e); outline: none; }
|
||||
.console-send-btn {
|
||||
padding: 4px 10px;
|
||||
background: var(--acid, #22c55e);
|
||||
color: var(--bg, #09090b);
|
||||
border: none;
|
||||
border-radius: 4px;
|
||||
cursor: pointer;
|
||||
font-size: var(--console-font, 11px);
|
||||
font-weight: 600;
|
||||
}
|
||||
.console-send-btn:hover { opacity: 0.8; }
|
||||
|
||||
/* ── Console bubble mode ────────────────────────────────── */
|
||||
.console.bubble-mode .console-body {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
.console-bubble-bjorn {
|
||||
margin: 3px 8px 3px 4px;
|
||||
padding: 5px 10px;
|
||||
border-radius: 12px 12px 12px 4px;
|
||||
background: rgba(34, 197, 94, .12);
|
||||
color: var(--ink, #fafafa);
|
||||
max-width: 85%;
|
||||
align-self: flex-start;
|
||||
word-break: break-word;
|
||||
font-size: var(--console-font, 11px);
|
||||
line-height: 1.4;
|
||||
}
|
||||
.console-bubble-bjorn.llm { background: rgba(168, 85, 247, .12); }
|
||||
.console-bubble-user {
|
||||
margin: 3px 4px 3px 8px;
|
||||
padding: 5px 10px;
|
||||
border-radius: 12px 12px 4px 12px;
|
||||
background: rgba(59, 130, 246, .12);
|
||||
color: var(--ink, #fafafa);
|
||||
max-width: 85%;
|
||||
align-self: flex-end;
|
||||
margin-left: auto;
|
||||
word-break: break-word;
|
||||
font-size: var(--console-font, 11px);
|
||||
line-height: 1.4;
|
||||
}
|
||||
|
||||
@@ -1249,5 +1249,66 @@
|
||||
"loki.quick_placeholder": "Quick type text here...",
|
||||
"loki.quick_send": "Type",
|
||||
"loki.quick_sent": "Text sent to target",
|
||||
"loki.quick_error": "Failed to send text"
|
||||
"loki.quick_error": "Failed to send text",
|
||||
|
||||
"nav.llm_chat": "LLM Chat",
|
||||
"nav.llm_config": "LLM & MCP",
|
||||
|
||||
"llm_chat.checking": "checking...",
|
||||
"llm_chat.disabled": "LLM disabled",
|
||||
"llm_chat.online": "online",
|
||||
"llm_chat.unavailable": "unavailable",
|
||||
"llm_chat.disabled_msg": "LLM is disabled. Enable it in",
|
||||
"llm_chat.settings_link": "Settings → LLM Bridge",
|
||||
"llm_chat.thinking": "Bjorn is thinking...",
|
||||
"llm_chat.placeholder": "Ask Bjorn anything about the network...",
|
||||
"llm_chat.send": "SEND",
|
||||
"llm_chat.clear_history": "Clear history",
|
||||
"llm_chat.orch_log": "Orch Log",
|
||||
"llm_chat.orch_title": "View LLM Orchestrator reasoning log",
|
||||
"llm_chat.back_chat": "← Back to chat",
|
||||
"llm_chat.session_started": "Session started. Type a question or command.",
|
||||
"llm_chat.history_cleared": "History cleared.",
|
||||
"llm_chat.back_to_chat": "Back to chat. Type a question or command.",
|
||||
"llm_chat.loading_log": "Loading LLM Orchestrator reasoning log…",
|
||||
"llm_chat.no_log": "No reasoning log yet. Enable llm_orchestrator_log_reasoning in config and run a cycle.",
|
||||
"llm_chat.log_header": "Orchestrator log",
|
||||
"llm_chat.log_error": "Error loading reasoning log",
|
||||
"llm_chat.error": "Error",
|
||||
"llm_chat.net_error": "Network error",
|
||||
|
||||
"llm_cfg.enable_bridge": "Enable LLM Bridge",
|
||||
"llm_cfg.epd_comments": "LLM comments on EPD display",
|
||||
"llm_cfg.backend": "BACKEND PRIORITY",
|
||||
"llm_cfg.laruche_discovery": "Auto-discover LaRuche nodes via mDNS",
|
||||
"llm_cfg.laruche_url": "LARUCHE NODE URL (optional — overrides discovery)",
|
||||
"llm_cfg.ollama_url": "OLLAMA URL",
|
||||
"llm_cfg.ollama_model": "MODEL",
|
||||
"llm_cfg.provider": "PROVIDER",
|
||||
"llm_cfg.api_model": "MODEL",
|
||||
"llm_cfg.api_key": "API KEY",
|
||||
"llm_cfg.api_key_placeholder":"Leave empty to keep current",
|
||||
"llm_cfg.base_url": "CUSTOM BASE URL (OpenRouter / proxy)",
|
||||
"llm_cfg.timeout": "TIMEOUT (s)",
|
||||
"llm_cfg.max_tokens_chat": "MAX TOKENS (chat)",
|
||||
"llm_cfg.max_tokens_epd": "MAX TOKENS (EPD comment)",
|
||||
"llm_cfg.api_key_set": "API key: set",
|
||||
"llm_cfg.api_key_not_set": "API key: not set",
|
||||
"llm_cfg.save_llm": "SAVE LLM CONFIG",
|
||||
"llm_cfg.test_connection": "TEST CONNECTION",
|
||||
"llm_cfg.enable_mcp": "Enable MCP Server",
|
||||
"llm_cfg.transport": "TRANSPORT",
|
||||
"llm_cfg.mcp_port": "PORT (HTTP SSE only)",
|
||||
"llm_cfg.exposed_tools": "EXPOSED TOOLS",
|
||||
"llm_cfg.mcp_running": "Server running on port",
|
||||
"llm_cfg.mcp_stopped": "Server not running.",
|
||||
"llm_cfg.save_mcp": "SAVE MCP CONFIG",
|
||||
"llm_cfg.saved_llm": "LLM config saved.",
|
||||
"llm_cfg.saved_mcp": "MCP config saved.",
|
||||
"llm_cfg.mcp_enabled": "MCP server enabled.",
|
||||
"llm_cfg.mcp_disabled": "MCP server disabled.",
|
||||
"llm_cfg.testing": "Testing…",
|
||||
"llm_cfg.test_failed": "Failed",
|
||||
"llm_cfg.error": "Error",
|
||||
"llm_cfg.save_error": "Save error"
|
||||
}
|
||||
|
||||
@@ -932,5 +932,66 @@
|
||||
"loki.quick_placeholder": "Taper du texte ici...",
|
||||
"loki.quick_send": "Taper",
|
||||
"loki.quick_sent": "Texte envoyé à la cible",
|
||||
"loki.quick_error": "Échec de l'envoi"
|
||||
"loki.quick_error": "Échec de l'envoi",
|
||||
|
||||
"nav.llm_chat": "Chat LLM",
|
||||
"nav.llm_config": "LLM & MCP",
|
||||
|
||||
"llm_chat.checking": "vérification...",
|
||||
"llm_chat.disabled": "LLM désactivé",
|
||||
"llm_chat.online": "en ligne",
|
||||
"llm_chat.unavailable": "indisponible",
|
||||
"llm_chat.disabled_msg": "Le LLM est désactivé. Activez-le dans",
|
||||
"llm_chat.settings_link": "Paramètres → LLM Bridge",
|
||||
"llm_chat.thinking": "Bjorn réfléchit...",
|
||||
"llm_chat.placeholder": "Demandez à Bjorn n'importe quoi sur le réseau...",
|
||||
"llm_chat.send": "ENVOYER",
|
||||
"llm_chat.clear_history": "Effacer l'historique",
|
||||
"llm_chat.orch_log": "Log Orch",
|
||||
"llm_chat.orch_title": "Voir le log de raisonnement de l'orchestrateur LLM",
|
||||
"llm_chat.back_chat": "← Retour au chat",
|
||||
"llm_chat.session_started": "Session démarrée. Posez une question ou une commande.",
|
||||
"llm_chat.history_cleared": "Historique effacé.",
|
||||
"llm_chat.back_to_chat": "Retour au chat. Posez une question ou une commande.",
|
||||
"llm_chat.loading_log": "Chargement du log de raisonnement…",
|
||||
"llm_chat.no_log": "Aucun log de raisonnement. Activez llm_orchestrator_log_reasoning dans la config.",
|
||||
"llm_chat.log_header": "Log orchestrateur",
|
||||
"llm_chat.log_error": "Erreur lors du chargement du log",
|
||||
"llm_chat.error": "Erreur",
|
||||
"llm_chat.net_error": "Erreur réseau",
|
||||
|
||||
"llm_cfg.enable_bridge": "Activer le LLM Bridge",
|
||||
"llm_cfg.epd_comments": "Commentaires LLM sur l'écran EPD",
|
||||
"llm_cfg.backend": "PRIORITÉ BACKEND",
|
||||
"llm_cfg.laruche_discovery": "Découverte automatique LaRuche via mDNS",
|
||||
"llm_cfg.laruche_url": "URL NŒUD LARUCHE (optionnel — override découverte)",
|
||||
"llm_cfg.ollama_url": "URL OLLAMA",
|
||||
"llm_cfg.ollama_model": "MODÈLE",
|
||||
"llm_cfg.provider": "FOURNISSEUR",
|
||||
"llm_cfg.api_model": "MODÈLE",
|
||||
"llm_cfg.api_key": "CLÉ API",
|
||||
"llm_cfg.api_key_placeholder":"Laisser vide pour conserver la clé actuelle",
|
||||
"llm_cfg.base_url": "URL DE BASE PERSONNALISÉE (OpenRouter / proxy)",
|
||||
"llm_cfg.timeout": "TIMEOUT (s)",
|
||||
"llm_cfg.max_tokens_chat": "TOKENS MAX (chat)",
|
||||
"llm_cfg.max_tokens_epd": "TOKENS MAX (commentaire EPD)",
|
||||
"llm_cfg.api_key_set": "Clé API : définie",
|
||||
"llm_cfg.api_key_not_set": "Clé API : non définie",
|
||||
"llm_cfg.save_llm": "SAUVEGARDER CONFIG LLM",
|
||||
"llm_cfg.test_connection": "TESTER LA CONNEXION",
|
||||
"llm_cfg.enable_mcp": "Activer le serveur MCP",
|
||||
"llm_cfg.transport": "TRANSPORT",
|
||||
"llm_cfg.mcp_port": "PORT (HTTP SSE uniquement)",
|
||||
"llm_cfg.exposed_tools": "OUTILS EXPOSÉS",
|
||||
"llm_cfg.mcp_running": "Serveur actif sur le port",
|
||||
"llm_cfg.mcp_stopped": "Serveur arrêté.",
|
||||
"llm_cfg.save_mcp": "SAUVEGARDER CONFIG MCP",
|
||||
"llm_cfg.saved_llm": "Configuration LLM sauvegardée.",
|
||||
"llm_cfg.saved_mcp": "Configuration MCP sauvegardée.",
|
||||
"llm_cfg.mcp_enabled": "Serveur MCP activé.",
|
||||
"llm_cfg.mcp_disabled": "Serveur MCP désactivé.",
|
||||
"llm_cfg.testing": "Test en cours…",
|
||||
"llm_cfg.test_failed": "Échec",
|
||||
"llm_cfg.error": "Erreur",
|
||||
"llm_cfg.save_error": "Erreur de sauvegarde"
|
||||
}
|
||||
@@ -108,6 +108,7 @@
|
||||
</span>
|
||||
<button class="btn" id="modeToggle" aria-pressed="true">Auto</button>
|
||||
<button class="btn" id="attackToggle">Attack ▾</button>
|
||||
<button class="btn" id="consoleBubbleToggle" title="Toggle bubble mode">💬</button>
|
||||
<button class="btn" id="clearLogs" data-i18n="console.clear">Clear</button>
|
||||
<button class="btn" id="closeConsole">X</button>
|
||||
<div id="consoleFontRow" class="console-fontrow">
|
||||
@@ -117,6 +118,10 @@
|
||||
</div>
|
||||
</div>
|
||||
<div class="console-body" id="logout" role="log" aria-live="polite"></div>
|
||||
<div class="console-footer" id="console-chat-footer" style="display:none">
|
||||
<textarea id="consoleInput" class="console-input" placeholder="Chat with Bjorn..." rows="1"></textarea>
|
||||
<button id="consoleSend" class="console-send-btn">➤</button>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<!-- QuickPanel (WiFi/Bluetooth management) -->
|
||||
|
||||
@@ -63,6 +63,8 @@ function bootUI() {
|
||||
router.route('/bifrost', () => import('./pages/bifrost.js'));
|
||||
router.route('/loki', () => import('./pages/loki.js'));
|
||||
router.route('/bjorn', () => import('./pages/bjorn.js'));
|
||||
router.route('/llm-chat', () => import('./pages/llm-chat.js'));
|
||||
router.route('/llm-config', () => import('./pages/llm-config.js'));
|
||||
|
||||
// 404 fallback
|
||||
router.setNotFound((container, path) => {
|
||||
@@ -415,7 +417,9 @@ const PAGES = [
|
||||
{ path: '/bifrost', icon: 'network.png', label: 'nav.bifrost' },
|
||||
{ path: '/loki', icon: 'actions_launcher.png', label: 'nav.loki' },
|
||||
{ path: '/ai-dashboard', icon: 'ai_dashboard.png', label: 'nav.ai_dashboard' },
|
||||
{ path: '/bjorn-debug', icon: 'database.png', label: 'Bjorn Debug' },
|
||||
{ path: '/bjorn-debug', icon: 'database.png', label: 'Bjorn Debug' },
|
||||
{ path: '/llm-chat', icon: 'ai.png', label: 'nav.llm_chat' },
|
||||
{ path: '/llm-config', icon: 'ai_dashboard.png', label: 'nav.llm_config' },
|
||||
];
|
||||
|
||||
function wireLauncher() {
|
||||
|
||||
@@ -48,6 +48,9 @@ let isUserScrolling = false;
|
||||
let autoScroll = true;
|
||||
let lineBuffer = []; // lines held while user is scrolled up
|
||||
let isDocked = false;
|
||||
let consoleMode = 'log'; // 'log' | 'bubble'
|
||||
const CONSOLE_SESSION_ID = 'console';
|
||||
const LS_CONSOLE_MODE = 'bjorn_console_mode';
|
||||
|
||||
/* Cached DOM refs (populated in init) */
|
||||
let elConsole = null;
|
||||
@@ -194,6 +197,34 @@ function ensureDockButton() {
|
||||
* @returns {string} HTML string
|
||||
*/
|
||||
function processLogLine(line) {
|
||||
// 0. Bjorn comments — green line with icon + optional (LLM) badge
|
||||
const cmLLM = line.match(/\[LLM_COMMENT\]\s*\(([^)]*)\)\s*(.*)/);
|
||||
const cmTPL = !cmLLM && line.match(/\[COMMENT\]\s*\(([^)]*)\)\s*(.*)/);
|
||||
if (cmLLM || cmTPL) {
|
||||
const isLLM = !!cmLLM;
|
||||
const m = cmLLM || cmTPL;
|
||||
const status = m[1];
|
||||
const text = m[2];
|
||||
|
||||
// Bubble mode — render as chat bubble
|
||||
if (consoleMode === 'bubble') {
|
||||
const cls = isLLM ? 'console-bubble-bjorn llm' : 'console-bubble-bjorn';
|
||||
return `<div class="${cls}"><img src="/web/images/icon-60x60.png" class="comment-icon" alt="" style="width:14px;height:14px;vertical-align:middle;margin-right:4px">${text}</div>`;
|
||||
}
|
||||
|
||||
const badge = isLLM ? '<span class="comment-llm-badge">LLM</span>' : '';
|
||||
return `<span class="comment-line">${badge}<span class="comment-status">${status}</span> <img src="/web/images/icon-60x60.png" class="comment-icon" alt=""> ${text}</span>`;
|
||||
}
|
||||
|
||||
// 0b. User chat messages (from console chat) — bubble mode
|
||||
const userChat = line.match(/\[USER_CHAT\]\s*(.*)/);
|
||||
if (userChat) {
|
||||
if (consoleMode === 'bubble') {
|
||||
return `<div class="console-bubble-user">${userChat[1]}</div>`;
|
||||
}
|
||||
return `<span class="comment-line"><span class="comment-status">YOU</span> ${userChat[1]}</span>`;
|
||||
}
|
||||
|
||||
// 1. Highlight *.py filenames
|
||||
line = line.replace(
|
||||
/\b([\w\-]+\.py)\b/g,
|
||||
@@ -900,6 +931,34 @@ export function init() {
|
||||
elFontInput.addEventListener('input', () => setFont(elFontInput.value));
|
||||
}
|
||||
|
||||
/* -- Bubble mode toggle ------------------------------------------ */
|
||||
try { consoleMode = localStorage.getItem(LS_CONSOLE_MODE) || 'log'; } catch { /* ignore */ }
|
||||
syncBubbleMode();
|
||||
|
||||
const bubbleBtn = $('#consoleBubbleToggle');
|
||||
if (bubbleBtn) {
|
||||
bubbleBtn.addEventListener('click', () => {
|
||||
consoleMode = consoleMode === 'log' ? 'bubble' : 'log';
|
||||
try { localStorage.setItem(LS_CONSOLE_MODE, consoleMode); } catch { /* ignore */ }
|
||||
syncBubbleMode();
|
||||
});
|
||||
}
|
||||
|
||||
/* -- Console chat input ----------------------------------------- */
|
||||
const chatFooter = $('#console-chat-footer');
|
||||
const chatInput = $('#consoleInput');
|
||||
const chatSend = $('#consoleSend');
|
||||
|
||||
if (chatInput && chatSend) {
|
||||
chatSend.addEventListener('click', () => sendConsoleChat(chatInput));
|
||||
chatInput.addEventListener('keydown', (e) => {
|
||||
if (e.key === 'Enter' && !e.shiftKey) {
|
||||
e.preventDefault();
|
||||
sendConsoleChat(chatInput);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/* -- Close / Clear ----------------------------------------------- */
|
||||
const btnClose = $('#closeConsole');
|
||||
if (btnClose) btnClose.addEventListener('click', closeConsole);
|
||||
@@ -1003,3 +1062,54 @@ async function checkAutostart() {
|
||||
// It can still be opened manually by the user.
|
||||
closeConsole();
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Bubble mode & console chat */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
 * Sync the bubble-mode UI with the current `consoleMode`:
 * toggle-button glyph/title, chat footer visibility, console CSS class.
 */
function syncBubbleMode() {
  const bubble = consoleMode === 'bubble';

  const toggleBtn = $('#consoleBubbleToggle');
  if (toggleBtn) {
    toggleBtn.textContent = bubble ? '\uD83D\uDCDD' : '\uD83D\uDCAC';
    toggleBtn.title = bubble ? 'Switch to log mode' : 'Switch to bubble mode';
  }

  const footer = $('#console-chat-footer');
  if (footer) footer.style.display = bubble ? '' : 'none';

  const consoleBox = $('#console');
  if (consoleBox) consoleBox.classList.toggle('bubble-mode', bubble);
}
|
||||
|
||||
/**
 * Send a chat message typed in the console footer to the LLM bridge and
 * render both the user message and the reply into the console log.
 *
 * Fix: user- and LLM-supplied text is now HTML-escaped before being
 * interpolated into the log markup — the original injected it raw into
 * innerHTML-bound template strings (DOM injection / XSS).
 *
 * @param {HTMLTextAreaElement} inputEl - console chat input element
 */
async function sendConsoleChat(inputEl) {
  if (!inputEl) return;
  const msg = inputEl.value.trim();
  if (!msg) return;
  inputEl.value = '';

  // Minimal local escaper — console.js has no dom-helper import in scope here.
  const esc = (s) => String(s)
    .replace(/&/g, '&amp;')
    .replace(/</g, '&lt;')
    .replace(/>/g, '&gt;')
    .replace(/"/g, '&quot;');

  const safeMsg = esc(msg);

  // Show user message in console
  if (consoleMode === 'bubble') {
    appendLogHtml(`<div class="console-bubble-user">${safeMsg}</div>`);
  } else {
    appendLogHtml(`<span class="comment-line"><span class="comment-status">YOU</span> ${safeMsg}</span>`);
  }

  // Call LLM (the raw message goes to the API; only the rendered copy is escaped)
  try {
    const data = await api.post('/api/llm/chat', { message: msg, session_id: CONSOLE_SESSION_ID });
    if (data?.status === 'ok' && data.response) {
      const safeResp = esc(data.response);
      if (consoleMode === 'bubble') {
        appendLogHtml(`<div class="console-bubble-bjorn llm"><img src="/web/images/icon-60x60.png" class="comment-icon" alt="" style="width:14px;height:14px;vertical-align:middle;margin-right:4px">${safeResp}</div>`);
      } else {
        appendLogHtml(`<span class="comment-line"><span class="comment-llm-badge">LLM</span><span class="comment-status">BJORN</span> <img src="/web/images/icon-60x60.png" class="comment-icon" alt=""> ${safeResp}</span>`);
      }
    } else {
      appendLogHtml(`<span class="loglvl error">Chat error: ${esc(data?.message || 'unknown')}</span>`);
    }
  } catch (e) {
    appendLogHtml(`<span class="loglvl error">Chat error: ${esc(e.message)}</span>`);
  }
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -45,6 +45,11 @@ export class ResourceTracker {
|
||||
this._listeners.push({ target, event, handler, options });
|
||||
}
|
||||
|
||||
  /**
   * Shorthand alias for trackEventListener: register an event listener
   * that will be removed when this tracker is cleaned up.
   * @param {EventTarget} target          element / window / document to listen on
   * @param {string} event                event name (e.g. 'click')
   * @param {Function} handler            listener callback
   * @param {Object|boolean} [options]    addEventListener options
   * @returns the result of trackEventListener
   */
  on(target, event, handler, options) {
    return this.trackEventListener(target, event, handler, options);
  }
|
||||
|
||||
/* -- AbortControllers (for fetch) -- */
|
||||
trackAbortController() {
|
||||
const ac = new AbortController();
|
||||
|
||||
253
web/js/pages/llm-chat.js
Normal file
253
web/js/pages/llm-chat.js
Normal file
@@ -0,0 +1,253 @@
|
||||
/**
|
||||
* llm-chat — LLM Chat SPA page
|
||||
* Chat interface with LLM bridge + orchestrator reasoning log.
|
||||
*/
|
||||
import { ResourceTracker } from '../core/resource-tracker.js';
|
||||
import { api } from '../core/api.js';
|
||||
import { el, $, empty, escapeHtml } from '../core/dom.js';
|
||||
import { t } from '../core/i18n.js';
|
||||
|
||||
const PAGE = 'llm-chat';
|
||||
|
||||
/* ── State ─────────────────────────────────────────────── */
|
||||
|
||||
let tracker = null;
|
||||
let root = null;
|
||||
let llmEnabled = false;
|
||||
let orchMode = false;
|
||||
const sessionId = 'chat-' + Math.random().toString(36).slice(2, 8);
|
||||
|
||||
/* ── Lifecycle ─────────────────────────────────────────── */
|
||||
|
||||
/**
 * Mount the LLM Chat page into `container`.
 * Builds the DOM shell, wires event handlers, probes /api/llm/status to
 * set the header state, then prints the "session started" system line.
 * @param {HTMLElement} container - SPA page container supplied by the router
 */
export async function mount(container) {
  tracker = new ResourceTracker(PAGE);
  root = buildShell();
  container.appendChild(root);
  bindEvents();
  await checkStatus();
  sysMsg(t('llm_chat.session_started'));
}
|
||||
|
||||
/** Tear down the page: release tracked listeners and reset module state. */
export function unmount() {
  if (tracker) {
    tracker.cleanupAll();
    tracker = null;
  }
  llmEnabled = false;
  orchMode = false;
  root = null;
}
|
||||
|
||||
/* ── Shell ─────────────────────────────────────────────── */
|
||||
|
||||
/**
 * Build the static DOM shell of the chat page.
 * Layout: header (status dot/text + orch-log, clear, settings buttons),
 * scrollable message pane (with a hidden "LLM disabled" banner), a hidden
 * "thinking" indicator, and the input row (textarea + send button).
 * Dynamic state is applied later by checkStatus()/bindEvents().
 * @returns {HTMLElement} detached page root, appended by mount()
 */
function buildShell() {
  return el('div', { class: 'llmc-page' }, [

    /* Header */
    el('div', { class: 'llmc-header' }, [
      el('span', { class: 'llmc-dot', id: 'llmc-dot' }),
      el('span', { class: 'llmc-title' }, ['BJORN / CHAT']),
      el('span', { class: 'llmc-status', id: 'llmc-status' }, [t('llm_chat.checking')]),
      el('button', { class: 'llmc-btn-ghost', id: 'llmc-orch-btn', title: t('llm_chat.orch_title') },
        [t('llm_chat.orch_log')]),
      el('button', { class: 'llmc-btn-ghost llmc-clear-btn', id: 'llmc-clear-btn' },
        [t('llm_chat.clear_history')]),
      // '\u2699' = gear glyph — shortcut to the LLM settings page
      el('button', { class: 'llmc-btn-ghost', id: 'llmc-cfg-btn', title: 'LLM Settings' },
        ['\u2699']),
    ]),

    /* Messages */
    el('div', { class: 'llmc-messages', id: 'llmc-messages' }, [
      el('div', { class: 'llmc-disabled-msg', id: 'llmc-disabled-msg', style: 'display:none' }, [
        t('llm_chat.disabled_msg') + ' ',
        el('a', { href: '#/llm-config' }, [t('llm_chat.settings_link')]),
        '.',
      ]),
    ]),

    /* Thinking indicator (shown while a request is in flight) */
    el('div', { class: 'llmc-thinking', id: 'llmc-thinking', style: 'display:none' }, [
      '▌ ', t('llm_chat.thinking'),
    ]),

    /* Input row */
    el('div', { class: 'llmc-input-row', id: 'llmc-input-row' }, [
      el('textarea', {
        class: 'llmc-input', id: 'llmc-input',
        placeholder: t('llm_chat.placeholder'),
        rows: '1',
      }),
      el('button', { class: 'llmc-send-btn', id: 'llmc-send-btn' }, [t('llm_chat.send')]),
    ]),
  ]);
}
|
||||
|
||||
/* ── Events ────────────────────────────────────────────── */
|
||||
|
||||
/**
 * Wire page controls: send/clear/orchestrator-log buttons, the settings
 * shortcut, and the chat input (Enter-to-send + auto-resize).
 *
 * Fix: the textarea auto-resize now runs on the 'input' event instead of
 * inside 'keydown' — keydown fires before the character is inserted, so
 * the original recomputed the height one keystroke late (and never on
 * paste via mouse).
 */
function bindEvents() {
  const sendBtn = $('#llmc-send-btn', root);
  const clearBtn = $('#llmc-clear-btn', root);
  const orchBtn = $('#llmc-orch-btn', root);
  const input = $('#llmc-input', root);
  const cfgBtn = $('#llmc-cfg-btn', root);

  if (sendBtn) tracker.on(sendBtn, 'click', send);
  if (clearBtn) tracker.on(clearBtn, 'click', clearHistory);
  if (orchBtn) tracker.on(orchBtn, 'click', toggleOrchLog);
  if (cfgBtn) tracker.on(cfgBtn, 'click', () => { window.location.hash = '#/llm-config'; });

  if (input) {
    // Grow the textarea with its content, capped at 120px.
    tracker.on(input, 'input', () => {
      input.style.height = 'auto';
      input.style.height = Math.min(input.scrollHeight, 120) + 'px';
    });
    // Enter sends; Shift+Enter inserts a newline.
    tracker.on(input, 'keydown', (e) => {
      if (e.key === 'Enter' && !e.shiftKey) {
        e.preventDefault();
        send();
      }
    });
  }
}
|
||||
|
||||
/* ── Status ────────────────────────────────────────────── */
|
||||
|
||||
/**
 * Probe /api/llm/status and reflect the result in the header:
 * status-dot class, status text, "disabled" banner, send-button state.
 * Also updates the module-level `llmEnabled` flag.
 */
async function checkStatus() {
  const statusEl = $('#llmc-status', root);
  try {
    const data = await api.get('/api/llm/status', { timeout: 5000, retries: 0 });
    if (!data) throw new Error('no data');

    llmEnabled = data.enabled === true;

    const dot = $('#llmc-dot', root);
    const banner = $('#llmc-disabled-msg', root);
    const sendBtn = $('#llmc-send-btn', root);

    if (llmEnabled) {
      // Prefer an explicit LaRuche URL; otherwise show the backend name.
      const backend = data.laruche_url
        ? 'LaRuche @ ' + data.laruche_url
        : (data.backend || 'auto');
      if (dot) dot.className = 'llmc-dot online';
      if (statusEl) statusEl.textContent = t('llm_chat.online') + ' · ' + backend;
      if (banner) banner.style.display = 'none';
      if (sendBtn) sendBtn.disabled = false;
    } else {
      if (dot) dot.className = 'llmc-dot offline';
      if (statusEl) statusEl.textContent = t('llm_chat.disabled');
      if (banner) banner.style.display = '';
      if (sendBtn) sendBtn.disabled = true;
    }
  } catch {
    if (statusEl) statusEl.textContent = t('llm_chat.unavailable');
  }
}
|
||||
|
||||
/* ── Chat ──────────────────────────────────────────────── */
|
||||
|
||||
/** Send the current input to /api/llm/chat and render the reply. */
async function send() {
  const input = $('#llmc-input', root);
  if (!input) return;

  const text = input.value.trim();
  if (!text) return;

  const sendBtn = $('#llmc-send-btn', root);
  input.value = '';
  input.style.height = '44px';

  appendMsg('user', text);
  setThinking(true);
  if (sendBtn) sendBtn.disabled = true;

  try {
    const data = await api.post('/api/llm/chat', { message: text, session_id: sessionId });
    if (data?.status === 'ok') {
      appendMsg('assistant', data.response);
    } else {
      sysMsg(t('llm_chat.error') + ': ' + (data?.message || 'unknown'));
    }
  } catch (e) {
    sysMsg(t('llm_chat.net_error') + ': ' + e.message);
  } finally {
    setThinking(false);
    // Re-enable only if the bridge is still reported enabled.
    if (sendBtn) sendBtn.disabled = !llmEnabled;
  }
}
|
||||
|
||||
/**
 * Clear the chat: ask the server to drop the session history, then reset
 * the local message pane while keeping the "LLM disabled" banner node.
 *
 * Fixes: the banner is captured *before* empty() runs — the original
 * looked it up afterwards, when empty() had already detached it from the
 * pane, so the selector returned null and the banner was silently lost.
 * The API call is also best-effort now, so a network error still clears
 * the local view instead of aborting with an unhandled rejection.
 */
async function clearHistory() {
  const msgs = $('#llmc-messages', root);
  const disMsg = $('#llmc-disabled-msg', root);

  try {
    await api.post('/api/llm/clear_history', { session_id: sessionId });
  } catch { /* best-effort: still clear locally */ }

  if (!msgs) return;
  empty(msgs);
  if (disMsg) msgs.appendChild(disMsg);
  sysMsg(t('llm_chat.history_cleared'));
}
|
||||
|
||||
/* ── Orch log ──────────────────────────────────────────── */
|
||||
|
||||
/** Toggle between the chat view and the orchestrator reasoning-log view. */
async function toggleOrchLog() {
  orchMode = !orchMode;

  const orchBtn = $('#llmc-orch-btn', root);
  const inputRow = $('#llmc-input-row', root);
  const pane = $('#llmc-messages', root);

  if (pane) empty(pane);
  if (inputRow) inputRow.style.display = orchMode ? 'none' : '';
  if (orchBtn) {
    orchBtn.classList.toggle('active', orchMode);
    orchBtn.textContent = orchMode ? t('llm_chat.back_chat') : t('llm_chat.orch_log');
  }

  if (orchMode) {
    await loadOrchLog();
  } else {
    sysMsg(t('llm_chat.back_to_chat'));
  }
}
|
||||
|
||||
/** Fetch /api/llm/reasoning and render each entry into the message pane. */
async function loadOrchLog() {
  sysMsg(t('llm_chat.loading_log'));
  try {
    const data = await api.get('/api/llm/reasoning', { timeout: 10000, retries: 0 });
    const pane = $('#llmc-messages', root);
    if (!pane) return;
    empty(pane);

    const entries = data?.messages ?? [];
    if (!entries.length) {
      sysMsg(t('llm_chat.no_log'));
      return;
    }

    sysMsg(t('llm_chat.log_header') + ' — ' + data.count + ' message(s)');
    for (const m of entries) {
      const role = m.role === 'user' ? 'user' : 'assistant';
      appendMsg(role, m.content || '');
    }
  } catch (e) {
    sysMsg(t('llm_chat.log_error') + ': ' + e.message);
  }
}
|
||||
|
||||
/* ── Helpers ───────────────────────────────────────────── */
|
||||
|
||||
/**
 * Append one chat message (user / assistant) to the message pane.
 * The body is inserted as a text node, so content is never parsed as HTML.
 * @param {string} role - 'user' | 'assistant' (anything else is shown uppercased)
 * @param {string} text - message body
 */
function appendMsg(role, text) {
  const pane = $('#llmc-messages', root);
  if (!pane) return;

  let roleLabel;
  switch (role) {
    case 'user':      roleLabel = 'YOU';   break;
    case 'assistant': roleLabel = 'BJORN'; break;
    default:          roleLabel = role.toUpperCase();
  }

  const node = el('div', { class: 'llmc-msg ' + role }, [
    el('div', { class: 'llmc-msg-role' }, [roleLabel]),
    document.createTextNode(text),
  ]);
  pane.appendChild(node);
  pane.scrollTop = pane.scrollHeight;
}
|
||||
|
||||
/** Append a system/info line to the message pane and scroll to the bottom. */
function sysMsg(text) {
  const pane = $('#llmc-messages', root);
  if (!pane) return;
  pane.appendChild(el('div', { class: 'llmc-msg system' }, [text]));
  pane.scrollTop = pane.scrollHeight;
}
|
||||
|
||||
/**
 * Show or hide the "Bjorn is thinking..." indicator.
 * Fix: the local variable was named `el`, shadowing the imported el()
 * DOM factory within this function — renamed to remove the hazard.
 * @param {boolean} on - true to show the indicator
 */
function setThinking(on) {
  const indicator = $('#llmc-thinking', root);
  if (indicator) indicator.style.display = on ? '' : 'none';
}
|
||||
682
web/js/pages/llm-config.js
Normal file
682
web/js/pages/llm-config.js
Normal file
@@ -0,0 +1,682 @@
|
||||
/**
|
||||
* llm-config — LLM Bridge & MCP Server settings SPA page
|
||||
*/
|
||||
import { ResourceTracker } from '../core/resource-tracker.js';
|
||||
import { api } from '../core/api.js';
|
||||
import { el, $, empty, toast } from '../core/dom.js';
|
||||
import { t } from '../core/i18n.js';
|
||||
|
||||
const PAGE = 'llm-config';
|
||||
|
||||
const ALL_TOOLS = [
|
||||
'get_hosts', 'get_vulnerabilities', 'get_credentials',
|
||||
'get_action_history', 'get_status', 'run_action', 'query_db',
|
||||
];
|
||||
|
||||
/* ── State ─────────────────────────────────────────────── */
|
||||
|
||||
let tracker = null;
|
||||
let root = null;
|
||||
|
||||
/* ── Lifecycle ─────────────────────────────────────────── */
|
||||
|
||||
/**
 * Mount the LLM & MCP settings page: build the DOM shell, wire handlers,
 * then load the current configuration/state from the backend.
 * @param {HTMLElement} container - SPA page container supplied by the router
 */
export async function mount(container) {
  tracker = new ResourceTracker(PAGE);
  root = buildShell();
  container.appendChild(root);
  bindEvents();
  await loadAll();
}
|
||||
|
||||
/** Release tracked resources and drop the DOM reference. */
export function unmount() {
  if (tracker) {
    tracker.cleanupAll();
    tracker = null;
  }
  root = null;
}
|
||||
|
||||
/* ── Shell ─────────────────────────────────────────────── */
|
||||
|
||||
/**
 * Build the static DOM shell of the settings page.
 * Four sections: LLM Bridge (backend priority, LaRuche/Ollama/API fields),
 * LLM Orchestrator, Personality & Prompts, and MCP Server. All values are
 * populated later (loadAll) and persisted by the save buttons (bindEvents).
 * @returns {HTMLElement} detached page root, appended by mount()
 */
function buildShell() {
  return el('div', { class: 'llmcfg-page' }, [

    /* Page header */
    el('div', { class: 'llmcfg-header' }, [
      el('span', { class: 'llmcfg-title' }, ['BJORN / LLM & MCP SETTINGS']),
      el('a', { class: 'llmcfg-nav-link', href: '#/llm-chat' }, ['→ ' + t('nav.llm_chat')]),
    ]),

    el('div', { class: 'llmcfg-container' }, [

      /* ── LLM Bridge section ──────────────────────────── */
      el('div', { class: 'llmcfg-section' }, [
        el('div', { class: 'llmcfg-section-title' }, [
          'LLM BRIDGE',
          el('span', { class: 'llmcfg-badge off', id: 'llm-badge' }, ['OFF']),
        ]),
        el('div', { class: 'llmcfg-body' }, [

          toggleRow('llm_enabled', t('llm_cfg.enable_bridge')),
          toggleRow('llm_comments_enabled', t('llm_cfg.epd_comments')),

          fieldEl(t('llm_cfg.backend'), el('select', { id: 'llm_backend', class: 'llmcfg-select' }, [
            el('option', { value: 'auto' }, ['Auto (LaRuche → Ollama → API)']),
            el('option', { value: 'laruche' }, ['LaRuche only']),
            el('option', { value: 'ollama' }, ['Ollama only']),
            el('option', { value: 'api' }, ['External API only']),
          ])),

          subsectionTitle('LARUCHE / LAND'),
          toggleRow('llm_laruche_discovery', t('llm_cfg.laruche_discovery')),
          el('div', { class: 'llmcfg-discovery-row', id: 'laruche-discovery-status' }),
          fieldEl(t('llm_cfg.laruche_url'),
            el('div', { class: 'llmcfg-url-row' }, [
              el('input', { type: 'text', id: 'llm_laruche_url', class: 'llmcfg-input',
                placeholder: 'Auto-detected via mDNS or enter manually' }),
              el('button', { class: 'llmcfg-btn compact', id: 'laruche-use-discovered',
                style: 'display:none' }, ['Use']),
            ])),
          fieldEl('LaRuche Model',
            el('div', { class: 'llmcfg-model-row' }, [
              el('select', { id: 'llm_laruche_model', class: 'llmcfg-select' }, [
                el('option', { value: '' }, ['Default (server decides)']),
              ]),
              el('button', { class: 'llmcfg-btn compact', id: 'laruche-refresh-models' }, ['⟳ Refresh']),
            ])),
          el('div', { class: 'llmcfg-laruche-default', id: 'laruche-default-model' }),

          subsectionTitle('OLLAMA (LOCAL)'),
          fieldEl(t('llm_cfg.ollama_url'),
            el('input', { type: 'text', id: 'llm_ollama_url', class: 'llmcfg-input',
              placeholder: 'http://127.0.0.1:11434' })),
          fieldEl(t('llm_cfg.ollama_model'),
            el('div', { class: 'llmcfg-model-row' }, [
              el('select', { id: 'llm_ollama_model', class: 'llmcfg-select' }, [
                el('option', { value: '' }, ['Default']),
              ]),
              el('button', { class: 'llmcfg-btn compact', id: 'ollama-refresh-models' }, ['⟳ Refresh']),
            ])),

          subsectionTitle('EXTERNAL API'),
          el('div', { class: 'llmcfg-row' }, [
            fieldEl(t('llm_cfg.provider'), el('select', { id: 'llm_api_provider', class: 'llmcfg-select' }, [
              el('option', { value: 'anthropic' }, ['Anthropic (Claude)']),
              el('option', { value: 'openai' }, ['OpenAI']),
              el('option', { value: 'openrouter' }, ['OpenRouter']),
            ])),
            fieldEl(t('llm_cfg.api_model'),
              el('input', { type: 'text', id: 'llm_api_model', class: 'llmcfg-input',
                placeholder: 'claude-haiku-4-5-20251001' })),
          ]),
          // API key field is a password input; backend keeps the old key when left empty.
          fieldEl(t('llm_cfg.api_key'),
            el('input', { type: 'password', id: 'llm_api_key', class: 'llmcfg-input',
              placeholder: t('llm_cfg.api_key_placeholder') })),
          fieldEl(t('llm_cfg.base_url'),
            el('input', { type: 'text', id: 'llm_api_base_url', class: 'llmcfg-input',
              placeholder: 'https://openrouter.ai/api' })),

          el('div', { class: 'llmcfg-row' }, [
            fieldEl(t('llm_cfg.timeout'),
              el('input', { type: 'number', id: 'llm_timeout_s', class: 'llmcfg-input',
                min: '5', max: '120', value: '30' })),
            fieldEl(t('llm_cfg.max_tokens_chat'),
              el('input', { type: 'number', id: 'llm_max_tokens', class: 'llmcfg-input',
                min: '50', max: '4096', value: '500' })),
            fieldEl(t('llm_cfg.max_tokens_epd'),
              el('input', { type: 'number', id: 'llm_comment_max_tokens', class: 'llmcfg-input',
                min: '20', max: '200', value: '80' })),
          ]),

          el('div', { class: 'llmcfg-status-row', id: 'llm-status-row' }),

          el('div', { class: 'llmcfg-actions' }, [
            el('button', { class: 'llmcfg-btn primary', id: 'llm-save-btn' }, [t('llm_cfg.save_llm')]),
            el('button', { class: 'llmcfg-btn', id: 'llm-test-btn' }, [t('llm_cfg.test_connection')]),
          ]),
        ]),
      ]),

      /* ── LLM Orchestrator section ────────────────────── */
      el('div', { class: 'llmcfg-section' }, [
        el('div', { class: 'llmcfg-section-title' }, [
          'LLM ORCHESTRATOR',
          el('span', { class: 'llmcfg-badge off', id: 'orch-badge' }, ['OFF']),
        ]),
        el('div', { class: 'llmcfg-body' }, [

          fieldEl('Mode', el('select', { id: 'llm_orchestrator_mode', class: 'llmcfg-select' }, [
            el('option', { value: 'none' }, ['Disabled']),
            el('option', { value: 'advisor' }, ['Advisor (suggest 1 action per cycle)']),
            el('option', { value: 'autonomous' }, ['Autonomous (full agentic loop)']),
          ])),

          el('div', { class: 'llmcfg-row' }, [
            fieldEl('Cycle interval (s)',
              el('input', { type: 'number', id: 'llm_orchestrator_interval_s', class: 'llmcfg-input',
                min: '30', max: '600', value: '60' })),
            fieldEl('Max actions / cycle',
              el('input', { type: 'number', id: 'llm_orchestrator_max_actions', class: 'llmcfg-input',
                min: '1', max: '10', value: '3' })),
          ]),

          toggleRow('llm_orchestrator_log_reasoning', 'Log reasoning to chat history'),
          toggleRow('llm_orchestrator_skip_if_no_change', 'Skip cycle when nothing changed'),

          el('div', { class: 'llmcfg-status-row', id: 'orch-status-row' }),

          el('div', { class: 'llmcfg-actions' }, [
            el('button', { class: 'llmcfg-btn primary', id: 'orch-save-btn' }, ['SAVE ORCHESTRATOR']),
          ]),
        ]),
      ]),

      /* ── Personality & Prompts section ───────────────── */
      el('div', { class: 'llmcfg-section' }, [
        el('div', { class: 'llmcfg-section-title' }, ['PERSONALITY & PROMPTS']),
        el('div', { class: 'llmcfg-body' }, [

          fieldEl('Operator Name',
            el('input', { type: 'text', id: 'llm_user_name', class: 'llmcfg-input',
              placeholder: 'Your name (Bjorn will address you)' })),
          fieldEl('About you',
            el('textarea', { id: 'llm_user_bio', class: 'llmcfg-textarea', rows: '2',
              placeholder: 'Brief description (e.g. security researcher, pentester, sysadmin...)' })),

          fieldEl('Chat System Prompt',
            el('div', {}, [
              el('textarea', { id: 'llm_system_prompt_chat', class: 'llmcfg-textarea', rows: '4',
                placeholder: 'Loading default prompt...' }),
              el('button', { class: 'llmcfg-btn compact llmcfg-reset-btn', id: 'reset-prompt-chat' },
                ['Reset to default']),
            ])),

          fieldEl('Comment System Prompt (EPD)',
            el('div', {}, [
              el('textarea', { id: 'llm_system_prompt_comment', class: 'llmcfg-textarea', rows: '3',
                placeholder: 'Loading default prompt...' }),
              el('button', { class: 'llmcfg-btn compact llmcfg-reset-btn', id: 'reset-prompt-comment' },
                ['Reset to default']),
            ])),

          el('div', { class: 'llmcfg-actions' }, [
            el('button', { class: 'llmcfg-btn primary', id: 'prompts-save-btn' }, ['SAVE PERSONALITY']),
          ]),
        ]),
      ]),

      /* ── MCP Server section ──────────────────────────── */
      el('div', { class: 'llmcfg-section' }, [
        el('div', { class: 'llmcfg-section-title' }, [
          'MCP SERVER',
          el('span', { class: 'llmcfg-badge off', id: 'mcp-badge' }, ['OFF']),
        ]),
        el('div', { class: 'llmcfg-body' }, [

          toggleRow('mcp_enabled', t('llm_cfg.enable_mcp')),

          el('div', { class: 'llmcfg-row' }, [
            fieldEl(t('llm_cfg.transport'), el('select', { id: 'mcp_transport', class: 'llmcfg-select' }, [
              el('option', { value: 'http' }, ['HTTP SSE (LAN accessible)']),
              el('option', { value: 'stdio' }, ['stdio (Claude Desktop)']),
            ])),
            fieldEl(t('llm_cfg.mcp_port'),
              el('input', { type: 'number', id: 'mcp_port', class: 'llmcfg-input',
                min: '1024', max: '65535', value: '8765' })),
          ]),

          // Tool checkboxes are rendered into this grid later from ALL_TOOLS.
          fieldEl(t('llm_cfg.exposed_tools'),
            el('div', { class: 'llmcfg-tools-grid', id: 'tools-grid' })),

          el('div', { class: 'llmcfg-status-row', id: 'mcp-status-row' }),

          el('div', { class: 'llmcfg-actions' }, [
            el('button', { class: 'llmcfg-btn primary', id: 'mcp-save-btn' }, [t('llm_cfg.save_mcp')]),
          ]),
        ]),
      ]),

    ]),
  ]);
}
|
||||
|
||||
/* ── Builder helpers ───────────────────────────────────── */
|
||||
|
||||
/* Build a labelled on/off switch row bound to a boolean config key `id`. */
function toggleRow(id, label) {
  const caption = el('span', { class: 'llmcfg-toggle-label' }, [label]);
  const checkbox = el('input', { type: 'checkbox', id });
  const knob = el('span', { class: 'llmcfg-slider' });
  const control = el('label', { class: 'llmcfg-toggle' }, [checkbox, knob]);
  return el('div', { class: 'llmcfg-toggle-row' }, [caption, control]);
}
|
||||
|
||||
/* Wrap an input element with its caption in the standard field layout. */
function fieldEl(label, input) {
  const caption = el('label', { class: 'llmcfg-label' }, [label]);
  return el('div', { class: 'llmcfg-field' }, [caption, input]);
}
|
||||
|
||||
/* Small heading used to split a section body into named sub-areas. */
function subsectionTitle(caption) {
  return el('div', { class: 'llmcfg-subsection-title' }, [caption]);
}
|
||||
|
||||
/* ── Events ────────────────────────────────────────────── */
|
||||
|
||||
/* Wire up all buttons/toggles of the page. Uses `tracker` so listeners are
 * released when the SPA page is torn down. Elements are looked up lazily and
 * skipped when absent, so partial renders don't throw. */
function bindEvents() {
  const hook = (selector, evName, fn) => {
    const node = $(selector, root);
    if (node) tracker.on(node, evName, fn);
  };

  hook('#llm-save-btn', 'click', saveLLM);
  hook('#llm-test-btn', 'click', testLLM);
  hook('#mcp-save-btn', 'click', saveMCP);
  hook('#orch-save-btn', 'click', saveOrch);

  // The MCP toggle acts immediately (no Save needed) — it posts on change.
  const mcpToggle = $('#mcp_enabled', root);
  if (mcpToggle) tracker.on(mcpToggle, 'change', () => toggleMCP(mcpToggle.checked));

  hook('#prompts-save-btn', 'click', savePrompts);

  // Reset buttons blank the textarea so the server-side default applies on save.
  const hookReset = (btnSelector, textareaSelector) => hook(btnSelector, 'click', () => {
    const ta = $(textareaSelector, root);
    if (ta) { ta.value = ''; toast('Prompt reset — save to apply'); }
  });
  hookReset('#reset-prompt-chat', '#llm_system_prompt_chat');
  hookReset('#reset-prompt-comment', '#llm_system_prompt_comment');

  hook('#laruche-refresh-models', 'click', () => refreshModels('laruche'));
  hook('#ollama-refresh-models', 'click', () => refreshModels('ollama'));
}
|
||||
|
||||
/* ── Data ──────────────────────────────────────────────── */
|
||||
|
||||
/* Fetch LLM config and MCP status in parallel, then paint both sections.
 * Any failure surfaces as a single toast; the page keeps its previous state. */
async function loadAll() {
  try {
    const requests = [
      api.get('/api/llm/config', { timeout: 8000 }),
      api.get('/api/mcp/status', { timeout: 8000 }),
    ];
    const [llmCfg, mcpCfg] = await Promise.all(requests);
    if (llmCfg) applyLLMConfig(llmCfg);
    if (mcpCfg) applyMCPConfig(mcpCfg);
  } catch (e) {
    toast('Load error: ' + e.message, 3000);
  }
}
|
||||
|
||||
/* Paint the whole LLM Bridge section (plus the orchestrator sub-section,
 * which rides along in the same /api/llm/config response) from a config
 * payload. Purely DOM-side: writes checkbox/input/select values, badges and
 * status rows; never mutates `cfg`. May kick off an async model refresh. */
function applyLLMConfig(cfg) {
  const boolKeys = ['llm_enabled', 'llm_comments_enabled', 'llm_laruche_discovery'];
  const textKeys = ['llm_backend', 'llm_laruche_url', 'llm_ollama_url',
    'llm_api_provider', 'llm_api_model', 'llm_api_base_url',
    'llm_timeout_s', 'llm_max_tokens', 'llm_comment_max_tokens',
    'llm_user_name', 'llm_user_bio',
    'llm_system_prompt_chat', 'llm_system_prompt_comment'];

  // Element ids mirror config keys 1:1; missing elements are skipped.
  for (const k of boolKeys) {
    const el = $(('#' + k), root);
    if (el) el.checked = !!cfg[k];
  }
  for (const k of textKeys) {
    const el = $(('#' + k), root);
    if (el && cfg[k] !== undefined) el.value = cfg[k];
  }

  // Set default prompts as placeholders (shown while the textarea is empty,
  // i.e. while the user has not overridden the server default).
  const chatPromptEl = $('#llm_system_prompt_chat', root);
  if (chatPromptEl && cfg.llm_default_prompt_chat) {
    chatPromptEl.placeholder = cfg.llm_default_prompt_chat;
  }
  const commentPromptEl = $('#llm_system_prompt_comment', root);
  if (commentPromptEl && cfg.llm_default_prompt_comment) {
    commentPromptEl.placeholder = cfg.llm_default_prompt_comment;
  }

  const badge = $('#llm-badge', root);
  if (badge) {
    badge.textContent = cfg.llm_enabled ? 'ON' : 'OFF';
    badge.className = 'llmcfg-badge ' + (cfg.llm_enabled ? 'on' : 'off');
  }

  // The API key itself is never echoed back; the server only reports whether
  // one is stored (llm_api_key_set).
  const statusRow = $('#llm-status-row', root);
  if (statusRow) {
    statusRow.textContent = cfg.llm_api_key_set
      ? t('llm_cfg.api_key_set')
      : t('llm_cfg.api_key_not_set');
  }

  // LaRuche mDNS discovery status
  const discRow = $('#laruche-discovery-status', root);
  const useBtn = $('#laruche-use-discovered', root);
  const urlEl = $('#llm_laruche_url', root);
  const discovered = cfg.laruche_discovered_url || '';

  if (discRow) {
    if (discovered) {
      discRow.innerHTML = '';
      discRow.appendChild(el('span', { class: 'llmcfg-disc-found' },
        ['\u2705 LaRuche discovered: ' + discovered]));
    } else if (cfg.laruche_discovery_active === false && cfg.llm_laruche_discovery) {
      // NOTE(review): this shows "scanning" when discovery_active === false but
      // the feature is enabled — presumably "active" means "has found a node";
      // confirm against the backend's meaning of laruche_discovery_active.
      discRow.innerHTML = '';
      discRow.appendChild(el('span', { class: 'llmcfg-disc-searching' },
        ['\u23F3 mDNS scanning... no LaRuche node found yet']));
    } else if (!cfg.llm_laruche_discovery) {
      discRow.innerHTML = '';
      discRow.appendChild(el('span', { class: 'llmcfg-disc-off' },
        ['\u26A0 mDNS discovery disabled']));
    }
  }

  // "Use discovered" button: only visible when the discovered URL differs
  // from what's currently in the field; applies locally, user must still Save.
  if (useBtn && urlEl) {
    if (discovered && urlEl.value !== discovered) {
      useBtn.style.display = '';
      useBtn.onclick = () => {
        urlEl.value = discovered;
        useBtn.style.display = 'none';
        toast('LaRuche URL applied — click Save to persist');
      };
    } else {
      useBtn.style.display = 'none';
    }
  }

  // Auto-populate empty URL field with discovered URL
  if (urlEl && discovered && !urlEl.value) {
    urlEl.value = discovered;
  }

  // ── Model selectors ──
  // Set saved model values on the selects (even before refresh populates full list)
  for (const k of ['llm_laruche_model', 'llm_ollama_model']) {
    const sel = $(('#' + k), root);
    if (sel && cfg[k]) {
      // Ensure the saved value exists as an option
      if (!sel.querySelector('option[value="' + CSS.escape(cfg[k]) + '"]')) {
        sel.appendChild(el('option', { value: cfg[k] }, [cfg[k] + ' (saved)']));
      }
      sel.value = cfg[k];
    }
  }

  // Auto-fetch LaRuche models if we have a URL (fire-and-forget; errors are
  // swallowed so a dead LaRuche node doesn't break page load)
  const larucheUrl = urlEl?.value || discovered;
  if (larucheUrl) {
    refreshModels('laruche').catch(() => {});
  }

  // ── Orchestrator fields (included in same config response) ──
  const orchMode = $('#llm_orchestrator_mode', root);
  if (orchMode && cfg.llm_orchestrator_mode !== undefined) orchMode.value = cfg.llm_orchestrator_mode;

  const orchInterval = $('#llm_orchestrator_interval_s', root);
  if (orchInterval && cfg.llm_orchestrator_interval_s !== undefined) orchInterval.value = cfg.llm_orchestrator_interval_s;

  const orchMax = $('#llm_orchestrator_max_actions', root);
  if (orchMax && cfg.llm_orchestrator_max_actions !== undefined) orchMax.value = cfg.llm_orchestrator_max_actions;

  for (const k of ['llm_orchestrator_log_reasoning', 'llm_orchestrator_skip_if_no_change']) {
    const cb = $(('#' + k), root);
    if (cb) cb.checked = !!cfg[k];
  }

  // Orchestrator badge: 'none' is OFF, any other mode shows its name.
  const orchBadge = $('#orch-badge', root);
  if (orchBadge) {
    const mode = cfg.llm_orchestrator_mode || 'none';
    const label = mode === 'none' ? 'OFF' : mode.toUpperCase();
    orchBadge.textContent = label;
    orchBadge.className = 'llmcfg-badge ' + (mode === 'none' ? 'off' : 'on');
  }

  // Human-readable one-liner describing what the selected mode does.
  const orchStatus = $('#orch-status-row', root);
  if (orchStatus) {
    const mode = cfg.llm_orchestrator_mode || 'none';
    if (mode === 'none') {
      orchStatus.textContent = 'Orchestrator disabled — LLM has no role in scheduling';
    } else if (mode === 'advisor') {
      orchStatus.textContent = 'Advisor mode — LLM suggests 1 action per cycle';
    } else {
      orchStatus.textContent = 'Autonomous mode — LLM runs full agentic loop every '
        + (cfg.llm_orchestrator_interval_s || 60) + 's';
    }
  }
}
|
||||
|
||||
/* Paint the MCP Server section from a /api/mcp/status payload:
 * toggle, port, transport, the tool checkbox grid, badge and status line. */
function applyMCPConfig(cfg) {
  const enabledBox = $('#mcp_enabled', root);
  if (enabledBox) enabledBox.checked = !!cfg.enabled;

  const portInput = $('#mcp_port', root);
  if (portInput) portInput.value = cfg.port || 8765;

  const transportSel = $('#mcp_transport', root);
  if (transportSel && cfg.transport) transportSel.value = cfg.transport;

  buildToolsGrid(cfg.allowed_tools || ALL_TOOLS);

  const isRunning = cfg.running;

  const badge = $('#mcp-badge', root);
  if (badge) {
    // RUNNING beats ENABLED: the server may be enabled but not yet started.
    let label;
    if (isRunning) {
      label = 'RUNNING';
    } else {
      label = cfg.enabled ? 'ENABLED' : 'OFF';
    }
    badge.textContent = label;
    badge.className = 'llmcfg-badge ' + (isRunning ? 'on' : 'off');
  }

  const statusRow = $('#mcp-status-row', root);
  if (statusRow) {
    statusRow.textContent = isRunning
      ? t('llm_cfg.mcp_running') + ' ' + (cfg.port || 8765) + ' (' + (cfg.transport || 'http') + ')'
      : t('llm_cfg.mcp_stopped');
  }
}
|
||||
|
||||
/* Rebuild the MCP tool grid: one checkbox per exposable tool.
 * `enabled` lists the tool names that should start checked. */
function buildToolsGrid(enabled) {
  const grid = $('#tools-grid', root);
  if (!grid) return;
  empty(grid);
  const rows = ALL_TOOLS.map(toolName =>
    el('label', { class: 'llmcfg-tool-item' }, [
      el('input', { type: 'checkbox', id: 'tool_' + toolName,
                    checked: enabled.includes(toolName) ? 'checked' : undefined }),
      document.createTextNode(toolName),
    ]));
  for (const row of rows) grid.appendChild(row);
}
|
||||
|
||||
/* Return the names of all tools whose grid checkbox is currently ticked. */
function getSelectedTools() {
  const picked = [];
  for (const name of ALL_TOOLS) {
    const box = $(('#tool_' + name), root);
    if (box?.checked) picked.push(name);
  }
  return picked;
}
|
||||
|
||||
/* ── Model Selector ────────────────────────────────────── */
|
||||
|
||||
/* Fetch the model list for one backend ('laruche' | 'ollama') and repopulate
 * the matching <select>. For LaRuche, also surface the node's default model. */
async function refreshModels(backend) {
  const isLaruche = backend === 'laruche';
  const selectEl = $(('#' + (isLaruche ? 'llm_laruche_model' : 'llm_ollama_model')), root);
  if (!selectEl) return;

  toast('Fetching ' + backend + ' models…');
  try {
    const res = await api.get('/api/llm/models?backend=' + backend, { timeout: 15000 });
    const gotModels = res?.status === 'ok' && Array.isArray(res.models);
    if (!gotModels) {
      toast('No models returned: ' + (res?.message || 'unknown error'));
      return;
    }

    populateModelSelect(selectEl, res.models, selectEl.value);
    toast(res.models.length + ' model(s) found');

    // Show LaRuche's advertised default model next to the selector.
    if (isLaruche) {
      const infoEl = $('#laruche-default-model', root);
      if (infoEl) {
        if (res.default_model) {
          infoEl.innerHTML = '';
          infoEl.appendChild(el('span', { class: 'llmcfg-laruche-default-label' },
            ['\u26A1 LaRuche default: ']));
          infoEl.appendChild(el('span', { class: 'llmcfg-laruche-default-value' },
            [res.default_model]));
        } else {
          infoEl.textContent = '';
        }
      }
    }
  } catch (e) {
    toast('Error fetching models: ' + e.message);
  }
}
|
||||
|
||||
/* Fill a model <select> with the fetched list, keeping the caller's previous
 * choice selected — even when that model no longer appears in the list. */
function populateModelSelect(selectEl, models, currentValue) {
  const remembered = currentValue || selectEl.value || '';
  empty(selectEl);
  selectEl.appendChild(el('option', { value: '' }, ['Default (server decides)']));
  models.forEach(entry => {
    const name = entry.name || '?';
    // Size is reported in bytes; render as gigabytes with one decimal.
    const sizeLabel = entry.size ? ' (' + (entry.size / 1e9).toFixed(1) + 'G)' : '';
    selectEl.appendChild(el('option', { value: name }, [name + sizeLabel]));
  });
  if (!remembered) return;
  selectEl.value = remembered;
  // Assigning a value with no matching option resets the select to '' —
  // re-add the saved model as a custom option so the setting isn't lost.
  if (!selectEl.value) {
    selectEl.appendChild(el('option', { value: remembered }, [remembered + ' (saved)']));
    selectEl.value = remembered;
  }
}
|
||||
|
||||
/* ── Actions ───────────────────────────────────────────── */
|
||||
|
||||
/* Collect the backend-settings form into a payload and POST it to
 * /api/llm/config, then reload the page state on success. */
async function saveLLM() {
  const payload = {};
  const field = id => $(('#' + id), root);

  ['llm_enabled', 'llm_comments_enabled', 'llm_laruche_discovery'].forEach(k => {
    const node = field(k);
    payload[k] = node ? node.checked : false;
  });

  ['llm_backend', 'llm_laruche_url', 'llm_laruche_model',
   'llm_ollama_url', 'llm_ollama_model',
   'llm_api_provider', 'llm_api_model', 'llm_api_base_url'].forEach(k => {
    const node = field(k);
    if (node) payload[k] = node.value;
  });

  // Numeric fields: `undefined` values are dropped by JSON serialization,
  // so unparseable input simply leaves the server value unchanged.
  ['llm_timeout_s', 'llm_max_tokens', 'llm_comment_max_tokens'].forEach(k => {
    const node = field(k);
    if (node) payload[k] = parseInt(node.value) || undefined;
  });

  // API key is write-only: only send it when the user typed one.
  const keyEl = field('llm_api_key');
  if (keyEl?.value) payload.llm_api_key = keyEl.value;

  try {
    const res = await api.post('/api/llm/config', payload);
    if (res?.status !== 'ok') {
      toast(t('llm_cfg.error') + ': ' + res?.message);
      return;
    }
    toast(t('llm_cfg.saved_llm'));
    await loadAll();
  } catch (e) {
    toast(t('llm_cfg.save_error') + ': ' + e.message);
  }
}
|
||||
|
||||
/* Persist the personality / system-prompt fields. Empty strings are sent
 * deliberately so the backend reverts to its default prompt. */
async function savePrompts() {
  const keys = ['llm_user_name', 'llm_user_bio', 'llm_system_prompt_chat', 'llm_system_prompt_comment'];
  const payload = {};
  keys.forEach(k => {
    const node = $(('#' + k), root);
    if (node) payload[k] = node.value || '';
  });
  try {
    const res = await api.post('/api/llm/config', payload);
    if (res?.status !== 'ok') {
      toast(t('llm_cfg.error') + ': ' + res?.message);
      return;
    }
    toast('Personality & prompts saved');
    await loadAll();
  } catch (e) {
    toast(t('llm_cfg.save_error') + ': ' + e.message);
  }
}
|
||||
|
||||
/* Send a one-shot "ping" chat message to verify the configured backend
 * actually answers; shows (the start of) its reply in a toast. */
async function testLLM() {
  toast(t('llm_cfg.testing'));
  try {
    const res = await api.post('/api/llm/chat', { message: 'ping', session_id: 'test' });
    if (res?.status !== 'ok') {
      toast(t('llm_cfg.test_failed') + ': ' + (res?.message || 'no response'));
      return;
    }
    toast('OK — ' + (res.response || '').slice(0, 60));
  } catch (e) {
    toast(t('llm_cfg.error') + ': ' + e.message);
  }
}
|
||||
|
||||
/* Flip the MCP server on/off server-side (fires on checkbox change), then
 * repaint from fresh state so badge/status reflect what actually happened. */
async function toggleMCP(enabled) {
  try {
    const res = await api.post('/api/mcp/toggle', { enabled });
    if (res?.status !== 'ok') {
      toast(t('llm_cfg.error') + ': ' + res?.message);
      return;
    }
    toast(enabled ? t('llm_cfg.mcp_enabled') : t('llm_cfg.mcp_disabled'));
    await loadAll();
  } catch (e) {
    toast(t('llm_cfg.error') + ': ' + e.message);
  }
}
|
||||
|
||||
/* Persist the orchestrator mode, interval and limits to /api/llm/config. */
async function saveOrch() {
  const payload = {};

  const modeSel = $('#llm_orchestrator_mode', root);
  if (modeSel) payload.llm_orchestrator_mode = modeSel.value;

  // Numeric fields: unparseable input becomes `undefined`, which JSON
  // serialization drops, leaving the server value untouched.
  ['llm_orchestrator_interval_s', 'llm_orchestrator_max_actions'].forEach(k => {
    const node = $(('#' + k), root);
    if (node) payload[k] = parseInt(node.value) || undefined;
  });

  ['llm_orchestrator_log_reasoning', 'llm_orchestrator_skip_if_no_change'].forEach(k => {
    const box = $(('#' + k), root);
    if (box) payload[k] = box.checked;
  });

  try {
    const res = await api.post('/api/llm/config', payload);
    if (res?.status !== 'ok') {
      toast(t('llm_cfg.error') + ': ' + res?.message);
      return;
    }
    toast('Orchestrator config saved');
    await loadAll();
  } catch (e) {
    toast(t('llm_cfg.save_error') + ': ' + e.message);
  }
}
|
||||
|
||||
/* Persist the MCP transport, port and tool whitelist to /api/mcp/config. */
async function saveMCP() {
  const portField = $('#mcp_port', root);
  const transportField = $('#mcp_transport', root);
  const payload = {
    allowed_tools: getSelectedTools(),
    port: parseInt(portField?.value) || 8765,
    transport: transportField?.value || 'http',
  };
  try {
    const res = await api.post('/api/mcp/config', payload);
    if (res?.status !== 'ok') {
      toast(t('llm_cfg.error') + ': ' + res?.message);
      return;
    }
    toast(t('llm_cfg.saved_mcp'));
    await loadAll();
  } catch (e) {
    toast(t('llm_cfg.save_error') + ': ' + e.message);
  }
}
|
||||
@@ -382,6 +382,35 @@ function startClock() {
|
||||
clockTimer = setInterval(updateCountdowns, 1000);
|
||||
}
|
||||
|
||||
/* ── origin badge resolver ── */
|
||||
/* Map an action record's trigger/decision metadata to an origin badge
 * descriptor {label, cls}, or null when nothing identifiable queued it.
 * Check order matters: more specific origins (LLM, AI) win over generic ones. */
function _resolveOrigin(r) {
  const meta = r.metadata || {};
  const trigger = (r.trigger_source || meta.trigger_source || '').toLowerCase();
  const method = (meta.decision_method || '').toLowerCase();
  const origin = (meta.decision_origin || '').toLowerCase();

  const badge = (label, cls) => ({ label, cls });

  // LLM orchestrator (autonomous or advisor)
  if (trigger === 'llm_autonomous' || origin === 'llm' || method === 'llm_autonomous') {
    return badge('LLM', 'llm');
  }
  if (trigger === 'llm_advisor' || method === 'llm_advisor') {
    return badge('LLM Advisor', 'llm');
  }
  // AI model (ML-based decision)
  if (method === 'ai_confirmed' || method === 'ai_boosted' || origin === 'ai_confirmed') {
    return badge('AI', 'ai');
  }
  // MCP (external tool call)
  if (trigger === 'mcp' || trigger === 'mcp_tool') {
    return badge('MCP', 'mcp');
  }
  // Manual (UI or API)
  if (trigger === 'ui' || trigger === 'manual' || trigger === 'api') {
    return badge('Manual', 'manual');
  }
  // Scheduler heuristic (default)
  if (trigger === 'scheduler' || trigger === 'trigger_event' || method === 'heuristic') {
    return badge('Heuristic', 'heuristic');
  }
  // Fallback: label with the raw trigger when it's non-empty.
  if (trigger) return badge(trigger, 'heuristic');
  return null;
}
|
||||
|
||||
/* ── card ── */
|
||||
function cardEl(r) {
|
||||
const cs = r._computed_status;
|
||||
@@ -407,6 +436,12 @@ function cardEl(r) {
|
||||
el('span', { class: `badge status-${cs}` }, [cs]),
|
||||
]));
|
||||
|
||||
/* origin badge — shows who queued this action */
|
||||
const origin = _resolveOrigin(r);
|
||||
if (origin) {
|
||||
children.push(el('div', { class: 'originBadge origin-' + origin.cls }, [origin.label]));
|
||||
}
|
||||
|
||||
/* chips */
|
||||
const chips = [];
|
||||
if (r.hostname) chips.push(chipEl(r.hostname, 195));
|
||||
|
||||
@@ -81,6 +81,10 @@ function buildShell() {
|
||||
class: 'sentinel-toggle', id: 'sentinel-clear',
|
||||
style: 'padding:3px 8px;font-size:0.65rem',
|
||||
}, [t('sentinel.clearAll')]),
|
||||
el('button', {
|
||||
class: 'sentinel-toggle sentinel-ai-btn', id: 'sentinel-ai-summary',
|
||||
style: 'padding:3px 8px;font-size:0.65rem;display:none',
|
||||
}, ['\uD83E\uDDE0 AI Summary']),
|
||||
]),
|
||||
]),
|
||||
el('div', { class: 'sentinel-panel-body', id: 'sentinel-events' }, [
|
||||
@@ -218,6 +222,25 @@ function bindEvents() {
|
||||
saveDevice(devSave.dataset.devSave);
|
||||
return;
|
||||
}
|
||||
|
||||
// AI Analyze event
|
||||
const aiAnalyze = e.target.closest('[data-ai-analyze]');
|
||||
if (aiAnalyze) {
|
||||
analyzeEvent(parseInt(aiAnalyze.dataset.aiAnalyze));
|
||||
return;
|
||||
}
|
||||
|
||||
// AI Summary
|
||||
if (e.target.closest('#sentinel-ai-summary')) {
|
||||
summarizeEvents();
|
||||
return;
|
||||
}
|
||||
|
||||
// AI Generate rule
|
||||
if (e.target.closest('#sentinel-ai-gen-rule')) {
|
||||
generateRuleFromAI();
|
||||
return;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@@ -314,6 +337,10 @@ function paintEvents() {
|
||||
if (!container) return;
|
||||
empty(container);
|
||||
|
||||
// Show AI Summary button when there are enough unread events
|
||||
const aiSumBtn = $('#sentinel-ai-summary', root);
|
||||
if (aiSumBtn) aiSumBtn.style.display = unreadCount > 3 ? '' : 'none';
|
||||
|
||||
if (events.length === 0) {
|
||||
container.appendChild(
|
||||
el('div', {
|
||||
@@ -338,6 +365,12 @@ function paintEvents() {
|
||||
el('span', { class: 'sentinel-event-title' }, [escapeHtml(ev.title)]),
|
||||
]),
|
||||
el('div', { style: 'display:flex;align-items:center;gap:6px;flex-shrink:0' }, [
|
||||
el('button', {
|
||||
class: 'sentinel-toggle sentinel-ai-btn',
|
||||
'data-ai-analyze': ev.id,
|
||||
style: 'padding:1px 6px;font-size:0.55rem',
|
||||
title: 'AI Analyze',
|
||||
}, ['\uD83E\uDDE0']),
|
||||
el('span', { class: 'sentinel-event-time' }, [formatTime(ev.timestamp)]),
|
||||
...(isUnread ? [
|
||||
el('button', {
|
||||
@@ -360,6 +393,7 @@ function paintEvents() {
|
||||
[ev.ip_address])
|
||||
] : []),
|
||||
]),
|
||||
el('div', { class: 'sentinel-ai-result', id: `ai-result-${ev.id}` }),
|
||||
]);
|
||||
container.appendChild(card);
|
||||
}
|
||||
@@ -382,12 +416,16 @@ function paintSidebar() {
|
||||
/* ── Rules ─────────────────────────────────────────────── */
|
||||
|
||||
function paintRules(container) {
|
||||
// Add rule button
|
||||
// Add rule button + AI generate
|
||||
container.appendChild(
|
||||
el('button', {
|
||||
class: 'sentinel-toggle', id: 'sentinel-add-rule',
|
||||
style: 'align-self:flex-start;margin-bottom:4px',
|
||||
}, ['+ ' + t('sentinel.addRule')])
|
||||
el('div', { style: 'display:flex;gap:6px;margin-bottom:4px;flex-wrap:wrap' }, [
|
||||
el('button', {
|
||||
class: 'sentinel-toggle', id: 'sentinel-add-rule',
|
||||
}, ['+ ' + t('sentinel.addRule')]),
|
||||
el('button', {
|
||||
class: 'sentinel-toggle sentinel-ai-btn', id: 'sentinel-ai-gen-rule',
|
||||
}, ['\uD83E\uDDE0 Generate Rule']),
|
||||
])
|
||||
);
|
||||
|
||||
if (rules.length === 0) {
|
||||
@@ -732,6 +770,81 @@ async function saveNotifiers() {
|
||||
} catch (err) { toast(err.message, 3000, 'error'); }
|
||||
}
|
||||
|
||||
/* ── AI Functions ──────────────────────────────────────── */
|
||||
|
||||
/* Ask the backend to AI-analyze one sentinel event and show the answer inline
 * under the event card. Clicking again on an open analysis collapses it. */
async function analyzeEvent(eventId) {
  const resultEl = $(`#ai-result-${eventId}`, root);
  if (!resultEl) return;

  // Toggle behaviour: an already-open panel just closes.
  if (resultEl.classList.contains('active')) {
    resultEl.classList.remove('active');
    return;
  }

  resultEl.textContent = '\u23F3 Analyzing...';
  resultEl.classList.add('active');

  try {
    const res = await api.post('/api/sentinel/analyze', { event_ids: [eventId] });
    const succeeded = res?.status === 'ok';
    resultEl.textContent = succeeded
      ? res.analysis
      : '\u274C ' + (res?.message || 'Analysis failed');
  } catch (e) {
    resultEl.textContent = '\u274C Error: ' + e.message;
  }
}
|
||||
|
||||
/* Request an AI digest of the sentinel event feed and pin it to the top of
 * the list, replacing any earlier summary. Button label doubles as spinner. */
async function summarizeEvents() {
  const btn = $('#sentinel-ai-summary', root);
  if (btn) btn.textContent = '\u23F3 Summarizing...';

  try {
    const res = await api.post('/api/sentinel/summarize', {});
    if (res?.status !== 'ok') {
      toast('Summary failed: ' + (res?.message || 'unknown'), 3000, 'error');
      return;
    }
    const feed = $('#sentinel-events', root);
    if (feed) {
      // Only one summary card at a time — drop the previous one.
      const stale = feed.querySelector('.sentinel-ai-summary');
      if (stale) stale.remove();
      const card = el('div', { class: 'sentinel-ai-summary' }, [
        el('div', { style: 'font-weight:600;font-size:0.7rem;margin-bottom:4px;color:var(--acid)' },
          ['\uD83E\uDDE0 AI Summary']),
        el('div', { style: 'font-size:0.7rem;white-space:pre-wrap' }, [res.summary]),
      ]);
      feed.insertBefore(card, feed.firstChild);
    }
    toast('Summary generated');
  } catch (e) {
    toast('Error: ' + e.message, 3000, 'error');
  } finally {
    // Restore the button label whether we succeeded, failed or returned early.
    if (btn) btn.textContent = '\uD83E\uDDE0 AI Summary';
  }
}
|
||||
|
||||
/* Prompt for a plain-English rule description, let the backend LLM draft a
 * sentinel rule from it, and open the drafted rule in the editor for review. */
async function generateRuleFromAI() {
  const desc = prompt('Describe the rule you want (e.g. "alert when a new device joins my network"):');
  const description = desc && desc.trim();
  if (!description) return;

  toast('\u23F3 Generating rule...');
  try {
    const res = await api.post('/api/sentinel/suggest-rule', { description });
    if (res?.status === 'ok' && res.rule) {
      showRuleEditor(res.rule);
      toast('Rule generated — review and save');
    } else {
      toast('Could not generate rule: ' + (res?.message || res?.raw || 'unknown'), 4000, 'error');
    }
  } catch (e) {
    toast('Error: ' + e.message, 3000, 'error');
  }
}
|
||||
|
||||
/* ── Helpers ───────────────────────────────────────────── */
|
||||
|
||||
function formatEventType(type) {
|
||||
|
||||
@@ -156,19 +156,48 @@ class BluetoothUtils:
|
||||
self.adapter_props.Set("org.bluez.Adapter1", "Discoverable", dbus.Boolean(True))
|
||||
self.adapter_props.Set("org.bluez.Adapter1", "DiscoverableTimeout", dbus.UInt32(BT_DISCOVERABLE_TIMEOUT))
|
||||
|
||||
self.adapter_methods.StartDiscovery()
|
||||
time.sleep(BT_SCAN_DURATION_S)
|
||||
# StartDiscovery can fail if already running or adapter is busy
|
||||
discovery_started = False
|
||||
try:
|
||||
self.adapter_methods.StartDiscovery()
|
||||
discovery_started = True
|
||||
except dbus.exceptions.DBusException as e:
|
||||
err_name = e.get_dbus_name() if hasattr(e, 'get_dbus_name') else str(e)
|
||||
if "InProgress" in str(err_name) or "Busy" in str(err_name):
|
||||
self.logger.info("Discovery already in progress, continuing with existing scan")
|
||||
discovery_started = True
|
||||
else:
|
||||
# Try stopping and restarting
|
||||
self.logger.warning(f"StartDiscovery failed ({err_name}), attempting stop+restart")
|
||||
try:
|
||||
self.adapter_methods.StopDiscovery()
|
||||
time.sleep(0.5)
|
||||
self.adapter_methods.StartDiscovery()
|
||||
discovery_started = True
|
||||
except dbus.exceptions.DBusException as e2:
|
||||
self.logger.warning(f"Retry also failed ({e2}), returning cached devices")
|
||||
|
||||
if discovery_started:
|
||||
time.sleep(BT_SCAN_DURATION_S)
|
||||
|
||||
objects = self.manager_interface.GetManagedObjects()
|
||||
devices = []
|
||||
for path, ifaces in objects.items():
|
||||
if "org.bluez.Device1" in ifaces:
|
||||
dev = ifaces["org.bluez.Device1"]
|
||||
rssi = dev.get("RSSI", None)
|
||||
try:
|
||||
rssi = int(rssi) if rssi is not None else -999
|
||||
except (ValueError, TypeError):
|
||||
rssi = -999
|
||||
devices.append({
|
||||
"name": str(dev.get("Name", "Unknown")),
|
||||
"address": str(dev.get("Address", "")),
|
||||
"paired": bool(dev.get("Paired", False)),
|
||||
"trusted": bool(dev.get("Trusted", False)),
|
||||
"connected": bool(dev.get("Connected", False))
|
||||
"connected": bool(dev.get("Connected", False)),
|
||||
"rssi": rssi,
|
||||
"icon": str(dev.get("Icon", "")),
|
||||
})
|
||||
|
||||
try:
|
||||
|
||||
335
web_utils/llm_utils.py
Normal file
335
web_utils/llm_utils.py
Normal file
@@ -0,0 +1,335 @@
|
||||
# web_utils/llm_utils.py
|
||||
# HTTP endpoints for LLM chat, LLM bridge config, and MCP server config.
|
||||
# Follows the same pattern as all other web_utils classes in this project.
|
||||
|
||||
import json
|
||||
import uuid
|
||||
from typing import Any, Dict
|
||||
|
||||
from logger import Logger
|
||||
|
||||
logger = Logger(name="llm_utils.py", level=20)
|
||||
|
||||
_ALLOWED_TOOLS = [
|
||||
"get_hosts", "get_vulnerabilities", "get_credentials",
|
||||
"get_action_history", "get_status", "run_action", "query_db",
|
||||
]
|
||||
|
||||
|
||||
def _send_json(handler, data: Any, status: int = 200) -> None:
    """Serialize *data* to JSON and write a complete HTTP response on *handler*.

    *handler* is expected to provide the BaseHTTPRequestHandler API
    (send_response / send_header / end_headers / wfile).
    """
    payload = json.dumps(data).encode("utf-8")
    handler.send_response(status)
    for header, value in (
        ("Content-Type", "application/json"),
        ("Content-Length", str(len(payload))),
    ):
        handler.send_header(header, value)
    handler.end_headers()
    handler.wfile.write(payload)
|
||||
|
||||
|
||||
class LLMUtils:
|
||||
    def __init__(self, shared_data):
        # Keep a reference to the app-wide shared_data object; handlers below
        # read/write its config mapping and call save_config() on it.
        self.shared_data = shared_data
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# GET /api/llm/status
|
||||
# ------------------------------------------------------------------
|
||||
def get_llm_status(self, handler) -> None:
|
||||
"""Return current LLM bridge status."""
|
||||
try:
|
||||
from llm_bridge import LLMBridge
|
||||
status = LLMBridge().status()
|
||||
except Exception as e:
|
||||
status = {"error": str(e), "enabled": False}
|
||||
_send_json(handler, status)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# POST /api/llm/chat {"message": "...", "session_id": "..."}
|
||||
# ------------------------------------------------------------------
|
||||
def handle_chat(self, data: Dict) -> Dict:
|
||||
"""Process a chat message and return the LLM response."""
|
||||
message = (data.get("message") or "").strip()
|
||||
if not message:
|
||||
return {"status": "error", "message": "Empty message"}
|
||||
|
||||
session_id = data.get("session_id") or "default"
|
||||
|
||||
try:
|
||||
from llm_bridge import LLMBridge
|
||||
response = LLMBridge().chat(message, session_id=session_id)
|
||||
return {"status": "ok", "response": response or "(no response)", "session_id": session_id}
|
||||
except Exception as e:
|
||||
logger.error(f"Chat error: {e}")
|
||||
return {"status": "error", "message": str(e)}
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# POST /api/llm/clear_history {"session_id": "..."}
|
||||
# ------------------------------------------------------------------
|
||||
def clear_chat_history(self, data: Dict) -> Dict:
|
||||
session_id = data.get("session_id") or "default"
|
||||
try:
|
||||
from llm_bridge import LLMBridge
|
||||
LLMBridge().clear_history(session_id)
|
||||
return {"status": "ok"}
|
||||
except Exception as e:
|
||||
return {"status": "error", "message": str(e)}
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# GET /api/mcp/status
|
||||
# ------------------------------------------------------------------
|
||||
def get_mcp_status(self, handler) -> None:
|
||||
"""Return current MCP server status."""
|
||||
try:
|
||||
import mcp_server
|
||||
status = mcp_server.server_status()
|
||||
except Exception as e:
|
||||
status = {"error": str(e), "enabled": False, "running": False}
|
||||
_send_json(handler, status)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# POST /api/mcp/toggle {"enabled": true/false}
|
||||
# ------------------------------------------------------------------
|
||||
def toggle_mcp(self, data: Dict) -> Dict:
|
||||
"""Enable or disable the MCP server."""
|
||||
enabled = bool(data.get("enabled", False))
|
||||
try:
|
||||
self.shared_data.config["mcp_enabled"] = enabled
|
||||
setattr(self.shared_data, "mcp_enabled", enabled)
|
||||
self.shared_data.save_config()
|
||||
|
||||
import mcp_server
|
||||
if enabled and not mcp_server.is_running():
|
||||
started = mcp_server.start()
|
||||
return {"status": "ok", "enabled": True, "started": started}
|
||||
elif not enabled:
|
||||
mcp_server.stop()
|
||||
return {"status": "ok", "enabled": False}
|
||||
return {"status": "ok", "enabled": enabled}
|
||||
except Exception as e:
|
||||
logger.error(f"MCP toggle error: {e}")
|
||||
return {"status": "error", "message": str(e)}
|
||||
|
||||
# ------------------------------------------------------------------
# POST /api/mcp/config {"allowed_tools": [...], "port": 8765, ...}
# ------------------------------------------------------------------
def save_mcp_config(self, data: Dict) -> Dict:
    """Save MCP server configuration and echo the resulting settings.

    Unknown tool names are filtered out against ``_ALLOWED_TOOLS``,
    ports outside the unprivileged range are silently ignored, and
    only the "http"/"stdio" transports are accepted.
    """
    try:
        cfg = self.shared_data.config

        if "allowed_tools" in data:
            cfg["mcp_allowed_tools"] = [
                name for name in data["allowed_tools"]
                if name in _ALLOWED_TOOLS
            ]

        if "port" in data:
            port = int(data["port"])
            if 1024 <= port <= 65535:  # unprivileged range only
                cfg["mcp_port"] = port

        transport = data.get("transport")
        if transport in ("http", "stdio"):
            cfg["mcp_transport"] = transport

        self.shared_data.save_config()
        return {
            "status": "ok",
            "config": {
                "mcp_enabled": cfg.get("mcp_enabled", False),
                "mcp_port": cfg.get("mcp_port", 8765),
                "mcp_transport": cfg.get("mcp_transport", "http"),
                "mcp_allowed_tools": cfg.get("mcp_allowed_tools", []),
            },
        }
    except Exception as exc:
        logger.error(f"MCP config save error: {exc}")
        return {"status": "error", "message": str(exc)}
|
||||
|
||||
# ------------------------------------------------------------------
# POST /api/llm/config {all llm_* keys}
# ------------------------------------------------------------------
def save_llm_config(self, data: Dict) -> Dict:
    """Save LLM bridge / orchestrator configuration from a request payload.

    Only whitelisted ``llm_*`` keys are accepted; integer and boolean
    keys are coerced to their proper types. All values are validated
    and coerced *before* any of them is applied, so an invalid value
    (e.g. a non-numeric timeout) rejects the whole request without
    leaving the config partially mutated.

    Side effects on success:
      * config dict and matching ``shared_data`` attributes updated,
        then persisted + config cache invalidated;
      * LaRuche mDNS discovery restarted when its URL/toggle changed;
      * the LLM orchestrator is notified when its mode changed.

    Returns:
        ``{"status": "ok"}`` or ``{"status": "error", "message": ...}``.
    """
    _llm_keys = {
        "llm_enabled", "llm_comments_enabled", "llm_comments_log", "llm_chat_enabled",
        "llm_backend", "llm_laruche_discovery", "llm_laruche_url", "llm_laruche_model",
        "llm_ollama_url", "llm_ollama_model",
        "llm_api_provider", "llm_api_key", "llm_api_model", "llm_api_base_url",
        "llm_timeout_s", "llm_max_tokens", "llm_comment_max_tokens",
        "llm_chat_history_size", "llm_chat_tools_enabled",
        # Orchestrator keys
        "llm_orchestrator_mode", "llm_orchestrator_interval_s",
        "llm_orchestrator_max_actions", "llm_orchestrator_allowed_actions",
        "llm_orchestrator_skip_if_no_change", "llm_orchestrator_log_reasoning",
        # Personality & prompt keys
        "llm_system_prompt_chat", "llm_system_prompt_comment",
        "llm_user_name", "llm_user_bio",
    }
    _int_keys = {
        "llm_timeout_s", "llm_max_tokens", "llm_comment_max_tokens",
        "llm_chat_history_size", "llm_orchestrator_interval_s",
        "llm_orchestrator_max_actions",
    }
    _bool_keys = {
        "llm_enabled", "llm_comments_enabled", "llm_comments_log", "llm_chat_enabled",
        "llm_laruche_discovery", "llm_chat_tools_enabled",
        "llm_orchestrator_skip_if_no_change", "llm_orchestrator_log_reasoning",
    }
    try:
        # Pass 1: validate & coerce everything up-front so a bad value
        # aborts before anything has been written (atomic save).
        updates = {}
        for key in _llm_keys:
            if key not in data:
                continue
            value = data[key]
            if key in _int_keys:
                value = int(value)  # ValueError here rejects the whole request
            elif key in _bool_keys:
                value = bool(value)
            updates[key] = value

        # Pass 2: apply all validated values.
        cfg = self.shared_data.config
        for key, value in updates.items():
            cfg[key] = value
            setattr(self.shared_data, key, value)

        self.shared_data.save_config()
        self.shared_data.invalidate_config_cache()

        # Restart discovery if URL/toggle changed
        if "llm_laruche_url" in data or "llm_laruche_discovery" in data:
            try:
                from llm_bridge import LLMBridge
                bridge = LLMBridge()
                bridge._laruche_url = cfg.get("llm_laruche_url") or None
                if cfg.get("llm_laruche_discovery", True) and not bridge._discovery_active:
                    bridge._start_laruche_discovery()
            except Exception:
                pass  # bridge unavailable — non-fatal, config is saved

        # Notify orchestrator of mode change
        if "llm_orchestrator_mode" in data:
            try:
                orch = getattr(self.shared_data, '_orchestrator_ref', None)
                if orch and hasattr(orch, 'llm_orchestrator'):
                    orch.llm_orchestrator.restart_if_mode_changed()
            except Exception:
                pass  # orchestrator not wired yet — non-fatal

        return {"status": "ok"}
    except Exception as e:
        logger.error(f"LLM config save error: {e}")
        return {"status": "error", "message": str(e)}
|
||||
|
||||
# ------------------------------------------------------------------
# GET /api/llm/models?backend=laruche|ollama
# Returns available models from the specified backend.
# ------------------------------------------------------------------
def get_llm_models(self, handler, params: Dict = None) -> None:
    """Send the model list for the requested backend as JSON.

    ``backend`` is read from the query params ("laruche" by default).
    For LaRuche the URL comes from config, falling back to the
    bridge's mDNS-discovered address; the response may also carry
    LaRuche's default model. Any failure yields an error payload
    with an empty model list.
    """
    backend = (params or {}).get("backend", "laruche")
    found = []
    default_model = None
    try:
        if backend == "laruche":
            import land_protocol

            # Config URL wins; otherwise use what discovery found.
            url = self.shared_data.config.get("llm_laruche_url", "")
            if not url:
                try:
                    from llm_bridge import LLMBridge
                    bridge = LLMBridge()
                    with bridge._laruche_lock:
                        url = bridge._laruche_url or ""
                except Exception:
                    pass
            if url:
                data = land_protocol.list_models(url, timeout=10)
                entries = data.get("models", []) if isinstance(data, dict) else data
                for entry in entries:
                    if isinstance(entry, dict):
                        found.append({
                            "name": entry.get("name", entry.get("model", "?")),
                            "size": entry.get("size", 0),
                            "modified": entry.get("modified_at", ""),
                        })
                    elif isinstance(entry, str):
                        found.append({"name": entry, "size": 0})
                # Extract default model from the same /models response
                if isinstance(data, dict):
                    default_model = data.get("default_model")
        elif backend == "ollama":
            import urllib.request

            base = self.shared_data.config.get(
                "llm_ollama_url", "http://127.0.0.1:11434").rstrip("/")
            req = urllib.request.Request(f"{base}/api/tags", method="GET")
            with urllib.request.urlopen(req, timeout=10) as resp:
                body = json.loads(resp.read().decode())
                for entry in body.get("models", []):
                    found.append({
                        "name": entry.get("name", "?"),
                        "size": entry.get("size", 0),
                        "modified": entry.get("modified_at", ""),
                    })
    except Exception as exc:
        _send_json(handler, {"status": "error", "message": str(exc), "models": []})
        return

    payload = {"status": "ok", "backend": backend, "models": found}
    if default_model:
        payload["default_model"] = default_model

    _send_json(handler, payload)
|
||||
|
||||
# ------------------------------------------------------------------
# GET /api/llm/reasoning
# Returns the llm_orchestrator chat session (reasoning log).
# ------------------------------------------------------------------
def get_llm_reasoning(self, handler) -> None:
    """Send the LLM orchestrator's reasoning session history as JSON.

    Copies the "llm_orchestrator" chat history under the bridge's
    history lock; any failure produces an empty-message error payload.
    """
    try:
        from llm_bridge import LLMBridge

        bridge = LLMBridge()
        with bridge._hist_lock:
            messages = list(bridge._chat_histories.get("llm_orchestrator", []))
        _send_json(handler, {
            "status": "ok",
            "messages": messages,
            "count": len(messages),
        })
    except Exception as exc:
        _send_json(handler, {
            "status": "error",
            "message": str(exc),
            "messages": [],
            "count": 0,
        })
|
||||
|
||||
# ------------------------------------------------------------------
# GET /api/llm/config
# ------------------------------------------------------------------
def get_llm_config(self, handler) -> None:
    """Return current LLM config (api_key redacted) + live discovery state."""
    exposed_keys = (
        "llm_enabled", "llm_comments_enabled", "llm_comments_log", "llm_chat_enabled",
        "llm_backend", "llm_laruche_discovery", "llm_laruche_url", "llm_laruche_model",
        "llm_ollama_url", "llm_ollama_model",
        "llm_api_provider", "llm_api_model", "llm_api_base_url",
        "llm_timeout_s", "llm_max_tokens", "llm_comment_max_tokens",
        "llm_chat_history_size", "llm_chat_tools_enabled",
        # Orchestrator
        "llm_orchestrator_mode", "llm_orchestrator_interval_s",
        "llm_orchestrator_max_actions", "llm_orchestrator_skip_if_no_change",
        "llm_orchestrator_log_reasoning",
        # Personality & prompts
        "llm_system_prompt_chat", "llm_system_prompt_comment",
        "llm_user_name", "llm_user_bio",
    )
    cfg = self.shared_data.config
    payload = {key: cfg.get(key) for key in exposed_keys}
    # Never ship the key itself — only whether one is configured.
    payload["llm_api_key_set"] = bool(cfg.get("llm_api_key", ""))

    # Default prompts for placeholder display in the UI
    payload["llm_default_prompt_chat"] = (
        "You are Bjorn, an autonomous network security AI assistant running on a Raspberry Pi. "
        "Current state: {hosts} hosts discovered, {vulns} vulnerabilities, {creds} credentials captured. "
        "Operation mode: {mode}. Current action: {status}. "
        "Answer security questions concisely and technically. "
        "You can discuss network topology, vulnerabilities, and suggest next steps. "
        "Use brief Norse references occasionally. Never break character."
    )
    payload["llm_default_prompt_comment"] = (
        "You are Bjorn, a terse Norse-themed autonomous security AI. "
        "Reply with ONE sentence of at most 12 words as a status comment. "
        "Be cryptic, dark, and technical. No punctuation at the end."
    )

    # Inject live mDNS discovery state so the UI can show it
    try:
        from llm_bridge import LLMBridge

        bridge = LLMBridge()
        with bridge._laruche_lock:
            payload["laruche_discovered_url"] = bridge._laruche_url or ""
            payload["laruche_discovery_active"] = bridge._discovery_active
    except Exception:
        payload["laruche_discovered_url"] = ""
        payload["laruche_discovery_active"] = False

    _send_json(handler, payload)
|
||||
@@ -2,18 +2,21 @@
|
||||
"""
|
||||
Network utilities for WiFi/network operations.
|
||||
Handles WiFi scanning, connection, known networks management.
|
||||
Compatible with both legacy NM keyfiles and Trixie netplan.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
import json
|
||||
import subprocess
|
||||
import logging
|
||||
import re
|
||||
import os
|
||||
import glob
|
||||
import re
|
||||
from typing import Any, Dict, Optional, List
|
||||
import logging
|
||||
from logger import Logger
|
||||
|
||||
logger = Logger(name="network_utils.py", level=logging.DEBUG)
|
||||
|
||||
|
||||
class NetworkUtils:
|
||||
"""Utilities for network and WiFi management."""
|
||||
|
||||
@@ -21,94 +24,107 @@ class NetworkUtils:
|
||||
self.logger = logger
|
||||
self.shared_data = shared_data
|
||||
|
||||
def get_known_wifi(self, handler):
|
||||
"""List known WiFi networks with priorities."""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
['nmcli', '-t', '-f', 'NAME,TYPE,AUTOCONNECT-PRIORITY', 'connection', 'show'],
|
||||
check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
|
||||
)
|
||||
stdout = result.stdout
|
||||
self.logger.debug(f"nmcli connection show output:\n{stdout}")
|
||||
# ── helpers ───────────────────────────────────────────────────────
|
||||
|
||||
known_networks = []
|
||||
lines = stdout.strip().split('\n')
|
||||
for line in lines:
|
||||
@staticmethod
|
||||
def _run(cmd: list[str], **kw) -> subprocess.CompletedProcess:
|
||||
"""Run a command, returning CompletedProcess."""
|
||||
return subprocess.run(
|
||||
cmd, check=True, stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE, text=True, **kw,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _json_response(handler, code: int, payload: dict):
|
||||
handler.send_response(code)
|
||||
handler.send_header("Content-type", "application/json")
|
||||
handler.end_headers()
|
||||
handler.wfile.write(json.dumps(payload).encode('utf-8'))
|
||||
|
||||
# ── known networks ───────────────────────────────────────────────
|
||||
|
||||
def get_known_wifi(self, handler):
|
||||
"""List known WiFi networks with priorities.
|
||||
|
||||
Uses nmcli terse output. On Trixie, netplan-generated profiles
|
||||
(named ``netplan-wlan0-*``) appear alongside user-created NM
|
||||
profiles — both are returned.
|
||||
"""
|
||||
try:
|
||||
result = self._run(
|
||||
['nmcli', '-t', '-f', 'NAME,TYPE,AUTOCONNECT-PRIORITY', 'connection', 'show']
|
||||
)
|
||||
self.logger.debug(f"nmcli connection show output:\n{result.stdout}")
|
||||
|
||||
known_networks: list[dict] = []
|
||||
for line in result.stdout.strip().splitlines():
|
||||
if not line.strip():
|
||||
continue
|
||||
parts = line.split(':')
|
||||
# nmcli -t uses ':' as delimiter — SSIDs with ':' are
|
||||
# escaped by nmcli (backslash-colon), so split from
|
||||
# the right to be safe: last field = priority,
|
||||
# second-to-last = type, rest = name.
|
||||
parts = line.rsplit(':', 2)
|
||||
if len(parts) == 3:
|
||||
name, conn_type, priority = parts
|
||||
name, conn_type, priority_str = parts
|
||||
elif len(parts) == 2:
|
||||
name, conn_type = parts
|
||||
priority = '0'
|
||||
self.logger.warning(f"Missing priority for connection {name}. Assigning priority 0.")
|
||||
priority_str = '0'
|
||||
else:
|
||||
self.logger.warning(f"Unexpected line format: {line}")
|
||||
continue
|
||||
|
||||
if conn_type.lower() in ['802-11-wireless', 'wireless', 'wifi']:
|
||||
try:
|
||||
priority_int = int(priority) if priority.isdigit() else 0
|
||||
except ValueError:
|
||||
priority_int = 0
|
||||
self.logger.warning(f"Non-numeric priority for {name}. Assigning priority 0.")
|
||||
known_networks.append({
|
||||
'ssid': name,
|
||||
'priority': priority_int
|
||||
})
|
||||
# Unescape nmcli backslash-colon
|
||||
name = name.replace('\\:', ':')
|
||||
|
||||
if conn_type.strip().lower() not in (
|
||||
'802-11-wireless', 'wireless', 'wifi',
|
||||
):
|
||||
continue
|
||||
|
||||
try:
|
||||
priority_int = int(priority_str.strip())
|
||||
except (ValueError, AttributeError):
|
||||
priority_int = 0
|
||||
|
||||
known_networks.append({
|
||||
'ssid': name.strip(),
|
||||
'priority': priority_int,
|
||||
})
|
||||
|
||||
self.logger.debug(f"Extracted known networks: {known_networks}")
|
||||
known_networks.sort(key=lambda x: x['priority'], reverse=True)
|
||||
self._json_response(handler, 200, {"known_networks": known_networks})
|
||||
|
||||
handler.send_response(200)
|
||||
handler.send_header("Content-type", "application/json")
|
||||
handler.end_headers()
|
||||
handler.wfile.write(json.dumps({"known_networks": known_networks}).encode('utf-8'))
|
||||
except subprocess.CalledProcessError as e:
|
||||
self.logger.error(f"Error getting known Wi-Fi networks: {e.stderr.strip()}")
|
||||
handler.send_response(500)
|
||||
handler.send_header("Content-type", "application/json")
|
||||
handler.end_headers()
|
||||
handler.wfile.write(json.dumps({"error": e.stderr.strip()}).encode('utf-8'))
|
||||
self._json_response(handler, 500, {"error": e.stderr.strip()})
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error getting known Wi-Fi networks: {e}")
|
||||
handler.send_response(500)
|
||||
handler.send_header("Content-type", "application/json")
|
||||
handler.end_headers()
|
||||
handler.wfile.write(json.dumps({"error": str(e)}).encode('utf-8'))
|
||||
self._json_response(handler, 500, {"error": str(e)})
|
||||
|
||||
def delete_known_wifi(self, data):
|
||||
"""Delete a known WiFi connection."""
|
||||
ssid = None
|
||||
ssid = data.get('ssid')
|
||||
try:
|
||||
ssid = data['ssid']
|
||||
result = subprocess.run(
|
||||
['sudo', 'nmcli', 'connection', 'delete', ssid],
|
||||
check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
|
||||
)
|
||||
if not ssid:
|
||||
return {"status": "error", "message": "Missing SSID"}
|
||||
self._run(['sudo', 'nmcli', 'connection', 'delete', ssid])
|
||||
self.logger.info(f"Deleted Wi-Fi connection: {ssid}")
|
||||
return {"status": "success", "message": f"Network {ssid} deleted"}
|
||||
except subprocess.CalledProcessError as e:
|
||||
error_message = f"Error deleting Wi-Fi connection {ssid if ssid else 'unknown'}: {e.stderr.strip()}"
|
||||
self.logger.error(error_message)
|
||||
self.logger.error(f"Error deleting Wi-Fi connection {ssid}: {e.stderr.strip()}")
|
||||
return {"status": "error", "message": e.stderr.strip()}
|
||||
except Exception as e:
|
||||
error_message = f"Unexpected error deleting Wi-Fi connection {ssid if ssid else 'unknown'}: {e}"
|
||||
self.logger.error(error_message)
|
||||
self.logger.error(f"Unexpected error deleting Wi-Fi connection {ssid}: {e}")
|
||||
return {"status": "error", "message": str(e)}
|
||||
|
||||
def connect_known_wifi(self, data):
|
||||
"""Connect to a known WiFi network."""
|
||||
ssid = data.get('ssid', '')
|
||||
try:
|
||||
ssid = data['ssid']
|
||||
if not self.validate_network_configuration(ssid):
|
||||
raise Exception(f"Invalid or non-existent configuration for network '{ssid}'.")
|
||||
|
||||
result = subprocess.run(
|
||||
['sudo', 'nmcli', 'connection', 'up', ssid],
|
||||
check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
|
||||
)
|
||||
if not self.check_connection_exists(ssid):
|
||||
return {"status": "error", "message": f"Network '{ssid}' not found in saved connections."}
|
||||
self._run(['sudo', 'nmcli', 'connection', 'up', ssid])
|
||||
self.logger.info(f"Connected to known Wi-Fi network: {ssid}")
|
||||
return {"status": "success", "message": f"Connected to {ssid}"}
|
||||
except subprocess.CalledProcessError as e:
|
||||
@@ -119,14 +135,20 @@ class NetworkUtils:
|
||||
return {"status": "error", "message": str(e)}
|
||||
|
||||
def update_wifi_priority(self, data):
|
||||
"""Update WiFi connection priority."""
|
||||
"""Update WiFi connection priority.
|
||||
|
||||
Works for both NM-native and netplan-generated profiles.
|
||||
For netplan profiles (prefixed ``netplan-``), nmcli modify
|
||||
writes a persistent override into
|
||||
/etc/NetworkManager/system-connections/.
|
||||
"""
|
||||
ssid = data.get('ssid', '')
|
||||
try:
|
||||
ssid = data['ssid']
|
||||
priority = int(data['priority'])
|
||||
result = subprocess.run(
|
||||
['sudo', 'nmcli', 'connection', 'modify', ssid, 'connection.autoconnect-priority', str(priority)],
|
||||
check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
|
||||
)
|
||||
self._run([
|
||||
'sudo', 'nmcli', 'connection', 'modify', ssid,
|
||||
'connection.autoconnect-priority', str(priority),
|
||||
])
|
||||
self.logger.info(f"Priority updated for {ssid} to {priority}")
|
||||
return {"status": "success", "message": "Priority updated"}
|
||||
except subprocess.CalledProcessError as e:
|
||||
@@ -136,95 +158,122 @@ class NetworkUtils:
|
||||
self.logger.error(f"Unexpected error updating Wi-Fi priority: {e}")
|
||||
return {"status": "error", "message": str(e)}
|
||||
|
||||
# ── scanning ─────────────────────────────────────────────────────
|
||||
|
||||
def scan_wifi(self, handler):
|
||||
"""Scan for available WiFi networks."""
|
||||
"""Scan for available WiFi networks.
|
||||
|
||||
Uses ``nmcli -t`` (terse) output for reliable parsing.
|
||||
Signal is returned as a percentage 0-100.
|
||||
"""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
['sudo', 'nmcli', 'device', 'wifi', 'list'],
|
||||
check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
|
||||
# Trigger a rescan first (best-effort)
|
||||
subprocess.run(
|
||||
['sudo', 'nmcli', 'device', 'wifi', 'rescan'],
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True,
|
||||
)
|
||||
stdout = result.stdout
|
||||
networks = self.parse_scan_result(stdout)
|
||||
self.logger.info(f"Found {len(networks)} networks")
|
||||
|
||||
result = self._run([
|
||||
'sudo', 'nmcli', '-t', '-f',
|
||||
'SSID,SIGNAL,SECURITY,IN-USE',
|
||||
'device', 'wifi', 'list',
|
||||
])
|
||||
|
||||
networks = self._parse_terse_scan(result.stdout)
|
||||
current_ssid = self.get_current_ssid()
|
||||
self.logger.info(f"Current SSID: {current_ssid}")
|
||||
self.logger.info(f"Found {len(networks)} networks, current={current_ssid}")
|
||||
|
||||
handler.send_response(200)
|
||||
handler.send_header("Content-type", "application/json")
|
||||
handler.end_headers()
|
||||
handler.wfile.write(json.dumps({"networks": networks, "current_ssid": current_ssid}).encode('utf-8'))
|
||||
self._json_response(handler, 200, {
|
||||
"networks": networks,
|
||||
"current_ssid": current_ssid,
|
||||
})
|
||||
except subprocess.CalledProcessError as e:
|
||||
self.logger.error(f"Error scanning Wi-Fi networks: {e.stderr.strip()}")
|
||||
handler.send_response(500)
|
||||
handler.send_header("Content-type", "application/json")
|
||||
handler.end_headers()
|
||||
handler.wfile.write(json.dumps({"error": e.stderr.strip()}).encode('utf-8'))
|
||||
self._json_response(handler, 500, {"error": e.stderr.strip()})
|
||||
|
||||
def parse_scan_result(self, scan_output):
|
||||
"""Parse nmcli scan output."""
|
||||
networks = []
|
||||
lines = scan_output.split('\n')
|
||||
headers = []
|
||||
for idx, line in enumerate(lines):
|
||||
if line.startswith("IN-USE"):
|
||||
headers = re.split(r'\s{2,}', line)
|
||||
@staticmethod
|
||||
def _parse_terse_scan(output: str) -> list[dict]:
|
||||
"""Parse ``nmcli -t -f SSID,SIGNAL,SECURITY,IN-USE device wifi list``.
|
||||
|
||||
Terse output uses ':' as separator. SSIDs containing ':'
|
||||
are escaped by nmcli as ``\\:``.
|
||||
Returns a deduplicated list sorted by signal descending.
|
||||
"""
|
||||
seen: dict[str, dict] = {}
|
||||
for line in output.strip().splitlines():
|
||||
if not line.strip():
|
||||
continue
|
||||
if headers and line.strip():
|
||||
fields = re.split(r'\s{2,}', line)
|
||||
if len(fields) >= len(headers):
|
||||
network = dict(zip(headers, fields))
|
||||
ssid = network.get('SSID', '')
|
||||
signal_level = int(network.get('SIGNAL', '0'))
|
||||
security = network.get('SECURITY', '')
|
||||
networks.append({
|
||||
'ssid': ssid,
|
||||
'signal_level': signal_level,
|
||||
'security': security
|
||||
})
|
||||
return networks
|
||||
|
||||
def get_current_ssid(self):
|
||||
# Split from the right: IN-USE (last), SECURITY, SIGNAL, rest=SSID
|
||||
# IN-USE is '*' or '' — always one char field at the end
|
||||
parts = line.rsplit(':', 3)
|
||||
if len(parts) < 4:
|
||||
continue
|
||||
|
||||
raw_ssid, signal_str, security, in_use = parts
|
||||
|
||||
# Unescape nmcli backslash-colon in SSID
|
||||
ssid = raw_ssid.replace('\\:', ':').strip()
|
||||
if not ssid:
|
||||
continue
|
||||
|
||||
try:
|
||||
signal = int(signal_str.strip())
|
||||
except (ValueError, AttributeError):
|
||||
signal = 0
|
||||
|
||||
# Normalize security string
|
||||
security = security.strip()
|
||||
if not security or security == '--':
|
||||
security = 'Open'
|
||||
|
||||
# Keep the strongest signal per SSID
|
||||
if ssid not in seen or signal > seen[ssid]['signal']:
|
||||
seen[ssid] = {
|
||||
'ssid': ssid,
|
||||
'signal': signal,
|
||||
'security': security,
|
||||
'in_use': in_use.strip() == '*',
|
||||
}
|
||||
|
||||
result = sorted(seen.values(), key=lambda n: n['signal'], reverse=True)
|
||||
return result
|
||||
|
||||
def get_current_ssid(self) -> Optional[str]:
|
||||
"""Get currently connected SSID."""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
['nmcli', '-t', '-f', 'active,ssid', 'dev', 'wifi'],
|
||||
check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
|
||||
)
|
||||
lines = result.stdout.strip().split('\n')
|
||||
for line in lines:
|
||||
active, ssid = line.split(':', 1)
|
||||
if active == 'yes':
|
||||
return ssid
|
||||
result = self._run(['nmcli', '-t', '-f', 'active,ssid', 'dev', 'wifi'])
|
||||
for line in result.stdout.strip().splitlines():
|
||||
parts = line.split(':', 1)
|
||||
if len(parts) == 2 and parts[0] == 'yes':
|
||||
return parts[1]
|
||||
return None
|
||||
except subprocess.CalledProcessError as e:
|
||||
self.logger.error(f"Error getting current SSID: {e.stderr.strip()}")
|
||||
return None
|
||||
|
||||
def connect_wifi(self, data):
|
||||
"""Connect to WiFi network (new or existing)."""
|
||||
try:
|
||||
ssid = data['ssid']
|
||||
password = data.get('password', '')
|
||||
# ── connect ──────────────────────────────────────────────────────
|
||||
|
||||
def connect_wifi(self, data):
|
||||
"""Connect to WiFi network (new or existing).
|
||||
|
||||
On Trixie, ``nmcli device wifi connect`` creates a persistent
|
||||
NM keyfile in /etc/NetworkManager/system-connections/,
|
||||
which survives reboots even when netplan manages the initial
|
||||
Wi-Fi profile.
|
||||
"""
|
||||
ssid = data.get('ssid', '')
|
||||
password = data.get('password', '')
|
||||
try:
|
||||
if self.check_connection_exists(ssid):
|
||||
result = subprocess.run(
|
||||
['sudo', 'nmcli', 'connection', 'up', ssid],
|
||||
check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
|
||||
)
|
||||
return {"status": "success", "message": f"Connected to {ssid}"}
|
||||
else:
|
||||
if password:
|
||||
result = subprocess.run(
|
||||
['sudo', 'nmcli', 'device', 'wifi', 'connect', ssid, 'password', password],
|
||||
check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
|
||||
)
|
||||
else:
|
||||
result = subprocess.run(
|
||||
['sudo', 'nmcli', 'device', 'wifi', 'connect', ssid],
|
||||
check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
|
||||
)
|
||||
self._run(['sudo', 'nmcli', 'connection', 'up', ssid])
|
||||
return {"status": "success", "message": f"Connected to {ssid}"}
|
||||
|
||||
cmd = ['sudo', 'nmcli', 'device', 'wifi', 'connect', ssid]
|
||||
if password:
|
||||
cmd += ['password', password]
|
||||
self._run(cmd)
|
||||
return {"status": "success", "message": f"Connected to {ssid}"}
|
||||
except subprocess.CalledProcessError as e:
|
||||
self.logger.error(f"Error connecting to network {ssid}: {e.stderr.strip()}")
|
||||
return {"status": "error", "message": e.stderr.strip()}
|
||||
@@ -232,127 +281,216 @@ class NetworkUtils:
|
||||
self.logger.error(f"Error in connect_wifi: {e}")
|
||||
return {"status": "error", "message": str(e)}
|
||||
|
||||
def check_connection_exists(self, ssid):
|
||||
"""Check if a WiFi connection already exists."""
|
||||
def check_connection_exists(self, ssid: str) -> bool:
|
||||
"""Check if a WiFi connection profile exists (exact match)."""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
['nmcli', '-t', '-f', 'NAME', 'connection', 'show'],
|
||||
check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
|
||||
)
|
||||
connections = result.stdout.strip().split('\n')
|
||||
return ssid in connections
|
||||
except subprocess.CalledProcessError as e:
|
||||
self.logger.error(f"Error checking existing connections: {e.stderr.strip()}")
|
||||
result = self._run(['nmcli', '-t', '-f', 'NAME', 'connection', 'show'])
|
||||
for name in result.stdout.strip().splitlines():
|
||||
# nmcli escapes ':' in names with backslash
|
||||
if name.replace('\\:', ':').strip() == ssid:
|
||||
return True
|
||||
return False
|
||||
except subprocess.CalledProcessError:
|
||||
return False
|
||||
|
||||
def validate_network_configuration(self, ssid):
|
||||
"""Validate network configuration in NetworkManager."""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
['nmcli', '-t', '-f', 'NAME,UUID,TYPE,AUTOCONNECT', 'connection', 'show'],
|
||||
check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
|
||||
)
|
||||
connections = result.stdout.strip().split('\n')
|
||||
for conn in connections:
|
||||
if ssid in conn:
|
||||
self.logger.info(f"Network {ssid} validated in NetworkManager.")
|
||||
return True
|
||||
self.logger.warning(f"Network {ssid} not found in NetworkManager.")
|
||||
return False
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error validating network {ssid}: {e}")
|
||||
return False
|
||||
def validate_network_configuration(self, ssid: str) -> bool:
|
||||
"""Validate that a WiFi connection profile exists (exact match)."""
|
||||
return self.check_connection_exists(ssid)
|
||||
|
||||
# ── potfile import ───────────────────────────────────────────────
|
||||
|
||||
def import_potfiles(self, data=None):
|
||||
"""Import WiFi credentials from .pot/.potfile files."""
|
||||
"""Import WiFi credentials from .pot/.potfile files.
|
||||
|
||||
Creates NM connection profiles via nmcli — these are stored
|
||||
in /etc/NetworkManager/system-connections/ and persist across
|
||||
reboots on both legacy and Trixie builds.
|
||||
"""
|
||||
try:
|
||||
potfiles_folder = self.shared_data.potfiles_dir
|
||||
import glob
|
||||
potfile_paths = glob.glob(f"{potfiles_folder}/*.pot") + glob.glob(f"{potfiles_folder}/*.potfile")
|
||||
potfile_paths = (
|
||||
glob.glob(f"{potfiles_folder}/*.pot")
|
||||
+ glob.glob(f"{potfiles_folder}/*.potfile")
|
||||
)
|
||||
|
||||
networks_added = []
|
||||
networks_added: list[str] = []
|
||||
networks_skipped: list[str] = []
|
||||
networks_failed: list[str] = []
|
||||
DEFAULT_PRIORITY = 5
|
||||
|
||||
for potfile_path in potfile_paths:
|
||||
with open(potfile_path, 'r') as potfile:
|
||||
for line in potfile:
|
||||
line = line.strip()
|
||||
if ':' not in line:
|
||||
self.logger.warning(f"Ignoring malformed line in {potfile_path}: {line}")
|
||||
continue
|
||||
|
||||
if line.startswith('$WPAPSK$') and '#' in line:
|
||||
try:
|
||||
ssid_hash_part, password = line.split(':', 1)
|
||||
ssid = ssid_hash_part.split('#')[0].replace('$WPAPSK$', '')
|
||||
except ValueError:
|
||||
self.logger.warning(f"Failed to parse WPAPSK line in {potfile_path}: {line}")
|
||||
try:
|
||||
with open(potfile_path, 'r', errors='replace') as potfile:
|
||||
for line in potfile:
|
||||
line = line.strip()
|
||||
if not line or ':' not in line:
|
||||
continue
|
||||
elif len(line.split(':')) == 4:
|
||||
try:
|
||||
_, _, ssid, password = line.split(':')
|
||||
except ValueError:
|
||||
self.logger.warning(f"Failed to parse custom line in {potfile_path}: {line}")
|
||||
|
||||
ssid, password = self._parse_potfile_line(line)
|
||||
if not ssid or not password:
|
||||
continue
|
||||
else:
|
||||
self.logger.warning(f"Unknown format in {potfile_path}: {line}")
|
||||
continue
|
||||
|
||||
if ssid and password:
|
||||
if not self.check_connection_exists(ssid):
|
||||
try:
|
||||
subprocess.run(
|
||||
['sudo', 'nmcli', 'connection', 'add', 'type', 'wifi',
|
||||
'con-name', ssid, 'ifname', '*', 'ssid', ssid,
|
||||
'wifi-sec.key-mgmt', 'wpa-psk', 'wifi-sec.psk', password,
|
||||
'connection.autoconnect', 'yes',
|
||||
'connection.autoconnect-priority', str(DEFAULT_PRIORITY)],
|
||||
check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
|
||||
)
|
||||
networks_added.append(ssid)
|
||||
self.logger.info(f"Imported network {ssid} from {potfile_path}")
|
||||
except subprocess.CalledProcessError as e:
|
||||
self.logger.error(f"Failed to add network {ssid}: {e.stderr.strip()}")
|
||||
else:
|
||||
self.logger.info(f"Network {ssid} already exists. Skipping.")
|
||||
else:
|
||||
self.logger.warning(f"Incomplete data in {potfile_path}: {line}")
|
||||
if self.check_connection_exists(ssid):
|
||||
networks_skipped.append(ssid)
|
||||
continue
|
||||
|
||||
return {"status": "success", "networks_added": networks_added}
|
||||
try:
|
||||
self._run([
|
||||
'sudo', 'nmcli', 'connection', 'add',
|
||||
'type', 'wifi',
|
||||
'con-name', ssid,
|
||||
'ifname', '*',
|
||||
'ssid', ssid,
|
||||
'wifi-sec.key-mgmt', 'wpa-psk',
|
||||
'wifi-sec.psk', password,
|
||||
'connection.autoconnect', 'yes',
|
||||
'connection.autoconnect-priority', str(DEFAULT_PRIORITY),
|
||||
])
|
||||
networks_added.append(ssid)
|
||||
self.logger.info(f"Imported network {ssid} from {potfile_path}")
|
||||
except subprocess.CalledProcessError as e:
|
||||
networks_failed.append(ssid)
|
||||
self.logger.error(f"Failed to add network {ssid}: {e.stderr.strip()}")
|
||||
except OSError as e:
|
||||
self.logger.error(f"Failed to read potfile {potfile_path}: {e}")
|
||||
|
||||
return {
|
||||
"status": "success",
|
||||
"networks_added": networks_added,
|
||||
"imported": len(networks_added),
|
||||
"skipped": len(networks_skipped),
|
||||
"failed": len(networks_failed),
|
||||
}
|
||||
except Exception as e:
|
||||
self.logger.error(f"Unexpected error importing potfiles: {e}")
|
||||
return {"status": "error", "message": str(e)}
|
||||
|
||||
@staticmethod
|
||||
def _parse_potfile_line(line: str) -> tuple[str, str]:
|
||||
"""Parse a single potfile line, returning (ssid, password) or ('', '')."""
|
||||
# Format 1: $WPAPSK$SSID#hash:password
|
||||
if line.startswith('$WPAPSK$') and '#' in line:
|
||||
try:
|
||||
ssid_hash_part, password = line.split(':', 1)
|
||||
ssid = ssid_hash_part.split('#')[0].replace('$WPAPSK$', '')
|
||||
return ssid.strip(), password.strip()
|
||||
except ValueError:
|
||||
return '', ''
|
||||
|
||||
# Format 2: MAC:MAC:SSID:password (4 colon-separated fields)
|
||||
parts = line.split(':')
|
||||
if len(parts) == 4:
|
||||
return parts[2].strip(), parts[3].strip()
|
||||
|
||||
# Format 3: SSID:password (2 colon-separated fields)
|
||||
if len(parts) == 2:
|
||||
return parts[0].strip(), parts[1].strip()
|
||||
|
||||
return '', ''
|
||||
|
||||
# ── preconfigured file management (legacy compat) ────────────────
|
||||
|
||||
def delete_preconfigured_file(self, handler):
    """Delete the legacy preconfigured.nmconnection file.

    On Trixie this file typically does not exist (Wi-Fi is managed
    by netplan), so a missing file is treated as success — returning
    an error here would break the frontend for no reason.
    """
    path = '/etc/NetworkManager/system-connections/preconfigured.nmconnection'
    try:
        # EAFP: attempt the removal directly instead of exists()+remove(),
        # which is racy between the check and the call.
        try:
            os.remove(path)
            self.logger.info("Deleted preconfigured.nmconnection")
        except FileNotFoundError:
            # Normal on netplan-managed builds — nothing to delete.
            self.logger.info("preconfigured.nmconnection not found (Trixie/netplan — this is normal)")
        self._json_response(handler, 200, {"status": "success"})
    except Exception as e:
        self.logger.error(f"Error deleting preconfigured file: {e}")
        self._json_response(handler, 500, {"status": "error", "message": str(e)})
|
||||
|
||||
def create_preconfigured_file(self, handler):
    """Create a preconfigured.nmconnection file (legacy compat).

    On Trixie this is a no-op: Wi-Fi is managed by netplan.
    Returns success regardless to avoid breaking the frontend.
    """
    # Intentionally does not touch the filesystem — log and acknowledge.
    self.logger.warning("create_preconfigured_file called — no-op on Trixie/netplan builds")
    payload = {
        "status": "success",
        "message": "No action needed on netplan-managed builds",
    }
    self._json_response(handler, 200, payload)
|
||||
|
||||
# ── potfile upload ────────────────────────────────────────────────
|
||||
|
||||
def upload_potfile(self, handler):
    """Upload a .pot/.potfile file to the potfiles directory.

    Accepts multipart/form-data with a 'potfile' field and saves the
    payload under ``shared_data.potfiles_dir``.
    Manual multipart parsing — no cgi module (removed in Python 3.13).
    """
    try:
        content_type = handler.headers.get("Content-Type", "")
        if "multipart/form-data" not in content_type:
            self._json_response(handler, 400, {
                "status": "error",
                "message": "Content-Type must be multipart/form-data",
            })
            return

        # Extract the boundary token robustly: tolerate quoting and
        # trailing parameters (naive split("=")[1] breaks on both).
        bmatch = re.search(r'boundary="?([^";]+)"?', content_type)
        if not bmatch:
            self._json_response(handler, 400, {
                "status": "error",
                "message": "Content-Type must be multipart/form-data",
            })
            return
        boundary = bmatch.group(1).encode()

        content_length = int(handler.headers.get("Content-Length", 0))
        body = handler.rfile.read(content_length)
        parts = body.split(b"--" + boundary)

        filename = None
        file_data = None

        for part in parts:
            if b"Content-Disposition" not in part:
                continue
            if b'name="potfile"' not in part:
                continue
            if b"filename=" not in part:
                continue
            # Guard against a malformed part with no header/body separator.
            if b"\r\n\r\n" not in part:
                continue

            headers_raw, data = part.split(b"\r\n\r\n", 1)
            headers_str = headers_raw.decode(errors="replace")
            match = re.search(r'filename="(.+?)"', headers_str)
            if match:
                filename = os.path.basename(match.group(1))
                # Strip trailing boundary markers
                file_data = data.rstrip(b"\r\n--").rstrip(b"\r\n")
                break

        if not filename or file_data is None:
            self._json_response(handler, 400, {
                "status": "error",
                "message": "No potfile provided",
            })
            return

        # Sanitise filename — keep only alphanumerics plus . - _
        safe_name = "".join(
            c for c in filename if c.isalnum() or c in ".-_"
        ) or "uploaded.potfile"

        dest_dir = self.shared_data.potfiles_dir
        os.makedirs(dest_dir, exist_ok=True)
        dest_path = os.path.join(dest_dir, safe_name)

        with open(dest_path, "wb") as f:
            f.write(file_data)

        self.logger.info(f"Uploaded potfile: {safe_name} ({len(file_data)} bytes)")
        self._json_response(handler, 200, {
            "status": "success",
            "filename": safe_name,
        })
    except Exception as e:
        self.logger.error(f"Error uploading potfile: {e}")
        self._json_response(handler, 500, {
            "status": "error",
            "message": str(e),
        })
|
||||
|
||||
@@ -257,6 +257,170 @@ class SentinelUtils:
|
||||
use_tls=config.get("email_tls", True),
|
||||
))
|
||||
|
||||
# ── LLM-powered endpoints ────────────────────────────────────────────
|
||||
|
||||
def analyze_events(self, data: Dict) -> Dict:
    """POST /api/sentinel/analyze — AI analysis of selected events."""
    try:
        event_ids = data.get("event_ids", [])
        if not event_ids:
            return {"status": "error", "message": "event_ids required"}

        # Load the requested events from the sentinel store.
        placeholders = ",".join("?" for _ in event_ids)
        events = self.shared_data.db.query(
            f"SELECT * FROM sentinel_events WHERE id IN ({placeholders})",
            [int(i) for i in event_ids],
        ) or []
        if not events:
            return {"status": "error", "message": "No events found"}

        # Collect MAC/IP hints from event metadata for device context.
        macs, ips = set(), set()
        for event in events:
            try:
                meta = json.loads(event.get("metadata", "{}") or "{}")
            except Exception:
                meta = {}
            mac = meta.get("mac")
            ip = meta.get("ip")
            if mac:
                macs.add(mac)
            if ip:
                ips.add(ip)

        devices = []
        if macs:
            mac_ph = ",".join("?" for _ in macs)
            devices = self.shared_data.db.query(
                f"SELECT * FROM sentinel_devices WHERE mac_address IN ({mac_ph})",
                list(macs),
            ) or []

        from llm_bridge import LLMBridge
        bridge = LLMBridge()

        system = (
            "You are a cybersecurity analyst reviewing sentinel alerts from Bjorn, "
            "a network security AI. Analyze the events below and provide: "
            "1) A severity assessment (critical/high/medium/low/info), "
            "2) A concise analysis of what happened, "
            "3) Concrete recommendations. "
            "Be technical and actionable. Respond in plain text, keep it under 300 words."
        )
        prompt = (
            f"Events:\n{json.dumps(events, indent=2, default=str)}\n\n"
            f"Known devices:\n{json.dumps(devices, indent=2, default=str)}\n\n"
            "Analyze these security events."
        )

        answer = bridge.complete(
            [{"role": "user", "content": prompt}],
            max_tokens=600,
            system=system,
            timeout=30,
        )
        return {"status": "ok", "analysis": answer or "(no response)"}

    except Exception as e:
        logger.error("analyze_events error: %s", e)
        return {"status": "error", "message": str(e)}
|
||||
|
||||
def summarize_events(self, data: Dict) -> Dict:
    """POST /api/sentinel/summarize — AI summary of recent unread events."""
    try:
        # Cap the window at 100 events regardless of what the client asks for.
        limit = min(int(data.get("limit", 50)), 100)
        events = self.shared_data.db.query(
            "SELECT * FROM sentinel_events WHERE acknowledged = 0 "
            "ORDER BY timestamp DESC LIMIT ?",
            [limit],
        ) or []

        if not events:
            return {"status": "ok", "summary": "No unread events to summarize."}

        from llm_bridge import LLMBridge
        bridge = LLMBridge()

        system = (
            "You are a cybersecurity analyst. Summarize the security events below. "
            "Group by type, identify patterns, flag critical items. "
            "Be concise — max 200 words. Use bullet points."
        )
        prompt = (
            f"{len(events)} unread sentinel events:\n"
            f"{json.dumps(events, indent=2, default=str)}\n\n"
            "Summarize these events and identify patterns."
        )

        answer = bridge.complete(
            [{"role": "user", "content": prompt}],
            max_tokens=500,
            system=system,
            timeout=30,
        )
        return {"status": "ok", "summary": answer or "(no response)"}

    except Exception as e:
        logger.error("summarize_events error: %s", e)
        return {"status": "error", "message": str(e)}
|
||||
|
||||
def suggest_rule(self, data: Dict) -> Dict:
    """POST /api/sentinel/suggest-rule — AI generates a rule from description."""
    try:
        description = (data.get("description") or "").strip()
        if not description:
            return {"status": "error", "message": "description required"}

        from llm_bridge import LLMBridge
        bridge = LLMBridge()

        system = (
            "You are a security rule generator. Given a user description, generate a Bjorn sentinel rule "
            "as JSON. The rule schema is:\n"
            '{"name": "string", "trigger_type": "new_device|arp_spoof|port_change|service_change|'
            'dhcp_server|rogue_ap|high_traffic|vulnerability", "conditions": {"key": "value"}, '
            '"logic": "AND|OR", "actions": ["notify_web","notify_discord","notify_email","notify_webhook"], '
            '"cooldown_s": 60, "enabled": 1}\n'
            "Respond with ONLY the JSON object, no markdown fences, no explanation."
        )
        prompt = f"Generate a sentinel rule for: {description}"

        response = bridge.complete(
            [{"role": "user", "content": prompt}],
            max_tokens=400,
            system=system,
            timeout=20,
        )
        if not response:
            return {"status": "error", "message": "No LLM response"}

        # Strip optional markdown fences (``` / ```json) before parsing.
        cleaned = response.strip()
        if cleaned.startswith("```"):
            cleaned = cleaned.split("\n", 1)[1] if "\n" in cleaned else cleaned[3:]
        if cleaned.endswith("```"):
            cleaned = cleaned[:-3]
        cleaned = cleaned.strip()
        if cleaned.startswith("json"):
            cleaned = cleaned[4:].strip()

        try:
            rule = json.loads(cleaned)
        except json.JSONDecodeError:
            return {"status": "ok", "rule": None, "raw": response,
                    "message": "LLM response was not valid JSON"}
        return {"status": "ok", "rule": rule}

    except Exception as e:
        logger.error("suggest_rule error: %s", e)
        return {"status": "error", "message": str(e)}
|
||||
|
||||
# ── Helpers ─────────────────────────────────────────────────────────
|
||||
|
||||
def _send_json(self, handler, data, status=200):
|
||||
|
||||
26
webapp.py
26
webapp.py
@@ -172,6 +172,14 @@ class CustomHandler(http.server.SimpleHTTPRequestHandler):
|
||||
# EPD Layout
|
||||
'/api/epd/layout': wu.system_utils.epd_get_layout,
|
||||
'/api/epd/layouts': wu.system_utils.epd_list_layouts,
|
||||
|
||||
# LLM Bridge
|
||||
'/api/llm/status': wu.llm_utils.get_llm_status,
|
||||
'/api/llm/config': wu.llm_utils.get_llm_config,
|
||||
'/api/llm/reasoning': wu.llm_utils.get_llm_reasoning,
|
||||
|
||||
# MCP Server
|
||||
'/api/mcp/status': wu.llm_utils.get_mcp_status,
|
||||
}
|
||||
|
||||
if debug_enabled:
|
||||
@@ -203,6 +211,7 @@ class CustomHandler(http.server.SimpleHTTPRequestHandler):
|
||||
'/clear_logs': wu.system_utils.clear_logs,
|
||||
'/clear_netkb': wu.system_utils.clear_netkb,
|
||||
'/erase_bjorn_memories': wu.system_utils.erase_bjorn_memories,
|
||||
'/upload_potfile': wu.network_utils.upload_potfile,
|
||||
'/create_preconfigured_file': wu.network_utils.create_preconfigured_file,
|
||||
'/delete_preconfigured_file': wu.network_utils.delete_preconfigured_file,
|
||||
'/clear_shared_config_json': wu.index_utils.clear_shared_config_json,
|
||||
@@ -297,6 +306,9 @@ class CustomHandler(http.server.SimpleHTTPRequestHandler):
|
||||
'/api/sentinel/rule/delete': wu.sentinel.delete_rule,
|
||||
'/api/sentinel/device': wu.sentinel.update_device,
|
||||
'/api/sentinel/notifiers': wu.sentinel.save_notifier_config,
|
||||
'/api/sentinel/analyze': wu.sentinel.analyze_events,
|
||||
'/api/sentinel/summarize': wu.sentinel.summarize_events,
|
||||
'/api/sentinel/suggest-rule': wu.sentinel.suggest_rule,
|
||||
# BIFROST
|
||||
'/api/bifrost/toggle': wu.bifrost.toggle_bifrost,
|
||||
'/api/bifrost/mode': wu.bifrost.set_mode,
|
||||
@@ -313,6 +325,13 @@ class CustomHandler(http.server.SimpleHTTPRequestHandler):
|
||||
'/api/loki/quick': wu.loki.quick_type,
|
||||
'/api/loki/install': wu.loki.install_gadget,
|
||||
'/api/loki/reboot': wu.loki.reboot,
|
||||
# LLM Bridge
|
||||
'/api/llm/chat': wu.llm_utils.handle_chat,
|
||||
'/api/llm/clear_history': wu.llm_utils.clear_chat_history,
|
||||
'/api/llm/config': wu.llm_utils.save_llm_config,
|
||||
# MCP Server
|
||||
'/api/mcp/toggle': wu.llm_utils.toggle_mcp,
|
||||
'/api/mcp/config': wu.llm_utils.save_mcp_config,
|
||||
}
|
||||
|
||||
if debug_enabled:
|
||||
@@ -369,6 +388,7 @@ class CustomHandler(http.server.SimpleHTTPRequestHandler):
|
||||
# EPD Layout
|
||||
'/api/epd/layout': lambda h, d: wu.system_utils.epd_save_layout(h, d),
|
||||
'/api/epd/layout/reset': lambda h, d: wu.system_utils.epd_reset_layout(h, d),
|
||||
|
||||
# Legacy aliases
|
||||
'reboot': lambda h, _: wu.system_utils.reboot_system(h),
|
||||
'shutdown': lambda h, _: wu.system_utils.shutdown_system(h),
|
||||
@@ -686,6 +706,12 @@ class CustomHandler(http.server.SimpleHTTPRequestHandler):
|
||||
filename = unquote(self.path.split('/c2/download_client/')[-1])
|
||||
self.web_utils.c2.c2_download_client(self, filename)
|
||||
return
|
||||
elif self.path.startswith('/api/llm/models'):
|
||||
from urllib.parse import parse_qs, urlparse
|
||||
query = parse_qs(urlparse(self.path).query)
|
||||
params = {k: v[0] for k, v in query.items()}
|
||||
self.web_utils.llm_utils.get_llm_models(self, params)
|
||||
return
|
||||
elif self.path.startswith('/c2/stale_agents'):
|
||||
from urllib.parse import parse_qs, urlparse
|
||||
query = parse_qs(urlparse(self.path).query)
|
||||
|
||||
Reference in New Issue
Block a user