# orchestrator.py
# Action queue consumer for Bjorn - executes actions from the scheduler queue
import importlib
import time
import logging
import threading
import json
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional
from init_shared import shared_data
from logger import Logger
from action_scheduler import ActionScheduler
logger = Logger(name="orchestrator.py", level=logging.DEBUG)


class Orchestrator:
    """Orchestrator that consumes the action queue generated by the scheduler"""

    def __init__(self):
        self.shared_data = shared_data
        self.actions = {}  # Dictionary of action instances
        self.network_scanner = None
        self.scheduler = None
        self.scheduler_thread = None
        # Load all available actions
        self.load_actions()
        logger.info(f"Actions loaded: {list(self.actions.keys())}")

    def load_actions(self):
        """Load all actions from database"""
        actions_config = self.shared_data.get_actions_config()
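        # Each config row is expected to carry at least b_module and b_class,
        # plus the optional b_port, b_parent and b_enabled keys read below.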
        for action in actions_config:
            module_name = action["b_module"]
            b_class = action["b_class"]
            # Skip disabled actions
            if not int(action.get("b_enabled", 1)):
                logger.info(f"Skipping disabled action: {b_class}")
                continue
            try:
                # Import the module dynamically
                module = importlib.import_module(f'actions.{module_name}')
                # Global actions (NetworkScanner)
                if module_name == "scanning":
                    scanner_class = getattr(module, b_class)
                    self.network_scanner = scanner_class(self.shared_data)
                    self.actions[b_class] = self.network_scanner
                else:
                    # Normal actions
                    action_class = getattr(module, b_class)
                    action_instance = action_class(self.shared_data)
                    action_instance.action_name = b_class
                    action_instance.port = action.get("b_port")
                    action_instance.b_parent_action = action.get("b_parent")
                    self.actions[b_class] = action_instance
            except Exception as e:
                logger.error(f"Failed to load action {b_class}: {e}")

    def start_scheduler(self):
        """Start the scheduler in background"""
        logger.info("Starting ActionScheduler in background...")
        self.scheduler = ActionScheduler(self.shared_data)
        self.scheduler_thread = threading.Thread(
            target=self.scheduler.run,
            daemon=True,
            name="ActionScheduler"
        )
        self.scheduler_thread.start()
        logger.info("ActionScheduler started")

    def get_next_action(self) -> Optional[Dict[str, Any]]:
        """Get next action from queue"""
        action = self.shared_data.db.get_next_queued_action()
        if action:
            logger.info(
                f"Next action: {action['action_name']} for {action['mac_address']} "
                f"(priority={action.get('priority_effective')})"
            )
        return action

    def execute_queued_action(self, queued_action: Dict[str, Any]) -> bool:
        """Execute a single queued action"""
        queue_id = queued_action['id']
        action_name = queued_action['action_name']
        mac = queued_action['mac_address']
        ip = queued_action['ip']
        port = queued_action['port']
        logger.info(f"Executing: {action_name} for {ip}:{port}")
        # Update status to running
        self.shared_data.db.update_queue_status(queue_id, 'running')
        try:
            # Check if action is loaded
            if action_name not in self.actions:
                raise Exception(f"Action {action_name} not loaded")
            action = self.actions[action_name]
            # Prepare row data for compatibility
            row = {
                "MAC Address": mac,
                "IPs": ip,
                "Ports": str(port) if port else "",
                "Alive": 1
            }
            # Update shared status for display
            self.shared_data.bjorn_orch_status = action_name
            self.shared_data.bjorn_status_text2 = ip
            # Check if global action (fall back to '{}' when metadata is NULL)
            metadata = json.loads(queued_action.get('metadata') or '{}')
            if metadata.get('is_global') and hasattr(action, 'scan'):
                # Execute global scan
                action.scan()
                result = 'success'
            else:
                # Execute targeted action
                result = action.execute(
                    ip,
                    str(port) if port else "",
                    row,
                    action_name
                )
            # Update queue status based on result
            if result == 'success':
                self.shared_data.db.update_queue_status(queue_id, 'success')
                logger.success(f"Action {action_name} completed successfully for {ip}")
            else:
                self.shared_data.db.update_queue_status(queue_id, 'failed')
                logger.warning(f"Action {action_name} failed for {ip}")
            return result == 'success'
        except Exception as e:
            logger.error(f"Error executing action {action_name}: {e}")
            self.shared_data.db.update_queue_status(queue_id, 'failed', str(e))
            return False
        finally:
            # Clear status text
            self.shared_data.bjorn_status_text2 = ""

    def run(self):
        """Main loop: start scheduler and consume queue"""
        # Start the scheduler
        self.start_scheduler()
        # Wait for scheduler initialization
        time.sleep(2)
        # Main execution loop
        idle_time = 0
        consecutive_idle_logs = 0
        while not self.shared_data.orchestrator_should_exit:
            try:
                # Get next action from queue
                next_action = self.get_next_action()
                if next_action:
                    # Reset idle counters
                    idle_time = 0
                    consecutive_idle_logs = 0
                    # Execute the action
                    self.execute_queued_action(next_action)
                else:
                    # IDLE mode
                    idle_time += 1
                    self.shared_data.bjorn_orch_status = "IDLE"
                    self.shared_data.bjorn_status_text2 = ""
                    # Log periodically (every 30 idle checks, roughly 2.5 minutes
                    # at the 5-second wait below) to limit log spam
                    if idle_time % 30 == 0:
                        consecutive_idle_logs += 1
                        if consecutive_idle_logs <= 3:  # Limit consecutive logs
                            logger.debug(f"Queue empty, idling... ({idle_time} idle checks)")
                    # Event-driven wait (max 5s to check for exit signals)
                    self.shared_data.queue_event.wait(timeout=5)
                    self.shared_data.queue_event.clear()
            except Exception as e:
                logger.error(f"Error in orchestrator loop: {e}")
                time.sleep(1)
        # Cleanup on exit
        if self.scheduler:
            self.scheduler.stop()
        logger.info("Orchestrator stopped")

if __name__ == "__main__":
    orchestrator = Orchestrator()
    orchestrator.run()