Skip to content

Commit

Permalink
Merge pull request #1231 from blacklanternsecurity/dns-engine
Browse files Browse the repository at this point in the history
DNS Engine - Offload DNS to Dedicated Process
  • Loading branch information
TheTechromancer authored Apr 26, 2024
2 parents 1dd8ab2 + 18694c1 commit 81ba613
Show file tree
Hide file tree
Showing 90 changed files with 2,664 additions and 1,970 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[![bbot_banner](https://user-images.githubusercontent.com/20261699/158000235-6c1ace81-a267-4f8e-90a1-f4c16884ebac.png)](https://github.com/blacklanternsecurity/bbot)

#### BBOT /ˈBEE·bot/ (noun): A recursive internet scanner for hackers.
#### /ˈBEE·bot/ (noun): A recursive internet scanner for hackers.

[![Python Version](https://img.shields.io/badge/python-3.9+-FF8400)](https://www.python.org) [![License](https://img.shields.io/badge/license-GPLv3-FF8400.svg)](https://github.com/blacklanternsecurity/bbot/blob/dev/LICENSE) [![DEF CON Demo Labs 2023](https://img.shields.io/badge/DEF%20CON%20Demo%20Labs-2023-FF8400.svg)](https://forum.defcon.org/node/246338) [![PyPi Downloads](https://static.pepy.tech/personalized-badge/bbot?right_color=orange&left_color=grey)](https://pepy.tech/project/bbot) [![Black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) [![Tests](https://github.com/blacklanternsecurity/bbot/actions/workflows/tests.yml/badge.svg?branch=stable)](https://github.com/blacklanternsecurity/bbot/actions?query=workflow%3A"tests") [![Codecov](https://codecov.io/gh/blacklanternsecurity/bbot/branch/dev/graph/badge.svg?token=IR5AZBDM5K)](https://codecov.io/gh/blacklanternsecurity/bbot) [![Discord](https://img.shields.io/discord/859164869970362439)](https://discord.com/invite/PZqkgxu5SA)

Expand Down
4 changes: 2 additions & 2 deletions bbot/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -184,12 +184,12 @@ def handle_keyboard_input(keyboard_input):
module = kill_match.group("module")
if module in scan.modules:
log.hugewarning(f'Killing module: "{module}"')
scan.manager.kill_module(module, message="killed by user")
scan.kill_module(module, message="killed by user")
else:
log.warning(f'Invalid module: "{module}"')
else:
scan.preset.core.logger.toggle_log_level(logger=log)
scan.manager.modules_status(_log=True)
scan.modules_status(_log=True)

reader = asyncio.StreamReader()
protocol = asyncio.StreamReaderProtocol(reader)
Expand Down
12 changes: 12 additions & 0 deletions bbot/core/config/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
import sys
import multiprocessing as mp

# BBOT requires the "spawn" start method so that engine subprocesses begin
# with a clean interpreter state (and behave identically across platforms).
try:
    mp.set_start_method("spawn")
except Exception:
    # The start method can only be set once per interpreter. If something else
    # configured it first, emit a warning when it isn't "spawn".
    start_method = mp.get_start_method()
    warning_needed = start_method != "spawn"
    if warning_needed:
        print(
            f"[WARN] Multiprocessing spawn method is set to {start_method}. This may negatively affect performance.",
            file=sys.stderr,
        )
49 changes: 38 additions & 11 deletions bbot/core/config/logger.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
import sys
import atexit
import logging
from copy import copy
import multiprocessing
import logging.handlers
from pathlib import Path

Expand Down Expand Up @@ -57,16 +59,37 @@ def __init__(self, core):
self._loggers = None
self._log_handlers = None
self._log_level = None
self.root_logger = logging.getLogger()
self.core_logger = logging.getLogger("bbot")
self.core = core

# Don't do this more than once
if len(self.core_logger.handlers) == 0:
for logger in self.loggers:
self.include_logger(logger)
self.listener = None

self.process_name = multiprocessing.current_process().name
if self.process_name == "MainProcess":
self.queue = multiprocessing.Queue()
self.setup_queue_handler()
# Start the QueueListener
self.listener = logging.handlers.QueueListener(self.queue, *self.log_handlers.values())
self.listener.start()
atexit.register(self.listener.stop)

self.log_level = logging.INFO

def setup_queue_handler(self, logging_queue=None, log_level=logging.DEBUG):
    """Attach a QueueHandler to the root logger so records flow into the log queue.

    When *logging_queue* is omitted, fall back to the queue already stored on
    this instance; otherwise remember the supplied queue for later use.
    """
    if logging_queue is not None:
        self.queue = logging_queue
    else:
        logging_queue = self.queue

    handler = logging.handlers.QueueHandler(logging_queue)
    self.queue_handler = handler
    self.root_logger.addHandler(handler)
    self.core_logger.setLevel(log_level)

    # child processes shouldn't emit asyncio chatter
    if self.process_name != "MainProcess":
        logging.getLogger("asyncio").setLevel(logging.ERROR)

def addLoggingLevel(self, levelName, levelNum, methodName=None):
"""
Comprehensively adds a new logging level to the `logging` module and the
Expand Down Expand Up @@ -127,24 +150,28 @@ def loggers(self):
return self._loggers

def add_log_handler(self, handler, formatter=None):
    """Register *handler* on every tracked logger and on the queue listener.

    No-op before the listener exists (i.e. in child processes).
    """
    if self.listener is None:
        return
    # fall back to the module-level debug format when none was configured
    if handler.formatter is None:
        handler.setFormatter(debug_format)
    for lgr in self.loggers:
        if handler in lgr.handlers:
            continue
        lgr.addHandler(handler)
    if handler not in self.listener.handlers:
        self.listener.handlers = (*self.listener.handlers, handler)

def remove_log_handler(self, handler):
    """Detach *handler* from every tracked logger and from the queue listener."""
    for lgr in self.loggers:
        if handler in lgr.handlers:
            lgr.removeHandler(handler)
    listener = self.listener
    if listener is None:
        return
    if handler in listener.handlers:
        # listener.handlers is a tuple; rebuild it without this handler
        remaining = list(listener.handlers)
        remaining.remove(handler)
        listener.handlers = tuple(remaining)

def include_logger(self, logger):
    """Track *logger*: sync its level and re-register all configured handlers."""
    known = self.loggers
    if logger not in known:
        known.append(logger)
    level = self.log_level
    if level is not None:
        logger.setLevel(level)
    # re-registering attaches each handler to the newly-included logger too
    for handler in self.log_handlers.values():
        self.add_log_handler(handler)

@property
def log_handlers(self):
Expand Down
28 changes: 28 additions & 0 deletions bbot/core/core.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
import logging
import traceback
from copy import copy
import multiprocessing
from pathlib import Path
from omegaconf import OmegaConf

Expand All @@ -20,6 +22,27 @@ class BBOTCore:
- load quickly
"""

class BBOTProcess(multiprocessing.Process):
    """multiprocessing.Process subclass that forwards BBOT logging from the child.

    Two extra keyword arguments are required and consumed here before the rest
    reach multiprocessing.Process: "logging_queue" (the parent's log queue) and
    "log_level" (the level to apply in the child).
    """

    def __init__(self, *args, **kwargs):
        # Pop BBOT-specific kwargs so Process.__init__ doesn't see them;
        # KeyError here means the caller forgot to supply them.
        self.logging_queue = kwargs.pop("logging_queue")
        self.log_level = kwargs.pop("log_level")
        super().__init__(*args, **kwargs)

    def run(self):
        """Child-process entry point: wire up queue-based logging, then run the target.

        All exceptions (including KeyboardInterrupt) are logged rather than
        propagated, so a crashing child never prints a raw traceback.
        """
        log = logging.getLogger("bbot.core.process")
        try:
            # imported here because this executes in the child interpreter
            from bbot.core import CORE

            # route this child's log records into the parent's queue
            CORE.logger.setup_queue_handler(self.logging_queue, self.log_level)
            super().run()
        except KeyboardInterrupt:
            log.warning(f"Got KeyboardInterrupt in {self.name}")
            # NOTE: .trace is a custom logging level registered by BBOT's logger setup
            log.trace(traceback.format_exc())
        except BaseException as e:
            log.warning(f"Error in {self.name}: {e}")
            log.trace(traceback.format_exc())

def __init__(self):
self._logger = None
self._files_config = None
Expand Down Expand Up @@ -142,6 +165,11 @@ def files_config(self):
self._files_config = files.BBOTConfigFiles(self)
return self._files_config

def create_process(self, *args, **kwargs):
    """Build a daemonized BBOTProcess pre-wired with this core's logging queue and level.

    All positional/keyword arguments are forwarded to BBOTProcess (and on to
    multiprocessing.Process). The process is returned unstarted.
    """
    proc = self.BBOTProcess(
        *args,
        logging_queue=self.logger.queue,
        log_level=self.logger.log_level,
        **kwargs,
    )
    # daemonize so stray engine processes die with the main process
    proc.daemon = True
    return proc

@property
def logger(self):
self.config
Expand Down
212 changes: 212 additions & 0 deletions bbot/core/engine.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,212 @@
import zmq
import atexit
import pickle
import asyncio
import inspect
import logging
import tempfile
import traceback
import zmq.asyncio
from pathlib import Path
from contextlib import asynccontextmanager, suppress

from bbot.core import CORE
from bbot.core.helpers.misc import rand_string

CMD_EXIT = 1000


class EngineClient:
    """Client half of BBOT's process-isolated engine IPC.

    Talks to a matching EngineServer subclass running in a separate process,
    over a ZeroMQ DEALER socket connected to an IPC file in the temp dir.
    Commands are pickled dicts ({"c": id, "a": kwargs}); replies are pickled
    results, or the sentinels {"_e": (error, trace)} / {"_s": ...}.
    """

    # Subclasses must point this at the EngineServer subclass to launch.
    SERVER_CLASS = None

    def __init__(self, **kwargs):
        self.name = f"EngineClient {self.__class__.__name__}"
        if self.SERVER_CLASS is None:
            raise ValueError(f"Must set EngineClient SERVER_CLASS, {self.SERVER_CLASS}")
        # Build a bidirectional command map: name -> id plus mirrored id -> name
        self.CMDS = dict(self.SERVER_CLASS.CMDS)
        for k, v in list(self.CMDS.items()):
            self.CMDS[v] = k
        self.log = logging.getLogger(f"bbot.core.{self.__class__.__name__.lower()}")
        # random socket filename avoids collisions between concurrent engines
        self.socket_address = f"zmq_{rand_string(8)}.sock"
        self.socket_path = Path(tempfile.gettempdir()) / self.socket_address
        # extra kwargs forwarded to the server's constructor in its process
        self.server_kwargs = kwargs.pop("server_kwargs", {})
        # server process is started lazily, on first socket use
        self._server_process = None
        self.context = zmq.asyncio.Context()
        atexit.register(self.cleanup)

    async def run_and_return(self, command, **kwargs):
        """Send *command* to the server and return its single (unpickled) reply.

        Returns None if the server reported an error (which gets logged).
        """
        async with self.new_socket() as socket:
            message = self.make_message(command, args=kwargs)
            await socket.send(message)
            binary = await socket.recv()
            # self.log.debug(f"{self.name}.{command}({kwargs}) got binary: {binary}")
            message = pickle.loads(binary)
            self.log.debug(f"{self.name}.{command}({kwargs}) got message: {message}")
            # error handling
            if self.check_error(message):
                return
            return message

    async def run_and_yield(self, command, **kwargs):
        """Send *command* and yield each reply until an error or stop sentinel arrives."""
        message = self.make_message(command, args=kwargs)
        async with self.new_socket() as socket:
            await socket.send(message)
            while 1:
                binary = await socket.recv()
                # self.log.debug(f"{self.name}.{command}({kwargs}) got binary: {binary}")
                message = pickle.loads(binary)
                self.log.debug(f"{self.name}.{command}({kwargs}) got message: {message}")
                # error handling
                if self.check_error(message) or self.check_stop(message):
                    break
                yield message

    def check_error(self, message):
        """Return True (after logging) if *message* is the error sentinel {"_e": (error, trace)}."""
        if isinstance(message, dict) and len(message) == 1 and "_e" in message:
            error, trace = message["_e"]
            self.log.error(error)
            # NOTE: .trace is a custom logging level registered by BBOT's logger setup
            self.log.trace(trace)
            return True
        return False

    def check_stop(self, message):
        """Return True if *message* is the end-of-stream sentinel {"_s": ...}."""
        if isinstance(message, dict) and len(message) == 1 and "_s" in message:
            return True
        return False

    def make_message(self, command, args):
        """Pickle *command*/*args* for the wire; raise KeyError for unknown commands."""
        try:
            cmd_id = self.CMDS[command]
        except KeyError:
            raise KeyError(f'Command "{command}" not found. Available commands: {",".join(self.available_commands)}')
        return pickle.dumps(dict(c=cmd_id, a=args))

    @property
    def available_commands(self):
        # only the string keys (command names); the mirrored int ids are excluded
        return [s for s in self.CMDS if isinstance(s, str)]

    def start_server(self):
        """Launch SERVER_CLASS in a dedicated child process and return the process."""
        process = CORE.create_process(
            target=self.server_process,
            args=(
                self.SERVER_CLASS,
                self.socket_path,
            ),
            kwargs=self.server_kwargs,
        )
        process.start()
        return process

    @staticmethod
    def server_process(server_class, socket_path, **kwargs):
        """Child-process entry point: construct the server and run its worker loop."""
        try:
            engine_server = server_class(socket_path, **kwargs)
            asyncio.run(engine_server.worker())
        except (asyncio.CancelledError, KeyboardInterrupt):
            pass
        except Exception:
            import traceback

            log = logging.getLogger("bbot.core.engine.server")
            log.critical(f"Unhandled error in {server_class.__name__} server process: {traceback.format_exc()}")

    @asynccontextmanager
    async def new_socket(self):
        """Yield a fresh DEALER socket, lazily starting the server process on first use.

        NOTE(review): the bind-wait loop has no timeout — if the server process
        dies before creating the socket file, this spins forever. Confirm a
        timeout isn't needed here.
        """
        if self._server_process is None:
            self._server_process = self.start_server()
            while not self.socket_path.exists():
                await asyncio.sleep(0.1)
        socket = self.context.socket(zmq.DEALER)
        socket.connect(f"ipc://{self.socket_path}")
        try:
            yield socket
        finally:
            with suppress(Exception):
                socket.close()

    def cleanup(self):
        # delete socket file on exit
        self.socket_path.unlink(missing_ok=True)


class EngineServer:
    """Server half of BBOT's process-isolated engine IPC.

    Listens on a ZeroMQ ROUTER socket for pickled command messages of the form
    {"c": <int command id>, "a": <kwargs dict>} and dispatches them to the
    methods named in CMDS. Coroutine commands reply once; async-generator
    commands stream replies and finish with a {"_s": None} stop sentinel.
    Unhandled command errors are reported back as {"_e": (error, traceback)}.
    """

    # Maps int command ids to method names; subclasses populate this.
    CMDS = {}

    def __init__(self, socket_path):
        self.log = logging.getLogger(f"bbot.core.{self.__class__.__name__.lower()}")
        self.name = f"EngineServer {self.__class__.__name__}"
        if socket_path is not None:
            # create ZeroMQ context
            self.context = zmq.asyncio.Context()
            # ROUTER socket can handle multiple concurrent requests
            self.socket = self.context.socket(zmq.ROUTER)
            # create socket file
            self.socket.bind(f"ipc://{socket_path}")

    async def run_and_return(self, client_id, command_fn, **kwargs):
        """Await *command_fn* and send its pickled result (or an error sentinel) to *client_id*."""
        self.log.debug(f"{self.name} run-and-return {command_fn.__name__}({kwargs})")
        try:
            result = await command_fn(**kwargs)
        except Exception as e:
            error = f"Unhandled error in {self.name}.{command_fn.__name__}({kwargs}): {e}"
            trace = traceback.format_exc()
            result = {"_e": (error, trace)}
        await self.send_socket_multipart([client_id, pickle.dumps(result)])

    async def run_and_yield(self, client_id, command_fn, **kwargs):
        """Stream each item of async generator *command_fn* to *client_id*, then a stop sentinel."""
        self.log.debug(f"{self.name} run-and-yield {command_fn.__name__}({kwargs})")
        try:
            async for _ in command_fn(**kwargs):
                await self.send_socket_multipart([client_id, pickle.dumps(_)])
            # tell the client the stream is finished
            await self.send_socket_multipart([client_id, pickle.dumps({"_s": None})])
        except Exception as e:
            error = f"Unhandled error in {self.name}.{command_fn.__name__}({kwargs}): {e}"
            trace = traceback.format_exc()
            result = {"_e": (error, trace)}
            await self.send_socket_multipart([client_id, pickle.dumps(result)])

    async def send_socket_multipart(self, *args, **kwargs):
        """Best-effort multipart send; ZMQ failures are logged, never raised."""
        try:
            await self.socket.send_multipart(*args, **kwargs)
        except Exception as e:
            self.log.warning(f"Error sending ZMQ message: {e}")
            # NOTE: .trace is a custom logging level registered by BBOT's logger setup
            self.log.trace(traceback.format_exc())

    async def worker(self):
        """Main receive loop: validate each incoming message and dispatch it as a task.

        NOTE(review): messages arrive via pickle from the paired EngineClient
        over a local IPC socket, so the input is assumed trusted. A non-dict
        payload would still escape to the outer except and end the loop —
        confirm that's acceptable.
        """
        try:
            while 1:
                client_id, binary = await self.socket.recv_multipart()
                message = pickle.loads(binary)
                self.log.debug(f"{self.name} got message: {message}")

                cmd = message.get("c", None)
                if not isinstance(cmd, int):
                    self.log.warning(f"No command sent in message: {message}")
                    continue

                kwargs = message.get("a", {})
                if not isinstance(kwargs, dict):
                    self.log.warning(f"{self.name}: received invalid message of type {type(kwargs)}, should be dict")
                    continue

                # BUGFIX: an unknown command id previously raised KeyError out of
                # self.CMDS[cmd], escaped to the outer except, and killed the
                # entire worker loop. Warn and keep serving instead.
                command_name = self.CMDS.get(cmd, None)
                if command_name is None:
                    self.log.warning(f'{self.name} has no command with id "{cmd}"')
                    continue
                command_fn = getattr(self, command_name, None)

                if command_fn is None:
                    # BUGFIX: this warning used to interpolate command_fn, which is
                    # always None on this branch; show the missing name instead.
                    self.log.warning(f'{self.name} has no function named "{command_name}"')
                    continue

                # dispatch concurrently so one slow command doesn't block the loop
                if inspect.isasyncgenfunction(command_fn):
                    coroutine = self.run_and_yield(client_id, command_fn, **kwargs)
                else:
                    coroutine = self.run_and_return(client_id, command_fn, **kwargs)

                asyncio.create_task(coroutine)
        except Exception as e:
            self.log.error(f"Error in EngineServer worker: {e}")
            self.log.trace(traceback.format_exc())
        finally:
            # self.socket may not exist when constructed with socket_path=None
            with suppress(Exception):
                self.socket.close()
Loading

0 comments on commit 81ba613

Please sign in to comment.