diff --git a/README.md b/README.md index 885d740ca..1ac582cae 100644 --- a/README.md +++ b/README.md @@ -1,34 +1,14 @@ [![bbot_banner](https://user-images.githubusercontent.com/20261699/158000235-6c1ace81-a267-4f8e-90a1-f4c16884ebac.png)](https://github.com/blacklanternsecurity/bbot) -# BEE·bot - -### A Recursive Internet Scanner for Hackers. +#### /ˈBEE·bot/ (noun): A recursive internet scanner for hackers. [![Python Version](https://img.shields.io/badge/python-3.9+-FF8400)](https://www.python.org) [![License](https://img.shields.io/badge/license-GPLv3-FF8400.svg)](https://github.com/blacklanternsecurity/bbot/blob/dev/LICENSE) [![DEF CON Demo Labs 2023](https://img.shields.io/badge/DEF%20CON%20Demo%20Labs-2023-FF8400.svg)](https://forum.defcon.org/node/246338) [![PyPi Downloads](https://static.pepy.tech/personalized-badge/bbot?right_color=orange&left_color=grey)](https://pepy.tech/project/bbot) [![Black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) [![Tests](https://github.com/blacklanternsecurity/bbot/actions/workflows/tests.yml/badge.svg?branch=stable)](https://github.com/blacklanternsecurity/bbot/actions?query=workflow%3A"tests") [![Codecov](https://codecov.io/gh/blacklanternsecurity/bbot/branch/dev/graph/badge.svg?token=IR5AZBDM5K)](https://codecov.io/gh/blacklanternsecurity/bbot) [![Discord](https://img.shields.io/discord/859164869970362439)](https://discord.com/invite/PZqkgxu5SA) -BBOT (Bighuge BLS OSINT Tool) is a recursive internet scanner inspired by [Spiderfoot](https://github.com/smicallef/spiderfoot), but designed to be faster, more reliable, and friendlier to pentesters, bug bounty hunters, and developers. - -Special features include: - -- Support for Multiple Targets -- Web Screenshots -- Suite of Offensive Web Modules -- AI-powered Subdomain Mutations -- Native Output to Neo4j (and more) -- Python API + Developer [Documentation](https://www.blacklanternsecurity.com/bbot/) - -https://github.com/blacklanternsecurity/bbot/assets/20261699/742df3fe-5d1f-4aea-83f6-f990657bf695 +https://github.com/blacklanternsecurity/bbot/assets/20261699/e539e89b-92ea-46fa-b893-9cde94eebf81 _A BBOT scan in real-time - visualization with [VivaGraphJS](https://github.com/blacklanternsecurity/bbot-vivagraphjs)_ -## Quick Start Guide - -Below are some short help sections to get you up and running. - -
-Installation ( Pip ) - -Note: BBOT's [PyPi package](https://pypi.org/project/bbot/) requires Linux and Python 3.9+. +## Installation ```bash # stable version @@ -36,81 +16,140 @@ pipx install bbot # bleeding edge (dev branch) pipx install --pip-args '\--pre' bbot - -bbot --help ``` -
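+Once installed, the `bbot` command should be available on your PATH; `bbot --help` lists all options.
+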
+_For more installation methods, including [Docker](https://hub.docker.com/r/blacklanternsecurity/bbot), see [Getting Started](https://www.blacklanternsecurity.com/bbot/)_ -
-Installation ( Docker ) +## What is BBOT? + +### BBOT is... -[Docker images](https://hub.docker.com/r/blacklanternsecurity/bbot) are provided, along with helper script `bbot-docker.sh` to persist your scan data. +## 1) A Subdomain Finder + +Passive API sources plus a recursive DNS brute-force with target-specific subdomain mutations. ```bash -# bleeding edge (dev) -docker run -it blacklanternsecurity/bbot --help +# find subdomains of evilcorp.com +bbot -t evilcorp.com -p subdomain-enum +``` -# stable -docker run -it blacklanternsecurity/bbot:stable --help +
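+Presets like `subdomain-enum` are plain YAML files (see below). Besides the presets that ship with BBOT, you can write your own and pass it to `-p` by path, e.g. `bbot -t evilcorp.com -p ./my-preset.yml` (the filename here is only illustrative). Presets can also be combined in a single scan, as several of the examples below show.
+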
+subdomain-enum.yml -# helper script -git clone https://github.com/blacklanternsecurity/bbot && cd bbot -./bbot-docker.sh --help +```yaml +description: Enumerate subdomains via APIs, brute-force + +flags: + - subdomain-enum + +output_modules: + - subdomains + +config: + modules: + stdout: + format: text + # only output DNS_NAMEs to the console + event_types: + - DNS_NAME + # only show in-scope subdomains + in_scope_only: True + # display the raw subdomains, nothing else + event_fields: + - data + # automatically dedupe + accept_dups: False ```
-Example Usage +SEE: Comparison to Other Subdomain Enumeration Tools -## Example Commands +BBOT consistently finds 20-50% more subdomains than other tools. The bigger the domain, the bigger the difference. To learn how this is possible, see [How It Works](https://www.blacklanternsecurity.com/bbot/how_it_works/). -Scan output, logs, etc. are saved to `~/.bbot`. For more detailed examples and explanations, see [Scanning](https://www.blacklanternsecurity.com/bbot/scanning). +![subdomain-stats-ebay](https://github.com/blacklanternsecurity/bbot/assets/20261699/53e07e9f-50b6-4b70-9e83-297dbfbcb436) - -**Subdomains:** +
+ + +## 2) A Web Spider ```bash -# Perform a full subdomain enumeration on evilcorp.com -bbot -t evilcorp.com -f subdomain-enum +# crawl evilcorp.com, extracting emails and other goodies +bbot -t evilcorp.com -p spider ``` -**Subdomains (passive only):** +## 3) An Email Gatherer ```bash -# Perform a passive-only subdomain enumeration on evilcorp.com -bbot -t evilcorp.com -f subdomain-enum -rf passive +# enumerate evilcorp.com email addresses +bbot -t evilcorp.com -p subdomain-enum spider email-enum ``` -**Subdomains + port scan + web screenshots:** +## 4) A Web Scanner ```bash -# Port-scan every subdomain, screenshot every webpage, output to current directory -bbot -t evilcorp.com -f subdomain-enum -m nmap gowitness -n my_scan -o . +# run a light web scan against www.evilcorp.com +bbot -t www.evilcorp.com -p web-basic + +# run a heavy web scan against www.evilcorp.com +bbot -t www.evilcorp.com -p web-thorough ``` -**Subdomains + basic web scan:** +## 5) ...And Much More ```bash -# A basic web scan includes wappalyzer, robots.txt, and other non-intrusive web modules -bbot -t evilcorp.com -f subdomain-enum web-basic +# everything everywhere all at once +bbot -t evilcorp.com -p kitchen-sink + +# roughly equivalent to: +bbot -t evilcorp.com -p subdomain-enum cloud-enum code-enum email-enum spider web-basic paramminer dirbust-light web-screenshots ``` -**Web spider:** +## 6) It's Also a Python Library -```bash -# Crawl www.evilcorp.com up to a max depth of 2, automatically extracting emails, secrets, etc. -bbot -t www.evilcorp.com -m httpx robots badsecrets secretsdb -c web_spider_distance=2 web_spider_depth=2 +#### Synchronous +```python +from bbot.scanner import Scanner + +scan = Scanner("evilcorp.com", presets=["subdomain-enum"]) +for event in scan.start(): + print(event) ``` -**Everything everywhere all at once:** +#### Asynchronous +```python +from bbot.scanner import Scanner + +async def main(): + scan = Scanner("evilcorp.com", presets=["subdomain-enum"]) + async for event in scan.async_start(): + print(event.json()) -```bash -# Subdomains, emails, cloud buckets, port scan, basic web, web screenshots, nuclei -bbot -t evilcorp.com -f subdomain-enum email-enum cloud-enum web-basic -m nmap gowitness nuclei --allow-deadly +import asyncio +asyncio.run(main()) ``` - + +
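+
+Config options can also be passed inline, mirroring the CLI's `-c` flag. The sketch below is a best-guess usage rather than a documented signature: the `config` keyword and the option path are assumptions based on the CLI examples elsewhere in this README, and the API key is a placeholder.
+
+```python
+from bbot.scanner import Scanner
+
+# a sketch: presets plus a per-module config override, roughly equivalent to
+#   bbot -t evilcorp.com -p subdomain-enum -c modules.virustotal.api_key=<key>
+# (the `config` kwarg and option path are assumptions; the key is a placeholder)
+scan = Scanner(
+    "evilcorp.com",
+    presets=["subdomain-enum"],
+    config={"modules": {"virustotal": {"api_key": "deadbeef"}}},
+)
+for event in scan.start():
+    # every event carries a type and data, e.g. DNS_NAME www.evilcorp.com
+    print(event.type, event.data)
+```
+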
+SEE: This Nefarious Discord Bot + +A [BBOT Discord Bot](https://www.blacklanternsecurity.com/bbot/dev/discord_bot/) that responds to the `/scan` command. Scan the internet from the comfort of your discord server! + +![bbot-discord](https://github.com/blacklanternsecurity/bbot/assets/20261699/22b268a2-0dfd-4c2a-b7c5-548c0f2cc6f9) + +## Feature Overview + +BBOT (Bighuge BLS OSINT Tool) is a recursive internet scanner inspired by [Spiderfoot](https://github.com/smicallef/spiderfoot), but designed to be faster, more reliable, and friendlier to pentesters, bug bounty hunters, and developers. + +Special features include: + +- Support for Multiple Targets +- Web Screenshots +- Suite of Offensive Web Modules +- AI-powered Subdomain Mutations +- Native Output to Neo4j (and more) +- Python API + Developer Documentation ## Targets @@ -134,7 +173,7 @@ For more information, see [Targets](https://www.blacklanternsecurity.com/bbot/sc Similar to Amass or Subfinder, BBOT supports API keys for various third-party services such as SecurityTrails, etc. -The standard way to do this is to enter your API keys in **`~/.config/bbot/secrets.yml`**: +The standard way to do this is to enter your API keys in **`~/.config/bbot/bbot.yml`**: ```yaml modules: shodan_dns: @@ -154,41 +193,7 @@ bbot -c modules.virustotal.api_key=dd5f0eee2e4a99b71a939bded450b246 For details, see [Configuration](https://www.blacklanternsecurity.com/bbot/scanning/configuration/) -## BBOT as a Python Library - -BBOT exposes a Python API that allows it to be used for all kinds of fun and nefarious purposes, like a [Discord Bot](https://www.blacklanternsecurity.com/bbot/dev/#bbot-python-library-advanced-usage#discord-bot-example) that responds to the `/scan` command. - -![bbot-discord](https://github.com/blacklanternsecurity/bbot/assets/20261699/22b268a2-0dfd-4c2a-b7c5-548c0f2cc6f9) - -**Synchronous** - -```python -from bbot.scanner import Scanner - -# any number of targets can be specified -scan = Scanner("example.com", "scanme.nmap.org", modules=["nmap", "sslcert"]) -for event in scan.start(): - print(event.json()) -``` - -**Asynchronous** - -```python -from bbot.scanner import Scanner - -async def main(): - scan = Scanner("example.com", "scanme.nmap.org", modules=["nmap", "sslcert"]) - async for event in scan.async_start(): - print(event.json()) - -import asyncio -asyncio.run(main()) -``` - -
- -
-Documentation - Table of Contents +## Documentation - **User Manual** @@ -198,6 +203,9 @@ asyncio.run(main()) - [Comparison to Other Tools](https://www.blacklanternsecurity.com/bbot/comparison) - **Scanning** - [Scanning Overview](https://www.blacklanternsecurity.com/bbot/scanning/) + - **Presets** + - [Overview](https://www.blacklanternsecurity.com/bbot/scanning/presets) + - [List of Presets](https://www.blacklanternsecurity.com/bbot/scanning/presets_list) - [Events](https://www.blacklanternsecurity.com/bbot/scanning/events) - [Output](https://www.blacklanternsecurity.com/bbot/scanning/output) - [Tips and Tricks](https://www.blacklanternsecurity.com/bbot/scanning/tips_and_tricks) @@ -226,12 +234,9 @@ asyncio.run(main()) - [Word Cloud](https://www.blacklanternsecurity.com/bbot/dev/helpers/wordcloud) -
- -
-Contribution
+## Contribution
 
-BBOT is constantly being improved by the community. Every day it grows more powerful!
+Some of the best BBOT modules were written by the community. BBOT is constantly being improved; every day it grows more powerful!
 
 We welcome contributions. Not just code, but ideas too! If you have an idea for a new feature, please let us know in [Discussions](https://github.com/blacklanternsecurity/bbot/discussions). If you want to get your hands dirty, see [Contribution](https://www.blacklanternsecurity.com/bbot/contribution/). There you can find setup instructions and a simple tutorial on how to write a BBOT module. We also have extensive [Developer Documentation](https://www.blacklanternsecurity.com/bbot/dev/).
 
@@ -243,71 +248,12 @@ Thanks to these amazing people for contributing to BBOT! :heart:
 

-Special thanks to the following people who made BBOT possible: +Special thanks to: - @TheTechromancer for creating [BBOT](https://github.com/blacklanternsecurity/bbot) -- @liquidsec for his extensive work on BBOT's web hacking features, including [badsecrets](https://github.com/blacklanternsecurity/badsecrets) +- @liquidsec for his extensive work on BBOT's web hacking features, including [badsecrets](https://github.com/blacklanternsecurity/badsecrets) and [baddns](https://github.com/blacklanternsecurity/baddns) - Steve Micallef (@smicallef) for creating Spiderfoot - @kerrymilan for his Neo4j and Ansible expertise +- @domwhewell-sage for his family of badass code-looting modules - @aconite33 and @amiremami for their ruthless testing - Aleksei Kornev (@alekseiko) for allowing us ownership of the bbot Pypi repository <3 - -
- -## Comparison to Other Tools - -BBOT consistently finds 20-50% more subdomains than other tools. The bigger the domain, the bigger the difference. To learn how this is possible, see [How It Works](https://www.blacklanternsecurity.com/bbot/how_it_works/). - -![subdomain-stats-ebay](https://github.com/blacklanternsecurity/bbot/assets/20261699/53e07e9f-50b6-4b70-9e83-297dbfbcb436) - -## BBOT Modules By Flag -For a full list of modules, including the data types consumed and emitted by each one, see [List of Modules](https://www.blacklanternsecurity.com/bbot/modules/list_of_modules/). - - -| Flag | # Modules | Description | Modules | -|------------------|-------------|----------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| safe | 80 | Non-intrusive, safe to run | affiliates, aggregate, ajaxpro, anubisdb, asn, azure_realm, azure_tenant, baddns, baddns_zone, badsecrets, bevigil, binaryedge, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_file_enum, bucket_firebase, bucket_google, builtwith, c99, censys, certspotter, chaos, columbus, credshed, crobat, crt, dehashed, digitorus, dnscommonsrv, dnsdumpster, docker_pull, dockerhub, emailformat, filedownload, fingerprintx, fullhunt, git, git_clone, github_codesearch, github_org, gitlab, gowitness, hackertarget, httpx, hunt, hunterio, iis_shortnames, internetdb, ip2location, ipstack, leakix, myssl, newsletters, ntlm, oauth, otx, passivetotal, pgp, postman, rapiddns, riddler, robots, secretsdb, securitytrails, shodan_dns, sitedossier, skymem, social, sslcert, subdomaincenter, sublist3r, threatminer, trufflehog, urlscan, viewdns, virustotal, wappalyzer, wayback, zoomeye | -| passive | 59 | Never connects to target systems | affiliates, aggregate, anubisdb, asn, azure_realm, azure_tenant, bevigil, binaryedge, bucket_file_enum, builtwith, c99, censys, certspotter, chaos, columbus, credshed, crobat, crt, dehashed, digitorus, dnscommonsrv, dnsdumpster, docker_pull, emailformat, excavate, fullhunt, git_clone, github_codesearch, github_org, hackertarget, hunterio, internetdb, ip2location, ipneighbor, ipstack, leakix, massdns, myssl, otx, passivetotal, pgp, postman, rapiddns, riddler, securitytrails, shodan_dns, sitedossier, skymem, social, speculate, subdomaincenter, sublist3r, threatminer, trufflehog, urlscan, viewdns, virustotal, wayback, zoomeye | -| subdomain-enum | 45 | Enumerates subdomains | anubisdb, asn, azure_realm, azure_tenant, baddns_zone, bevigil, binaryedge, builtwith, c99, censys, certspotter, chaos, columbus, crt, digitorus, dnscommonsrv, dnsdumpster, fullhunt, github_codesearch, github_org, hackertarget, httpx, hunterio, internetdb, ipneighbor, leakix, massdns, myssl, oauth, 
otx, passivetotal, postman, rapiddns, riddler, securitytrails, shodan_dns, sitedossier, sslcert, subdomaincenter, subdomains, threatminer, urlscan, virustotal, wayback, zoomeye | -| active | 43 | Makes active connections to target systems | ajaxpro, baddns, baddns_zone, badsecrets, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_firebase, bucket_google, bypass403, dastardly, dockerhub, dotnetnuke, ffuf, ffuf_shortnames, filedownload, fingerprintx, generic_ssrf, git, gitlab, gowitness, host_header, httpx, hunt, iis_shortnames, masscan, newsletters, nmap, ntlm, nuclei, oauth, paramminer_cookies, paramminer_getparams, paramminer_headers, robots, secretsdb, smuggler, sslcert, telerik, url_manipulation, vhost, wafw00f, wappalyzer | -| web-thorough | 29 | More advanced web scanning functionality | ajaxpro, azure_realm, badsecrets, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_firebase, bucket_google, bypass403, dastardly, dotnetnuke, ffuf_shortnames, filedownload, generic_ssrf, git, host_header, httpx, hunt, iis_shortnames, nmap, ntlm, oauth, robots, secretsdb, smuggler, sslcert, telerik, url_manipulation, wappalyzer | -| aggressive | 20 | Generates a large amount of network traffic | bypass403, dastardly, dotnetnuke, ffuf, ffuf_shortnames, generic_ssrf, host_header, ipneighbor, masscan, massdns, nmap, nuclei, paramminer_cookies, paramminer_getparams, paramminer_headers, smuggler, telerik, url_manipulation, vhost, wafw00f | -| web-basic | 17 | Basic, non-intrusive web scan functionality | azure_realm, baddns, badsecrets, bucket_amazon, bucket_azure, bucket_firebase, bucket_google, filedownload, git, httpx, iis_shortnames, ntlm, oauth, robots, secretsdb, sslcert, wappalyzer | -| cloud-enum | 12 | Enumerates cloud resources | azure_realm, azure_tenant, baddns, baddns_zone, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_file_enum, bucket_firebase, bucket_google, httpx, oauth | -| slow | 10 | May take a long time to complete | bucket_digitalocean, dastardly, docker_pull, fingerprintx, git_clone, paramminer_cookies, paramminer_getparams, paramminer_headers, smuggler, vhost | -| affiliates | 8 | Discovers affiliated hostnames/domains | affiliates, azure_realm, azure_tenant, builtwith, oauth, sslcert, viewdns, zoomeye | -| email-enum | 7 | Enumerates email addresses | dehashed, emailformat, emails, hunterio, pgp, skymem, sslcert | -| deadly | 4 | Highly aggressive | dastardly, ffuf, nuclei, vhost | -| portscan | 3 | Discovers open ports | internetdb, masscan, nmap | -| web-paramminer | 3 | Discovers HTTP parameters through brute-force | paramminer_cookies, paramminer_getparams, paramminer_headers | -| baddns | 2 | Runs all modules from the DNS auditing tool BadDNS | baddns, baddns_zone | -| iis-shortnames | 2 | Scans for IIS Shortname vulnerability | ffuf_shortnames, iis_shortnames | -| report | 2 | Generates a report at the end of the scan | affiliates, asn | -| social-enum | 2 | Enumerates social media | httpx, social | -| service-enum | 1 | Identifies protocols running on open ports | fingerprintx | -| subdomain-hijack | 1 | Detects hijackable subdomains | baddns | -| web-screenshots | 1 | Takes screenshots of web pages | gowitness | - - -## BBOT Output Modules -BBOT can save its data to TXT, CSV, JSON, and tons of other destinations including [Neo4j](https://www.blacklanternsecurity.com/bbot/scanning/output/#neo4j), [Splunk](https://www.blacklanternsecurity.com/bbot/scanning/output/#splunk), and 
[Discord](https://www.blacklanternsecurity.com/bbot/scanning/output/#discord-slack-teams). For instructions on how to use these, see [Output Modules](https://www.blacklanternsecurity.com/bbot/scanning/output). - - -| Module | Type | Needs API Key | Description | Flags | Consumed Events | Produced Events | -|-----------------|--------|-----------------|-----------------------------------------------------------------------------------------|----------------|--------------------------------------------------------------------------------------------------|---------------------------| -| asset_inventory | output | No | Merge hosts, open ports, technologies, findings, etc. into a single asset inventory CSV | | DNS_NAME, FINDING, HTTP_RESPONSE, IP_ADDRESS, OPEN_TCP_PORT, TECHNOLOGY, URL, VULNERABILITY, WAF | IP_ADDRESS, OPEN_TCP_PORT | -| csv | output | No | Output to CSV | | * | | -| discord | output | No | Message a Discord channel when certain events are encountered | | * | | -| emails | output | No | Output any email addresses found belonging to the target domain | email-enum | EMAIL_ADDRESS | | -| http | output | No | Send every event to a custom URL via a web request | | * | | -| human | output | No | Output to text | | * | | -| json | output | No | Output to Newline-Delimited JSON (NDJSON) | | * | | -| neo4j | output | No | Output to Neo4j | | * | | -| python | output | No | Output via Python API | | * | | -| slack | output | No | Message a Slack channel when certain events are encountered | | * | | -| splunk | output | No | Send every event to a splunk instance through HTTP Event Collector | | * | | -| subdomains | output | No | Output only resolved, in-scope subdomains | subdomain-enum | DNS_NAME, DNS_NAME_UNRESOLVED | | -| teams | output | No | Message a Teams channel when certain events are encountered | | * | | -| web_report | output | No | Create a markdown report with web assets | | FINDING, TECHNOLOGY, URL, VHOST, VULNERABILITY | | -| websocket | output | No | Output to websockets | | * | | - diff --git a/bbot/__init__.py b/bbot/__init__.py index 1d95273e3..8e016095f 100644 --- a/bbot/__init__.py +++ b/bbot/__init__.py @@ -1,10 +1,2 @@ # version placeholder (replaced by poetry-dynamic-versioning) -__version__ = "0.0.0" - -# global app config -from .core import configurator - -config = configurator.config - -# helpers -from .core import helpers +__version__ = "v0.0.0" diff --git a/bbot/agent/__init__.py b/bbot/agent/__init__.py deleted file mode 100644 index d2361b7a3..000000000 --- a/bbot/agent/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .agent import Agent diff --git a/bbot/agent/agent.py b/bbot/agent/agent.py deleted file mode 100644 index 1c8debc1e..000000000 --- a/bbot/agent/agent.py +++ /dev/null @@ -1,204 +0,0 @@ -import json -import asyncio -import logging -import traceback -import websockets -from omegaconf import OmegaConf - -from . 
import messages -import bbot.core.errors -from bbot.scanner import Scanner -from bbot.scanner.dispatcher import Dispatcher -from bbot.core.helpers.misc import urlparse, split_host_port -from bbot.core.configurator.environ import prepare_environment - -log = logging.getLogger("bbot.core.agent") - - -class Agent: - def __init__(self, config): - self.config = config - prepare_environment(self.config) - self.url = self.config.get("agent_url", "") - self.parsed_url = urlparse(self.url) - self.host, self.port = split_host_port(self.parsed_url.netloc) - self.token = self.config.get("agent_token", "") - self.scan = None - self.task = None - self._ws = None - self._scan_lock = asyncio.Lock() - - self.dispatcher = Dispatcher() - self.dispatcher.on_status = self.on_scan_status - self.dispatcher.on_finish = self.on_scan_finish - - def setup(self): - if not self.url: - log.error(f"Must specify agent_url") - return False - if not self.token: - log.error(f"Must specify agent_token") - return False - return True - - async def ws(self, rebuild=False): - if self._ws is None or rebuild: - kwargs = {"close_timeout": 0.5} - if self.token: - kwargs.update({"extra_headers": {"Authorization": f"Bearer {self.token}"}}) - verbs = ("Building", "Built") - if rebuild: - verbs = ("Rebuilding", "Rebuilt") - url = f"{self.url}/control/" - log.debug(f"{verbs[0]} websocket connection to {url}") - while 1: - try: - self._ws = await websockets.connect(url, **kwargs) - break - except Exception as e: - log.error(f'Failed to establish websockets connection to URL "{url}": {e}') - log.trace(traceback.format_exc()) - await asyncio.sleep(1) - log.debug(f"{verbs[1]} websocket connection to {url}") - return self._ws - - async def start(self): - rebuild = False - while 1: - ws = await self.ws(rebuild=rebuild) - rebuild = False - try: - message = await ws.recv() - log.debug(f"Got message: {message}") - try: - message = json.loads(message) - message = messages.Message(**message) - - if message.command == "ping": - if self.scan is None: - await self.send({"conversation": str(message.conversation), "message_type": "pong"}) - continue - - command_type = getattr(messages, message.command, None) - if command_type is None: - log.warning(f'Invalid command: "{message.command}"') - continue - - command_args = command_type(**message.arguments) - command_fn = getattr(self, message.command) - response = await self.err_handle(command_fn, **command_args.dict()) - log.info(str(response)) - await self.send({"conversation": str(message.conversation), "message": response}) - - except json.decoder.JSONDecodeError as e: - log.warning(f'Failed to decode message "{message}": {e}') - log.trace(traceback.format_exc()) - continue - except Exception as e: - log.debug(f"Error receiving message: {e}") - log.debug(traceback.format_exc()) - await asyncio.sleep(1) - rebuild = True - - async def send(self, message): - rebuild = False - while 1: - try: - ws = await self.ws(rebuild=rebuild) - j = json.dumps(message) - log.debug(f"Sending message of length {len(message)}") - await ws.send(j) - rebuild = False - break - except Exception as e: - log.warning(f"Error sending message: {e}, retrying") - log.trace(traceback.format_exc()) - await asyncio.sleep(1) - # rebuild = True - - async def start_scan(self, scan_id, name=None, targets=[], modules=[], output_modules=[], config={}): - async with self._scan_lock: - if self.scan is None: - log.success( - f"Starting scan with targets={targets}, modules={modules}, output_modules={output_modules}" - ) - output_module_config = 
OmegaConf.create( - {"output_modules": {"websocket": {"url": f"{self.url}/scan/{scan_id}/", "token": self.token}}} - ) - config = OmegaConf.create(config) - config = OmegaConf.merge(self.config, config, output_module_config) - output_modules = list(set(output_modules + ["websocket"])) - scan = Scanner( - *targets, - scan_id=scan_id, - name=name, - modules=modules, - output_modules=output_modules, - config=config, - dispatcher=self.dispatcher, - ) - self.task = asyncio.create_task(self._start_scan_task(scan)) - - return {"success": f"Started scan", "scan_id": scan.id} - else: - msg = f"Scan {self.scan.id} already in progress" - log.warning(msg) - return {"error": msg, "scan_id": self.scan.id} - - async def _start_scan_task(self, scan): - self.scan = scan - try: - await scan.async_start_without_generator() - except bbot.core.errors.ScanError as e: - log.error(f"Scan error: {e}") - log.trace(traceback.format_exc()) - except Exception: - log.critical(f"Encountered error: {traceback.format_exc()}") - self.on_scan_status("FAILED", scan.id) - finally: - self.task = None - - async def stop_scan(self): - log.warning("Stopping scan") - try: - async with self._scan_lock: - if self.scan is None: - msg = "Scan not in progress" - log.warning(msg) - return {"error": msg} - scan_id = str(self.scan.id) - self.scan.stop() - msg = f"Stopped scan {scan_id}" - log.warning(msg) - self.scan = None - return {"success": msg, "scan_id": scan_id} - except Exception as e: - log.warning(f"Error while stopping scan: {e}") - log.trace(traceback.format_exc()) - finally: - self.scan = None - self.task = None - - async def scan_status(self): - async with self._scan_lock: - if self.scan is None: - msg = "Scan not in progress" - log.warning(msg) - return {"error": msg} - return {"success": "Polled scan", "scan_status": self.scan.status} - - async def on_scan_status(self, status, scan_id): - await self.send({"message_type": "scan_status_change", "status": str(status), "scan_id": scan_id}) - - async def on_scan_finish(self, scan): - self.scan = None - self.task = None - - async def err_handle(self, callback, *args, **kwargs): - try: - return await callback(*args, **kwargs) - except Exception as e: - msg = f"Error in {callback.__qualname__}(): {e}" - log.error(msg) - log.trace(traceback.format_exc()) - return {"error": msg} diff --git a/bbot/agent/messages.py b/bbot/agent/messages.py deleted file mode 100644 index 34fd2c15c..000000000 --- a/bbot/agent/messages.py +++ /dev/null @@ -1,29 +0,0 @@ -from uuid import UUID -from typing import Optional -from pydantic import BaseModel - - -class Message(BaseModel): - conversation: UUID - command: str - arguments: Optional[dict] = {} - - -### COMMANDS ### - - -class start_scan(BaseModel): - scan_id: str - targets: list - modules: list - output_modules: list = [] - config: dict = {} - name: Optional[str] = None - - -class stop_scan(BaseModel): - pass - - -class scan_status(BaseModel): - pass diff --git a/bbot/cli.py b/bbot/cli.py index 7b91c964f..8e308d6f8 100755 --- a/bbot/cli.py +++ b/bbot/cli.py @@ -1,78 +1,66 @@ #!/usr/bin/env python3 -import os -import re import sys -import asyncio import logging -import traceback -from omegaconf import OmegaConf -from contextlib import suppress - -# fix tee buffering -sys.stdout.reconfigure(line_buffering=True) +from bbot.errors import * +from bbot import __version__ +from bbot.logger import log_to_stderr -# logging -from bbot.core.logger import get_log_level, toggle_log_level +silent = "-s" in sys.argv or "--silent" in sys.argv -import 
bbot.core.errors -from bbot import __version__ -from bbot.modules import module_loader -from bbot.core.configurator.args import parser -from bbot.core.helpers.logger import log_to_stderr -from bbot.core.configurator import ensure_config_files, check_cli_args, environ +if not silent: + ascii_art = f"""  ______  _____ ____ _______ + | ___ \| __ \ / __ \__ __| + | |___) | |__) | | | | | | + | ___ <| __ <| | | | | | + | |___) | |__) | |__| | | | + |______/|_____/ \____/ |_| + BIGHUGE BLS OSINT TOOL {__version__} -log = logging.getLogger("bbot.cli") + www.blacklanternsecurity.com/bbot +""" + print(ascii_art, file=sys.stderr) +scan_name = "" -log_level = get_log_level() +async def _main(): -from . import config + import asyncio + import traceback + from contextlib import suppress + # fix tee buffering + sys.stdout.reconfigure(line_buffering=True) -err = False -scan_name = "" + log = logging.getLogger("bbot.cli") + from bbot.scanner import Scanner + from bbot.scanner.preset import Preset -async def _main(): - global err global scan_name - environ.cli_execution = True - - # async def monitor_tasks(): - # in_row = 0 - # while 1: - # try: - # print('looooping') - # tasks = asyncio.all_tasks() - # current_task = asyncio.current_task() - # if len(tasks) == 1 and list(tasks)[0] == current_task: - # print('no tasks') - # in_row += 1 - # else: - # in_row = 0 - # for t in tasks: - # print(t) - # if in_row > 2: - # break - # await asyncio.sleep(1) - # except BaseException as e: - # print(traceback.format_exc()) - # with suppress(BaseException): - # await asyncio.sleep(.1) - - # monitor_tasks_task = asyncio.create_task(monitor_tasks()) - - ensure_config_files() try: + + # start by creating a default scan preset + preset = Preset(_log=True, name="bbot_cli_main") + # populate preset symlinks + preset.all_presets + # parse command line arguments and merge into preset + try: + preset.parse_args() + except BBOTArgumentError as e: + log_to_stderr(str(e), level="WARNING") + log.trace(traceback.format_exc()) + return + # ensure arguments (-c config options etc.) 
are valid + options = preset.args.parsed + + # print help if no arguments if len(sys.argv) == 1: - parser.print_help() + log.stdout(preset.args.parser.format_help()) sys.exit(1) - - options = parser.parse_args() - check_cli_args() + return # --version if options.version: @@ -80,328 +68,194 @@ async def _main(): sys.exit(0) return - # --current-config - if options.current_config: - log.stdout(f"{OmegaConf.to_yaml(config)}") - sys.exit(0) + # --list-presets + if options.list_presets: + log.stdout("") + log.stdout("### PRESETS ###") + log.stdout("") + for row in preset.presets_table().splitlines(): + log.stdout(row) return - if options.agent_mode: - from bbot.agent import Agent - - agent = Agent(config) - success = agent.setup() - if success: - await agent.start() - - else: - from bbot.scanner import Scanner - - try: - output_modules = set(options.output_modules) - module_filtering = False - if (options.list_modules or options.help_all) and not any([options.flags, options.modules]): - module_filtering = True - modules = set(module_loader.preloaded(type="scan")) - else: - modules = set(options.modules) - # enable modules by flags - for m, c in module_loader.preloaded().items(): - module_type = c.get("type", "scan") - if m not in modules: - flags = c.get("flags", []) - if "deadly" in flags: - continue - for f in options.flags: - if f in flags: - log.verbose(f'Enabling {m} because it has flag "{f}"') - if module_type == "output": - output_modules.add(m) - else: - modules.add(m) - - default_output_modules = ["human", "json", "csv"] - - # Make a list of the modules which can be output to the console - consoleable_output_modules = [ - k for k, v in module_loader.preloaded(type="output").items() if "console" in v["config"] - ] - - # if none of the output modules provided on the command line are consoleable, don't turn off the defaults. Instead, just add the one specified to the defaults. - if not any(o in consoleable_output_modules for o in output_modules): - output_modules.update(default_output_modules) - - scanner = Scanner( - *options.targets, - modules=list(modules), - output_modules=list(output_modules), - output_dir=options.output_dir, - config=config, - name=options.name, - whitelist=options.whitelist, - blacklist=options.blacklist, - strict_scope=options.strict_scope, - force_start=options.force, - ) - - if options.install_all_deps: - all_modules = list(module_loader.preloaded()) - scanner.helpers.depsinstaller.force_deps = True - succeeded, failed = await scanner.helpers.depsinstaller.install(*all_modules) - log.info("Finished installing module dependencies") - return False if failed else True - - scan_name = str(scanner.name) - - # enable modules by dependency - # this is only a basic surface-level check - # todo: recursive dependency graph with networkx or topological sort? 
- all_modules = list(set(scanner._scan_modules + scanner._internal_modules + scanner._output_modules)) - while 1: - changed = False - dep_choices = module_loader.recommend_dependencies(all_modules) - if not dep_choices: - break - for event_type, deps in dep_choices.items(): - if event_type in ("*", "all"): - continue - # skip resolving dependency if a target provides the missing type - if any(e.type == event_type for e in scanner.target.events): - continue - required_by = deps.get("required_by", []) - recommended = deps.get("recommended", []) - if not recommended: - log.hugewarning( - f"{len(required_by):,} modules ({','.join(required_by)}) rely on {event_type} but no modules produce it" - ) - elif len(recommended) == 1: - log.verbose( - f"Enabling {next(iter(recommended))} because {len(required_by):,} modules ({','.join(required_by)}) rely on it for {event_type}" - ) - all_modules = list(set(all_modules + list(recommended))) - scanner._scan_modules = list(set(scanner._scan_modules + list(recommended))) - changed = True - else: - log.hugewarning( - f"{len(required_by):,} modules ({','.join(required_by)}) rely on {event_type} but no enabled module produces it" - ) - log.hugewarning( - f"Recommend enabling one or more of the following modules which produce {event_type}:" - ) - for m in recommended: - log.warning(f" - {m}") - if not changed: - break - - # required flags - modules = set(scanner._scan_modules) - for m in scanner._scan_modules: - flags = module_loader._preloaded.get(m, {}).get("flags", []) - if not all(f in flags for f in options.require_flags): - log.verbose( - f"Removing {m} because it does not have the required flags: {'+'.join(options.require_flags)}" - ) - with suppress(KeyError): - modules.remove(m) - - # excluded flags - for m in scanner._scan_modules: - flags = module_loader._preloaded.get(m, {}).get("flags", []) - if any(f in flags for f in options.exclude_flags): - log.verbose(f"Removing {m} because of excluded flag: {','.join(options.exclude_flags)}") - with suppress(KeyError): - modules.remove(m) - - # excluded modules - for m in options.exclude_modules: - if m in modules: - log.verbose(f"Removing {m} because it is excluded") - with suppress(KeyError): - modules.remove(m) - scanner._scan_modules = list(modules) - - log_fn = log.info - if options.list_modules or options.help_all: - log_fn = log.stdout - - help_modules = list(modules) - if module_filtering: - help_modules = None - - if options.help_all: - log_fn(parser.format_help()) - - if options.list_flags: - log.stdout("") - log.stdout("### FLAGS ###") - log.stdout("") - for row in module_loader.flags_table(flags=options.flags).splitlines(): - log.stdout(row) - return - - log_fn("") - log_fn("### MODULES ###") - log_fn("") - for row in module_loader.modules_table(modules=help_modules).splitlines(): - log_fn(row) - - if options.help_all: - log_fn("") - log_fn("### MODULE OPTIONS ###") - log_fn("") - for row in module_loader.modules_options_table(modules=help_modules).splitlines(): - log_fn(row) - - if options.list_modules or options.list_flags or options.help_all: - return - - module_list = module_loader.filter_modules(modules=modules) - deadly_modules = [] - active_modules = [] - active_aggressive_modules = [] - slow_modules = [] - for m in module_list: - if m[0] in scanner._scan_modules: - if "deadly" in m[-1]["flags"]: - deadly_modules.append(m[0]) - if "active" in m[-1]["flags"]: - active_modules.append(m[0]) - if "aggressive" in m[-1]["flags"]: - active_aggressive_modules.append(m[0]) - if "slow" in 
m[-1]["flags"]: - slow_modules.append(m[0]) - if scanner._scan_modules: - if deadly_modules and not options.allow_deadly: - log.hugewarning(f"You enabled the following deadly modules: {','.join(deadly_modules)}") - log.hugewarning(f"Deadly modules are highly intrusive") - log.hugewarning(f"Please specify --allow-deadly to continue") - return False - if active_modules: - if active_modules: - if active_aggressive_modules: - log.hugewarning( - "This is an (aggressive) active scan! Intrusive connections will be made to target" - ) - else: - log.hugewarning( - "This is a (safe) active scan. Non-intrusive connections will be made to target" - ) - else: - log.hugeinfo("This is a passive scan. No connections will be made to target") - if slow_modules: - log.warning( - f"You have enabled the following slow modules: {','.join(slow_modules)}. Scan may take a while" - ) - - scanner.helpers.word_cloud.load() - - await scanner._prep() - - if not options.dry_run: - log.trace(f"Command: {' '.join(sys.argv)}") - - # if we're on the terminal, enable keyboard interaction - if sys.stdin.isatty(): - - import fcntl - from bbot.core.helpers.misc import smart_decode - - if not options.agent_mode and not options.yes: - log.hugesuccess(f"Scan ready. Press enter to execute {scanner.name}") - input() - - def handle_keyboard_input(keyboard_input): - kill_regex = re.compile(r"kill (?P[a-z0-9_]+)") - if keyboard_input: - log.verbose(f'Got keyboard input: "{keyboard_input}"') - kill_match = kill_regex.match(keyboard_input) - if kill_match: - module = kill_match.group("module") - if module in scanner.modules: - log.hugewarning(f'Killing module: "{module}"') - scanner.manager.kill_module(module, message="killed by user") - else: - log.warning(f'Invalid module: "{module}"') - else: - toggle_log_level(logger=log) - scanner.manager.modules_status(_log=True) + # if we're listing modules or their options + if options.list_modules or options.list_module_options: + + # if no modules or flags are specified, enable everything + if not (options.modules or options.output_modules or options.flags): + for module, preloaded in preset.module_loader.preloaded().items(): + module_type = preloaded.get("type", "scan") + preset.add_module(module, module_type=module_type) + + preset.bake() + + # --list-modules + if options.list_modules: + log.stdout("") + log.stdout("### MODULES ###") + log.stdout("") + for row in preset.module_loader.modules_table(preset.modules).splitlines(): + log.stdout(row) + return + + # --list-module-options + if options.list_module_options: + log.stdout("") + log.stdout("### MODULE OPTIONS ###") + log.stdout("") + for row in preset.module_loader.modules_options_table(preset.modules).splitlines(): + log.stdout(row) + return + + # --list-flags + if options.list_flags: + flags = preset.flags if preset.flags else None + log.stdout("") + log.stdout("### FLAGS ###") + log.stdout("") + for row in preset.module_loader.flags_table(flags=flags).splitlines(): + log.stdout(row) + return - reader = asyncio.StreamReader() - protocol = asyncio.StreamReaderProtocol(reader) - await asyncio.get_event_loop().connect_read_pipe(lambda: protocol, sys.stdin) + try: + scan = Scanner(preset=preset) + except (PresetAbortError, ValidationError) as e: + log.warning(str(e)) + return - # set stdout and stderr to blocking mode - # this is needed to prevent BlockingIOErrors in logging etc. 
- fds = [sys.stdout.fileno(), sys.stderr.fileno()] - for fd in fds: - flags = fcntl.fcntl(fd, fcntl.F_GETFL) - fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK) + deadly_modules = [ + m for m in scan.preset.scan_modules if "deadly" in preset.preloaded_module(m).get("flags", []) + ] + if deadly_modules and not options.allow_deadly: + log.hugewarning(f"You enabled the following deadly modules: {','.join(deadly_modules)}") + log.hugewarning(f"Deadly modules are highly intrusive") + log.hugewarning(f"Please specify --allow-deadly to continue") + return False + + # --current-preset + if options.current_preset: + print(scan.preset.to_yaml()) + sys.exit(0) + return + + # --current-preset-full + if options.current_preset_full: + print(scan.preset.to_yaml(full_config=True)) + sys.exit(0) + return - async def akeyboard_listen(): + # --install-all-deps + if options.install_all_deps: + all_modules = list(preset.module_loader.preloaded()) + scan.helpers.depsinstaller.force_deps = True + succeeded, failed = await scan.helpers.depsinstaller.install(*all_modules) + log.info("Finished installing module dependencies") + return False if failed else True + + scan_name = str(scan.name) + + log.verbose("") + log.verbose("### MODULES ENABLED ###") + log.verbose("") + for row in scan.preset.module_loader.modules_table(scan.preset.modules).splitlines(): + log.verbose(row) + + scan.helpers.word_cloud.load() + await scan._prep() + + if not options.dry_run: + log.trace(f"Command: {' '.join(sys.argv)}") + + if sys.stdin.isatty(): + if not options.yes: + log.hugesuccess(f"Scan ready. Press enter to execute {scan.name}") + input() + + import os + import re + import fcntl + from bbot.core.helpers.misc import smart_decode + + def handle_keyboard_input(keyboard_input): + kill_regex = re.compile(r"kill (?P[a-z0-9_]+)") + if keyboard_input: + log.verbose(f'Got keyboard input: "{keyboard_input}"') + kill_match = kill_regex.match(keyboard_input) + if kill_match: + module = kill_match.group("module") + if module in scan.modules: + log.hugewarning(f'Killing module: "{module}"') + scan.kill_module(module, message="killed by user") + else: + log.warning(f'Invalid module: "{module}"') + else: + scan.preset.core.logger.toggle_log_level(logger=log) + scan.modules_status(_log=True) + + reader = asyncio.StreamReader() + protocol = asyncio.StreamReaderProtocol(reader) + await asyncio.get_event_loop().connect_read_pipe(lambda: protocol, sys.stdin) + + # set stdout and stderr to blocking mode + # this is needed to prevent BlockingIOErrors in logging etc. 
+ fds = [sys.stdout.fileno(), sys.stderr.fileno()] + for fd in fds: + flags = fcntl.fcntl(fd, fcntl.F_GETFL) + fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK) + + async def akeyboard_listen(): + try: + allowed_errors = 10 + while 1: + keyboard_input = None try: + keyboard_input = smart_decode((await reader.readline()).strip()) allowed_errors = 10 - while 1: - keyboard_input = None - try: - keyboard_input = smart_decode((await reader.readline()).strip()) - allowed_errors = 10 - except Exception as e: - log_to_stderr(f"Error in keyboard listen loop: {e}", level="TRACE") - log_to_stderr(traceback.format_exc(), level="TRACE") - allowed_errors -= 1 - if keyboard_input is not None: - handle_keyboard_input(keyboard_input) - if allowed_errors <= 0: - break except Exception as e: - log_to_stderr(f"Error in keyboard listen task: {e}", level="ERROR") + log_to_stderr(f"Error in keyboard listen loop: {e}", level="TRACE") log_to_stderr(traceback.format_exc(), level="TRACE") + allowed_errors -= 1 + if keyboard_input is not None: + handle_keyboard_input(keyboard_input) + if allowed_errors <= 0: + break + except Exception as e: + log_to_stderr(f"Error in keyboard listen task: {e}", level="ERROR") + log_to_stderr(traceback.format_exc(), level="TRACE") - asyncio.create_task(akeyboard_listen()) + asyncio.create_task(akeyboard_listen()) - await scanner.async_start_without_generator() + await scan.async_start_without_generator() - except bbot.core.errors.ScanError as e: - log_to_stderr(str(e), level="ERROR") - except Exception: - raise - - except bbot.core.errors.BBOTError as e: - log_to_stderr(f"{e} (--debug for details)", level="ERROR") - if log_level <= logging.DEBUG: - log_to_stderr(traceback.format_exc(), level="DEBUG") - err = True - - except Exception: - log_to_stderr(f"Encountered unknown error: {traceback.format_exc()}", level="ERROR") - err = True + return True finally: # save word cloud with suppress(BaseException): - save_success, filename = scanner.helpers.word_cloud.save() + save_success, filename = scan.helpers.word_cloud.save() if save_success: - log_to_stderr(f"Saved word cloud ({len(scanner.helpers.word_cloud):,} words) to {filename}") + log_to_stderr(f"Saved word cloud ({len(scan.helpers.word_cloud):,} words) to {filename}") # remove output directory if empty with suppress(BaseException): - scanner.home.rmdir() - if err: - os._exit(1) + scan.home.rmdir() def main(): + import asyncio + import traceback + from bbot.core import CORE + global scan_name try: asyncio.run(_main()) except asyncio.CancelledError: - if get_log_level() <= logging.DEBUG: + if CORE.logger.log_level <= logging.DEBUG: log_to_stderr(traceback.format_exc(), level="DEBUG") except KeyboardInterrupt: msg = "Interrupted" if scan_name: msg = f"You killed {scan_name}" log_to_stderr(msg, level="WARNING") - if get_log_level() <= logging.DEBUG: + if CORE.logger.log_level <= logging.DEBUG: log_to_stderr(traceback.format_exc(), level="DEBUG") exit(1) diff --git a/bbot/core/__init__.py b/bbot/core/__init__.py index 52cf06cc5..6cfaecf0f 100644 --- a/bbot/core/__init__.py +++ b/bbot/core/__init__.py @@ -1,4 +1,3 @@ -# logging -from .logger import init_logging +from .core import BBOTCore -init_logging() +CORE = BBOTCore() diff --git a/bbot/core/config/__init__.py b/bbot/core/config/__init__.py new file mode 100644 index 000000000..c36d91f48 --- /dev/null +++ b/bbot/core/config/__init__.py @@ -0,0 +1,12 @@ +import sys +import multiprocessing as mp + +try: + mp.set_start_method("spawn") +except Exception: + start_method = 
mp.get_start_method() + if start_method != "spawn": + print( + f"[WARN] Multiprocessing spawn method is set to {start_method}. This may negatively affect performance.", + file=sys.stderr, + ) diff --git a/bbot/core/config/files.py b/bbot/core/config/files.py new file mode 100644 index 000000000..6547d02ec --- /dev/null +++ b/bbot/core/config/files.py @@ -0,0 +1,55 @@ +import sys +from pathlib import Path +from omegaconf import OmegaConf + +from ..helpers.misc import mkdir +from ...logger import log_to_stderr +from ...errors import ConfigLoadError + + +bbot_code_dir = Path(__file__).parent.parent.parent + + +class BBOTConfigFiles: + + config_dir = (Path.home() / ".config" / "bbot").resolve() + defaults_filename = (bbot_code_dir / "defaults.yml").resolve() + config_filename = (config_dir / "bbot.yml").resolve() + + def __init__(self, core): + self.core = core + + def ensure_config_file(self): + mkdir(self.config_dir) + + comment_notice = ( + "# NOTICE: THESE ENTRIES ARE COMMENTED BY DEFAULT\n" + + "# Please be sure to uncomment when inserting API keys, etc.\n" + ) + + # ensure bbot.yml + if not self.config_filename.exists(): + log_to_stderr(f"Creating BBOT config at {self.config_filename}") + yaml = OmegaConf.to_yaml(self.core.default_config) + yaml = comment_notice + "\n".join(f"# {line}" for line in yaml.splitlines()) + with open(str(self.config_filename), "w") as f: + f.write(yaml) + + def _get_config(self, filename, name="config"): + filename = Path(filename).resolve() + try: + conf = OmegaConf.load(str(filename)) + cli_silent = any(x in sys.argv for x in ("-s", "--silent")) + if __name__ == "__main__" and not cli_silent: + log_to_stderr(f"Loaded {name} from {filename}") + return conf + except Exception as e: + if filename.exists(): + raise ConfigLoadError(f"Error parsing config at {filename}:\n\n{e}") + return OmegaConf.create() + + def get_custom_config(self): + return self._get_config(self.config_filename, name="config") + + def get_default_config(self): + return self._get_config(self.defaults_filename, name="defaults") diff --git a/bbot/core/config/logger.py b/bbot/core/config/logger.py new file mode 100644 index 000000000..b6aec39aa --- /dev/null +++ b/bbot/core/config/logger.py @@ -0,0 +1,258 @@ +import sys +import atexit +import logging +from copy import copy +import multiprocessing +import logging.handlers +from pathlib import Path + +from ..helpers.misc import mkdir, error_and_exit +from ...logger import colorize, loglevel_mapping + + +debug_format = logging.Formatter("%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)s %(message)s") + + +class ColoredFormatter(logging.Formatter): + """ + Pretty colors for terminal + """ + + formatter = logging.Formatter("%(levelname)s %(message)s") + module_formatter = logging.Formatter("%(levelname)s %(name)s: %(message)s") + + def format(self, record): + colored_record = copy(record) + levelname = colored_record.levelname + levelshort = loglevel_mapping.get(levelname, "INFO") + colored_record.levelname = colorize(f"[{levelshort}]", level=levelname) + if levelname == "CRITICAL" or levelname.startswith("HUGE"): + colored_record.msg = colorize(colored_record.msg, level=levelname) + # remove name + if colored_record.name.startswith("bbot.modules."): + colored_record.name = colored_record.name.split("bbot.modules.")[-1] + return self.module_formatter.format(colored_record) + return self.formatter.format(colored_record) + + +class BBOTLogger: + """ + The main BBOT logger. 
+ + The job of this class is to manage the different log handlers in BBOT, + allow adding new log handlers, and easily switching log levels on the fly. + """ + + def __init__(self, core): + # custom logging levels + if getattr(logging, "STDOUT", None) is None: + self.addLoggingLevel("STDOUT", 100) + self.addLoggingLevel("TRACE", 49) + self.addLoggingLevel("HUGEWARNING", 31) + self.addLoggingLevel("HUGESUCCESS", 26) + self.addLoggingLevel("SUCCESS", 25) + self.addLoggingLevel("HUGEINFO", 21) + self.addLoggingLevel("HUGEVERBOSE", 16) + self.addLoggingLevel("VERBOSE", 15) + self.verbosity_levels_toggle = [logging.INFO, logging.VERBOSE, logging.DEBUG] + + self._loggers = None + self._log_handlers = None + self._log_level = None + self.root_logger = logging.getLogger() + self.core_logger = logging.getLogger("bbot") + self.core = core + + self.listener = None + + self.process_name = multiprocessing.current_process().name + if self.process_name == "MainProcess": + self.queue = multiprocessing.Queue() + self.setup_queue_handler() + # Start the QueueListener + self.listener = logging.handlers.QueueListener(self.queue, *self.log_handlers.values()) + self.listener.start() + atexit.register(self.listener.stop) + + self.log_level = logging.INFO + + def setup_queue_handler(self, logging_queue=None, log_level=logging.DEBUG): + if logging_queue is None: + logging_queue = self.queue + else: + self.queue = logging_queue + self.queue_handler = logging.handlers.QueueHandler(logging_queue) + + self.root_logger.addHandler(self.queue_handler) + + self.core_logger.setLevel(log_level) + # disable asyncio logging for child processes + if self.process_name != "MainProcess": + logging.getLogger("asyncio").setLevel(logging.ERROR) + + def addLoggingLevel(self, levelName, levelNum, methodName=None): + """ + Comprehensively adds a new logging level to the `logging` module and the + currently configured logging class. + + `levelName` becomes an attribute of the `logging` module with the value + `levelNum`. `methodName` becomes a convenience method for both `logging` + itself and the class returned by `logging.getLoggerClass()` (usually just + `logging.Logger`). If `methodName` is not specified, `levelName.lower()` is + used. 
+ + To avoid accidental clobberings of existing attributes, this method will + raise an `AttributeError` if the level name is already an attribute of the + `logging` module or if the method name is already present + + Example + ------- + >>> addLoggingLevel('TRACE', logging.DEBUG - 5) + >>> logging.getLogger(__name__).setLevel('TRACE') + >>> logging.getLogger(__name__).trace('that worked') + >>> logging.trace('so did this') + >>> logging.TRACE + 5 + + """ + if not methodName: + methodName = levelName.lower() + + if hasattr(logging, levelName): + raise AttributeError(f"{levelName} already defined in logging module") + if hasattr(logging, methodName): + raise AttributeError(f"{methodName} already defined in logging module") + if hasattr(logging.getLoggerClass(), methodName): + raise AttributeError(f"{methodName} already defined in logger class") + + # This method was inspired by the answers to Stack Overflow post + # http://stackoverflow.com/q/2183233/2988730, especially + # http://stackoverflow.com/a/13638084/2988730 + def logForLevel(self, message, *args, **kwargs): + if self.isEnabledFor(levelNum): + self._log(levelNum, message, args, **kwargs) + + def logToRoot(message, *args, **kwargs): + logging.log(levelNum, message, *args, **kwargs) + + logging.addLevelName(levelNum, levelName) + setattr(logging, levelName, levelNum) + setattr(logging.getLoggerClass(), methodName, logForLevel) + setattr(logging, methodName, logToRoot) + + @property + def loggers(self): + if self._loggers is None: + self._loggers = [ + logging.getLogger("bbot"), + logging.getLogger("asyncio"), + ] + return self._loggers + + def add_log_handler(self, handler, formatter=None): + if self.listener is None: + return + if handler.formatter is None: + handler.setFormatter(debug_format) + if handler not in self.listener.handlers: + self.listener.handlers = self.listener.handlers + (handler,) + + def remove_log_handler(self, handler): + if self.listener is None: + return + if handler in self.listener.handlers: + new_handlers = list(self.listener.handlers) + new_handlers.remove(handler) + self.listener.handlers = tuple(new_handlers) + + def include_logger(self, logger): + if logger not in self.loggers: + self.loggers.append(logger) + if self.log_level is not None: + logger.setLevel(self.log_level) + for handler in self.log_handlers.values(): + self.add_log_handler(handler) + + @property + def log_handlers(self): + if self._log_handlers is None: + log_dir = Path(self.core.home) / "logs" + if not mkdir(log_dir, raise_error=False): + error_and_exit(f"Failure creating or error writing to BBOT logs directory ({log_dir})") + + # Main log file + main_handler = logging.handlers.TimedRotatingFileHandler( + f"{log_dir}/bbot.log", when="d", interval=1, backupCount=14 + ) + + # Separate log file for debugging + debug_handler = logging.handlers.TimedRotatingFileHandler( + f"{log_dir}/bbot.debug.log", when="d", interval=1, backupCount=14 + ) + + def stderr_filter(record): + if record.levelno == logging.STDOUT or ( + record.levelno == logging.TRACE and self.log_level > logging.DEBUG + ): + return False + if record.levelno < self.log_level: + return False + return True + + # Log to stderr + stderr_handler = logging.StreamHandler(sys.stderr) + stderr_handler.addFilter(stderr_filter) + # Log to stdout + stdout_handler = logging.StreamHandler(sys.stdout) + stdout_handler.addFilter(lambda x: x.levelno == logging.STDOUT) + # log to files + debug_handler.addFilter( + lambda x: x.levelno == logging.TRACE or (x.levelno < logging.VERBOSE and x.levelno 
!= logging.STDOUT) + ) + main_handler.addFilter( + lambda x: x.levelno not in (logging.STDOUT, logging.TRACE) and x.levelno >= logging.VERBOSE + ) + + # Set log format + debug_handler.setFormatter(debug_format) + main_handler.setFormatter(debug_format) + stderr_handler.setFormatter(ColoredFormatter("%(levelname)s %(name)s: %(message)s")) + stdout_handler.setFormatter(logging.Formatter("%(message)s")) + + self._log_handlers = { + "stderr": stderr_handler, + "stdout": stdout_handler, + "file_debug": debug_handler, + "file_main": main_handler, + } + return self._log_handlers + + @property + def log_level(self): + if self._log_level is None: + return logging.INFO + return self._log_level + + @log_level.setter + def log_level(self, level): + self.set_log_level(level) + + def set_log_level(self, level, logger=None): + if isinstance(level, str): + level = logging.getLevelName(level) + if logger is not None: + logger.hugeinfo(f"Setting log level to {logging.getLevelName(level)}") + self._log_level = level + for logger in self.loggers: + logger.setLevel(level) + + def toggle_log_level(self, logger=None): + if self.log_level in self.verbosity_levels_toggle: + for i, level in enumerate(self.verbosity_levels_toggle): + if self.log_level == level: + self.set_log_level( + self.verbosity_levels_toggle[(i + 1) % len(self.verbosity_levels_toggle)], logger=logger + ) + break + else: + self.set_log_level(self.verbosity_levels_toggle[0], logger=logger) diff --git a/bbot/core/configurator/__init__.py b/bbot/core/configurator/__init__.py deleted file mode 100644 index 15962ce59..000000000 --- a/bbot/core/configurator/__init__.py +++ /dev/null @@ -1,103 +0,0 @@ -import re -from omegaconf import OmegaConf - -from . import files, args, environ -from ..errors import ConfigLoadError -from ...modules import module_loader -from ..helpers.logger import log_to_stderr -from ..helpers.misc import error_and_exit, filter_dict, clean_dict, match_and_exit, is_file - -# cached sudo password -bbot_sudo_pass = None - -modules_config = OmegaConf.create( - { - "modules": module_loader.configs(type="scan"), - "output_modules": module_loader.configs(type="output"), - "internal_modules": module_loader.configs(type="internal"), - } -) - -try: - config = OmegaConf.merge( - # first, pull module defaults - modules_config, - # then look in .yaml files - files.get_config(), - # finally, pull from CLI arguments - args.get_config(), - ) -except ConfigLoadError as e: - error_and_exit(e) - - -config = environ.prepare_environment(config) -default_config = OmegaConf.merge(files.default_config, modules_config) - - -sentinel = object() - - -exclude_from_validation = re.compile(r".*modules\.[a-z0-9_]+\.(?:batch_size|max_event_handlers)$") - - -def check_cli_args(): - conf = [a for a in args.cli_config if not is_file(a)] - all_options = None - for c in conf: - c = c.split("=")[0].strip() - v = OmegaConf.select(default_config, c, default=sentinel) - # if option isn't in the default config - if v is sentinel: - if exclude_from_validation.match(c): - continue - if all_options is None: - from ...modules import module_loader - - modules_options = set() - for module_options in module_loader.modules_options().values(): - modules_options.update(set(o[0] for o in module_options)) - global_options = set(default_config.keys()) - {"modules", "output_modules"} - all_options = global_options.union(modules_options) - match_and_exit(c, all_options, msg="module option") - - -def ensure_config_files(): - secrets_strings = ["api_key", "username", "password", "token", 
"secret", "_id"] - exclude_keys = ["modules", "output_modules", "internal_modules"] - - comment_notice = ( - "# NOTICE: THESE ENTRIES ARE COMMENTED BY DEFAULT\n" - + "# Please be sure to uncomment when inserting API keys, etc.\n" - ) - - # ensure bbot.yml - if not files.config_filename.exists(): - log_to_stderr(f"Creating BBOT config at {files.config_filename}") - no_secrets_config = OmegaConf.to_object(default_config) - no_secrets_config = clean_dict( - no_secrets_config, - *secrets_strings, - fuzzy=True, - exclude_keys=exclude_keys, - ) - yaml = OmegaConf.to_yaml(no_secrets_config) - yaml = comment_notice + "\n".join(f"# {line}" for line in yaml.splitlines()) - with open(str(files.config_filename), "w") as f: - f.write(yaml) - - # ensure secrets.yml - if not files.secrets_filename.exists(): - log_to_stderr(f"Creating BBOT secrets at {files.secrets_filename}") - secrets_only_config = OmegaConf.to_object(default_config) - secrets_only_config = filter_dict( - secrets_only_config, - *secrets_strings, - fuzzy=True, - exclude_keys=exclude_keys, - ) - yaml = OmegaConf.to_yaml(secrets_only_config) - yaml = comment_notice + "\n".join(f"# {line}" for line in yaml.splitlines()) - with open(str(files.secrets_filename), "w") as f: - f.write(yaml) - files.secrets_filename.chmod(0o600) diff --git a/bbot/core/configurator/args.py b/bbot/core/configurator/args.py deleted file mode 100644 index 173583827..000000000 --- a/bbot/core/configurator/args.py +++ /dev/null @@ -1,255 +0,0 @@ -import sys -import argparse -from pathlib import Path -from omegaconf import OmegaConf -from contextlib import suppress - -from ...modules import module_loader -from ..helpers.logger import log_to_stderr -from ..helpers.misc import chain_lists, match_and_exit, is_file - -module_choices = sorted(set(module_loader.configs(type="scan"))) -output_module_choices = sorted(set(module_loader.configs(type="output"))) - -flag_choices = set() -for m, c in module_loader.preloaded().items(): - flag_choices.update(set(c.get("flags", []))) - - -class BBOTArgumentParser(argparse.ArgumentParser): - _dummy = False - - def parse_args(self, *args, **kwargs): - """ - Allow space or comma-separated entries for modules and targets - For targets, also allow input files containing additional targets - """ - ret = super().parse_args(*args, **kwargs) - # silent implies -y - if ret.silent: - ret.yes = True - ret.modules = chain_lists(ret.modules) - ret.exclude_modules = chain_lists(ret.exclude_modules) - ret.output_modules = chain_lists(ret.output_modules) - ret.targets = chain_lists(ret.targets, try_files=True, msg="Reading targets from file: {filename}") - ret.whitelist = chain_lists(ret.whitelist, try_files=True, msg="Reading whitelist from file: {filename}") - ret.blacklist = chain_lists(ret.blacklist, try_files=True, msg="Reading blacklist from file: {filename}") - ret.flags = chain_lists(ret.flags) - ret.exclude_flags = chain_lists(ret.exclude_flags) - ret.require_flags = chain_lists(ret.require_flags) - for m in ret.modules: - if m not in module_choices and not self._dummy: - match_and_exit(m, module_choices, msg="module") - for m in ret.exclude_modules: - if m not in module_choices and not self._dummy: - match_and_exit(m, module_choices, msg="module") - for m in ret.output_modules: - if m not in output_module_choices and not self._dummy: - match_and_exit(m, output_module_choices, msg="output module") - for f in set(ret.flags + ret.require_flags): - if f not in flag_choices and not self._dummy: - if f not in flag_choices and not self._dummy: - 
match_and_exit(f, flag_choices, msg="flag") - return ret - - -class DummyArgumentParser(BBOTArgumentParser): - _dummy = True - - def error(self, message): - pass - - -scan_examples = [ - ( - "Subdomains", - "Perform a full subdomain enumeration on evilcorp.com", - "bbot -t evilcorp.com -f subdomain-enum", - ), - ( - "Subdomains (passive only)", - "Perform a passive-only subdomain enumeration on evilcorp.com", - "bbot -t evilcorp.com -f subdomain-enum -rf passive", - ), - ( - "Subdomains + port scan + web screenshots", - "Port-scan every subdomain, screenshot every webpage, output to current directory", - "bbot -t evilcorp.com -f subdomain-enum -m nmap gowitness -n my_scan -o .", - ), - ( - "Subdomains + basic web scan", - "A basic web scan includes wappalyzer, robots.txt, and other non-intrusive web modules", - "bbot -t evilcorp.com -f subdomain-enum web-basic", - ), - ( - "Web spider", - "Crawl www.evilcorp.com up to a max depth of 2, automatically extracting emails, secrets, etc.", - "bbot -t www.evilcorp.com -m httpx robots badsecrets secretsdb -c web_spider_distance=2 web_spider_depth=2", - ), - ( - "Everything everywhere all at once", - "Subdomains, emails, cloud buckets, port scan, basic web, web screenshots, nuclei", - "bbot -t evilcorp.com -f subdomain-enum email-enum cloud-enum web-basic -m nmap gowitness nuclei --allow-deadly", - ), -] - -usage_examples = [ - ( - "List modules", - "", - "bbot -l", - ), - ( - "List flags", - "", - "bbot -lf", - ), -] - - -epilog = "EXAMPLES\n" -for example in (scan_examples, usage_examples): - for title, description, command in example: - epilog += f"\n {title}:\n {command}\n" - - -parser = BBOTArgumentParser( - description="Bighuge BLS OSINT Tool", formatter_class=argparse.RawTextHelpFormatter, epilog=epilog -) -dummy_parser = DummyArgumentParser( - description="Bighuge BLS OSINT Tool", formatter_class=argparse.RawTextHelpFormatter, epilog=epilog -) -for p in (parser, dummy_parser): - p.add_argument("--help-all", action="store_true", help="Display full help including module config options") - target = p.add_argument_group(title="Target") - target.add_argument("-t", "--targets", nargs="+", default=[], help="Targets to seed the scan", metavar="TARGET") - target.add_argument( - "-w", - "--whitelist", - nargs="+", - default=[], - help="What's considered in-scope (by default it's the same as --targets)", - ) - target.add_argument("-b", "--blacklist", nargs="+", default=[], help="Don't touch these things") - target.add_argument( - "--strict-scope", - action="store_true", - help="Don't consider subdomains of target/whitelist to be in-scope", - ) - modules = p.add_argument_group(title="Modules") - modules.add_argument( - "-m", - "--modules", - nargs="+", - default=[], - help=f'Modules to enable. Choices: {",".join(module_choices)}', - metavar="MODULE", - ) - modules.add_argument("-l", "--list-modules", action="store_true", help=f"List available modules.") - modules.add_argument( - "-em", "--exclude-modules", nargs="+", default=[], help=f"Exclude these modules.", metavar="MODULE" - ) - modules.add_argument( - "-f", - "--flags", - nargs="+", - default=[], - help=f'Enable modules by flag. Choices: {",".join(sorted(flag_choices))}', - metavar="FLAG", - ) - modules.add_argument("-lf", "--list-flags", action="store_true", help=f"List available flags.") - modules.add_argument( - "-rf", - "--require-flags", - nargs="+", - default=[], - help=f"Only enable modules with these flags (e.g. 
-rf passive)", - metavar="FLAG", - ) - modules.add_argument( - "-ef", - "--exclude-flags", - nargs="+", - default=[], - help=f"Disable modules with these flags. (e.g. -ef aggressive)", - metavar="FLAG", - ) - modules.add_argument( - "-om", - "--output-modules", - nargs="+", - default=["human", "json", "csv"], - help=f'Output module(s). Choices: {",".join(output_module_choices)}', - metavar="MODULE", - ) - modules.add_argument("--allow-deadly", action="store_true", help="Enable the use of highly aggressive modules") - scan = p.add_argument_group(title="Scan") - scan.add_argument("-n", "--name", help="Name of scan (default: random)", metavar="SCAN_NAME") - scan.add_argument( - "-o", - "--output-dir", - metavar="DIR", - ) - scan.add_argument( - "-c", - "--config", - nargs="*", - help="custom config file, or configuration options in key=value format: 'modules.shodan.api_key=1234'", - metavar="CONFIG", - ) - scan.add_argument("-v", "--verbose", action="store_true", help="Be more verbose") - scan.add_argument("-d", "--debug", action="store_true", help="Enable debugging") - scan.add_argument("-s", "--silent", action="store_true", help="Be quiet") - scan.add_argument("--force", action="store_true", help="Run scan even if module setups fail") - scan.add_argument("-y", "--yes", action="store_true", help="Skip scan confirmation prompt") - scan.add_argument("--dry-run", action="store_true", help=f"Abort before executing scan") - scan.add_argument( - "--current-config", - action="store_true", - help="Show current config in YAML format", - ) - deps = p.add_argument_group( - title="Module dependencies", description="Control how modules install their dependencies" - ) - g2 = deps.add_mutually_exclusive_group() - g2.add_argument("--no-deps", action="store_true", help="Don't install module dependencies") - g2.add_argument("--force-deps", action="store_true", help="Force install all module dependencies") - g2.add_argument("--retry-deps", action="store_true", help="Try again to install failed module dependencies") - g2.add_argument( - "--ignore-failed-deps", action="store_true", help="Run modules even if they have failed dependencies" - ) - g2.add_argument("--install-all-deps", action="store_true", help="Install dependencies for all modules") - agent = p.add_argument_group(title="Agent", description="Report back to a central server") - agent.add_argument("-a", "--agent-mode", action="store_true", help="Start in agent mode") - misc = p.add_argument_group(title="Misc") - misc.add_argument("--version", action="store_true", help="show BBOT version and exit") - - -cli_options = None -with suppress(Exception): - cli_options = dummy_parser.parse_args() - - -cli_config = [] - - -def get_config(): - global cli_config - with suppress(Exception): - if cli_options.config: - cli_config = cli_options.config - if cli_config: - filename = Path(cli_config[0]).resolve() - if len(cli_config) == 1 and is_file(filename): - try: - conf = OmegaConf.load(str(filename)) - log_to_stderr(f"Loaded custom config from {filename}") - return conf - except Exception as e: - log_to_stderr(f"Error parsing custom config at {filename}: {e}", level="ERROR") - sys.exit(2) - try: - return OmegaConf.from_cli(cli_config) - except Exception as e: - log_to_stderr(f"Error parsing command-line config: {e}", level="ERROR") - sys.exit(2) diff --git a/bbot/core/configurator/environ.py b/bbot/core/configurator/environ.py deleted file mode 100644 index 4358bb78d..000000000 --- a/bbot/core/configurator/environ.py +++ /dev/null @@ -1,153 +0,0 @@ -import os 
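The deleted `get_config()` above accepts either a path to a single YAML file or a list of `key=value` strings, and in the latter case hands them straight to OmegaConf. A minimal sketch of that dotted-key parsing (the values here are made up):

```python
from omegaconf import OmegaConf

# key=value pairs as they would arrive via -c on the command line
cli_config = ["modules.shodan.api_key=deadbeef", "http_timeout=10"]

# the same call the deleted get_config() makes:
# dotted keys become a nested config
conf = OmegaConf.from_cli(cli_config)

print(conf.modules.shodan.api_key)  # deadbeef
print(OmegaConf.to_yaml(conf))
```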
-import sys -import omegaconf -from pathlib import Path - -from . import args -from ...modules import module_loader -from ..helpers.misc import cpu_architecture, os_platform, os_platform_friendly - - -# keep track of whether BBOT is being executed via the CLI -cli_execution = False - - -def increase_limit(new_limit): - try: - import resource - - # Get current limit - soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE) - - new_limit = min(new_limit, hard_limit) - - # Attempt to set new limit - resource.setrlimit(resource.RLIMIT_NOFILE, (new_limit, hard_limit)) - except Exception as e: - sys.stderr.write(f"Failed to set new ulimit: {e}\n") - - -increase_limit(65535) - - -def flatten_config(config, base="bbot"): - """ - Flatten a JSON-like config into a list of environment variables: - {"modules": [{"httpx": {"timeout": 5}}]} --> "BBOT_MODULES_HTTPX_TIMEOUT=5" - """ - if type(config) == omegaconf.dictconfig.DictConfig: - for k, v in config.items(): - new_base = f"{base}_{k}" - if type(v) == omegaconf.dictconfig.DictConfig: - yield from flatten_config(v, base=new_base) - elif type(v) != omegaconf.listconfig.ListConfig: - yield (new_base.upper(), str(v)) - - -def add_to_path(v, k="PATH"): - var_list = os.environ.get(k, "").split(":") - deduped_var_list = [] - for _ in var_list: - if not _ in deduped_var_list: - deduped_var_list.append(_) - if not v in deduped_var_list: - deduped_var_list = [v] + deduped_var_list - new_var_str = ":".join(deduped_var_list) - os.environ[k] = new_var_str - - -def prepare_environment(bbot_config): - """ - Sync config to OS environment variables - """ - # ensure bbot_home - if not "home" in bbot_config: - bbot_config["home"] = "~/.bbot" - home = Path(bbot_config["home"]).expanduser().resolve() - bbot_config["home"] = str(home) - - # if we're running in a virtual environment, make sure to include its /bin in PATH - if sys.prefix != sys.base_prefix: - bin_dir = str(Path(sys.prefix) / "bin") - add_to_path(bin_dir) - - # add ~/.local/bin to PATH - local_bin_dir = str(Path.home() / ".local" / "bin") - add_to_path(local_bin_dir) - - # ensure bbot_tools - bbot_tools = home / "tools" - os.environ["BBOT_TOOLS"] = str(bbot_tools) - if not str(bbot_tools) in os.environ.get("PATH", "").split(":"): - os.environ["PATH"] = f'{bbot_tools}:{os.environ.get("PATH", "").strip(":")}' - # ensure bbot_cache - bbot_cache = home / "cache" - os.environ["BBOT_CACHE"] = str(bbot_cache) - # ensure bbot_temp - bbot_temp = home / "temp" - os.environ["BBOT_TEMP"] = str(bbot_temp) - # ensure bbot_lib - bbot_lib = home / "lib" - os.environ["BBOT_LIB"] = str(bbot_lib) - # export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:~/.bbot/lib/ - add_to_path(str(bbot_lib), k="LD_LIBRARY_PATH") - - # platform variables - os.environ["BBOT_OS_PLATFORM"] = os_platform() - os.environ["BBOT_OS"] = os_platform_friendly() - os.environ["BBOT_CPU_ARCH"] = cpu_architecture() - - # exchange certain options between CLI args and config - if cli_execution and args.cli_options is not None: - # deps - bbot_config["retry_deps"] = args.cli_options.retry_deps - bbot_config["force_deps"] = args.cli_options.force_deps - bbot_config["no_deps"] = args.cli_options.no_deps - bbot_config["ignore_failed_deps"] = args.cli_options.ignore_failed_deps - # debug - bbot_config["debug"] = args.cli_options.debug - bbot_config["silent"] = args.cli_options.silent - - import logging - - log = logging.getLogger() - if bbot_config.get("debug", False): - bbot_config["silent"] = False - log = logging.getLogger("bbot") - 
log.setLevel(logging.DEBUG) - logging.getLogger("asyncio").setLevel(logging.DEBUG) - elif bbot_config.get("silent", False): - log = logging.getLogger("bbot") - log.setLevel(logging.CRITICAL) - - # copy config to environment - bbot_environ = flatten_config(bbot_config) - os.environ.update(bbot_environ) - - # handle HTTP proxy - http_proxy = bbot_config.get("http_proxy", "") - if http_proxy: - os.environ["HTTP_PROXY"] = http_proxy - os.environ["HTTPS_PROXY"] = http_proxy - else: - os.environ.pop("HTTP_PROXY", None) - os.environ.pop("HTTPS_PROXY", None) - - # replace environment variables in preloaded modules - module_loader.find_and_replace(**os.environ) - - # ssl verification - import urllib3 - - urllib3.disable_warnings() - ssl_verify = bbot_config.get("ssl_verify", False) - if not ssl_verify: - import requests - import functools - - requests.adapters.BaseAdapter.send = functools.partialmethod(requests.adapters.BaseAdapter.send, verify=False) - requests.adapters.HTTPAdapter.send = functools.partialmethod(requests.adapters.HTTPAdapter.send, verify=False) - requests.Session.request = functools.partialmethod(requests.Session.request, verify=False) - requests.request = functools.partial(requests.request, verify=False) - - return bbot_config diff --git a/bbot/core/configurator/files.py b/bbot/core/configurator/files.py deleted file mode 100644 index e56950597..000000000 --- a/bbot/core/configurator/files.py +++ /dev/null @@ -1,40 +0,0 @@ -import sys -from pathlib import Path -from omegaconf import OmegaConf - -from ..helpers.misc import mkdir -from ..errors import ConfigLoadError -from ..helpers.logger import log_to_stderr - -config_dir = (Path.home() / ".config" / "bbot").resolve() -defaults_filename = (Path(__file__).parent.parent.parent / "defaults.yml").resolve() -mkdir(config_dir) -config_filename = (config_dir / "bbot.yml").resolve() -secrets_filename = (config_dir / "secrets.yml").resolve() -default_config = None - - -def _get_config(filename, name="config"): - notify = False - if sys.argv and sys.argv[0].endswith("bbot") and not any(x in sys.argv for x in ("-s", "--silent")): - notify = True - filename = Path(filename).resolve() - try: - conf = OmegaConf.load(str(filename)) - if notify and __name__ == "__main__": - log_to_stderr(f"Loaded {name} from {filename}") - return conf - except Exception as e: - if filename.exists(): - raise ConfigLoadError(f"Error parsing config at {filename}:\n\n{e}") - return OmegaConf.create() - - -def get_config(): - global default_config - default_config = _get_config(defaults_filename, name="defaults") - return OmegaConf.merge( - default_config, - _get_config(config_filename, name="config"), - _get_config(secrets_filename, name="secrets"), - ) diff --git a/bbot/core/core.py b/bbot/core/core.py new file mode 100644 index 000000000..1c43e5035 --- /dev/null +++ b/bbot/core/core.py @@ -0,0 +1,180 @@ +import logging +import traceback +from copy import copy +import multiprocessing +from pathlib import Path +from omegaconf import OmegaConf + +DEFAULT_CONFIG = None + + +class BBOTCore: + """ + This is the first thing that loads when you import BBOT. + + Unlike a Preset, BBOTCore holds only the config, not scan-specific stuff like targets, flags, modules, etc. 
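The `files.py` module deleted above assembled the old global config from three YAML layers, with later layers winning on conflicting keys: the packaged `defaults.yml`, then `~/.config/bbot/bbot.yml`, then `secrets.yml`. A sketch of that layering, under those assumptions:

```python
from pathlib import Path
from omegaconf import OmegaConf

config_dir = Path.home() / ".config" / "bbot"

def load_or_empty(path):
    # a missing file contributes nothing, mirroring the old _get_config()
    try:
        return OmegaConf.load(str(path))
    except FileNotFoundError:
        return OmegaConf.create()

# merge order == increasing precedence
config = OmegaConf.merge(
    load_or_empty("defaults.yml"),              # packaged defaults
    load_or_empty(config_dir / "bbot.yml"),     # user config
    load_or_empty(config_dir / "secrets.yml"),  # API keys etc.
)
```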
+ + Its main jobs are: + + - set up logging + - keep separation between the `default` and `custom` config (this allows presets to only display the config options that have changed) + - allow for easy merging of configs + - load quickly + """ + + class BBOTProcess(multiprocessing.Process): + + def __init__(self, *args, **kwargs): + self.logging_queue = kwargs.pop("logging_queue") + self.log_level = kwargs.pop("log_level") + super().__init__(*args, **kwargs) + + def run(self): + log = logging.getLogger("bbot.core.process") + try: + from bbot.core import CORE + + CORE.logger.setup_queue_handler(self.logging_queue, self.log_level) + super().run() + except KeyboardInterrupt: + log.warning(f"Got KeyboardInterrupt in {self.name}") + log.trace(traceback.format_exc()) + except BaseException as e: + log.warning(f"Error in {self.name}: {e}") + log.trace(traceback.format_exc()) + + def __init__(self): + self._logger = None + self._files_config = None + + self.bbot_sudo_pass = None + + self._config = None + self._custom_config = None + + # ensure bbot home dir + if not "home" in self.config: + self.custom_config["home"] = "~/.bbot" + + # bare minimum == logging + self.logger + self.log = logging.getLogger("bbot.core") + + @property + def home(self): + return Path(self.config["home"]).expanduser().resolve() + + @property + def cache_dir(self): + return self.home / "cache" + + @property + def tools_dir(self): + return self.home / "tools" + + @property + def temp_dir(self): + return self.home / "temp" + + @property + def lib_dir(self): + return self.home / "lib" + + @property + def scans_dir(self): + return self.home / "scans" + + @property + def config(self): + """ + .config is just .default_config + .custom_config merged together + + any new values should be added to custom_config. + """ + if self._config is None: + self._config = OmegaConf.merge(self.default_config, self.custom_config) + # set read-only flag (change .custom_config instead) + OmegaConf.set_readonly(self._config, True) + return self._config + + @property + def default_config(self): + """ + The default BBOT config (from `defaults.yml`). Read-only. + """ + global DEFAULT_CONFIG + if DEFAULT_CONFIG is None: + self.default_config = self.files_config.get_default_config() + return DEFAULT_CONFIG + + @default_config.setter + def default_config(self, value): + # we temporarily clear out the config so it can be refreshed if/when default_config changes + global DEFAULT_CONFIG + self._config = None + DEFAULT_CONFIG = value + # set read-only flag (change .custom_config instead) + OmegaConf.set_readonly(DEFAULT_CONFIG, True) + + @property + def custom_config(self): + """ + Custom BBOT config (from `~/.config/bbot/bbot.yml`) + """ + # we temporarily clear out the config so it can be refreshed if/when custom_config changes + self._config = None + if self._custom_config is None: + self._custom_config = self.files_config.get_custom_config() + return self._custom_config + + @custom_config.setter + def custom_config(self, value): + # we temporarily clear out the config so it can be refreshed if/when custom_config changes + self._config = None + self._custom_config = value + + def merge_custom(self, config): + """ + Merge a config into the custom config. + """ + self.custom_config = OmegaConf.merge(self.custom_config, OmegaConf.create(config)) + + def merge_default(self, config): + """ + Merge a config into the default config. 
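Because `.config` is rebuilt from the two layers whenever either one changes, runtime changes should always go through the custom layer. A short usage sketch against the global `CORE` object (the option name is illustrative):

```python
from bbot.core import CORE

# .config is a read-only merge of default_config + custom_config
home = CORE.config["home"]

# new values go into the custom layer...
CORE.merge_custom({"http_timeout": 10})

# ...and the merged view reflects them on the next access
assert CORE.config["http_timeout"] == 10
```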
+ """ + self.default_config = OmegaConf.merge(self.default_config, OmegaConf.create(config)) + + def copy(self): + """ + Return a semi-shallow copy of self. (`custom_config` is copied, but `default_config` stays the same) + """ + core_copy = copy(self) + core_copy._custom_config = self._custom_config.copy() + return core_copy + + @property + def files_config(self): + """ + Get the configs from `bbot.yml` and `defaults.yml` + """ + if self._files_config is None: + from .config import files + + self.files = files + self._files_config = files.BBOTConfigFiles(self) + return self._files_config + + def create_process(self, *args, **kwargs): + process = self.BBOTProcess(*args, logging_queue=self.logger.queue, log_level=self.logger.log_level, **kwargs) + process.daemon = True + return process + + @property + def logger(self): + self.config + if self._logger is None: + from .config.logger import BBOTLogger + + self._logger = BBOTLogger(self) + return self._logger diff --git a/bbot/core/engine.py b/bbot/core/engine.py new file mode 100644 index 000000000..24781ab3b --- /dev/null +++ b/bbot/core/engine.py @@ -0,0 +1,212 @@ +import zmq +import atexit +import pickle +import asyncio +import inspect +import logging +import tempfile +import traceback +import zmq.asyncio +from pathlib import Path +from contextlib import asynccontextmanager, suppress + +from bbot.core import CORE +from bbot.core.helpers.misc import rand_string + +CMD_EXIT = 1000 + + +class EngineClient: + + SERVER_CLASS = None + + def __init__(self, **kwargs): + self.name = f"EngineClient {self.__class__.__name__}" + if self.SERVER_CLASS is None: + raise ValueError(f"Must set EngineClient SERVER_CLASS, {self.SERVER_CLASS}") + self.CMDS = dict(self.SERVER_CLASS.CMDS) + for k, v in list(self.CMDS.items()): + self.CMDS[v] = k + self.log = logging.getLogger(f"bbot.core.{self.__class__.__name__.lower()}") + self.socket_address = f"zmq_{rand_string(8)}.sock" + self.socket_path = Path(tempfile.gettempdir()) / self.socket_address + self.server_kwargs = kwargs.pop("server_kwargs", {}) + self._server_process = None + self.context = zmq.asyncio.Context() + atexit.register(self.cleanup) + + async def run_and_return(self, command, **kwargs): + async with self.new_socket() as socket: + message = self.make_message(command, args=kwargs) + await socket.send(message) + binary = await socket.recv() + # self.log.debug(f"{self.name}.{command}({kwargs}) got binary: {binary}") + message = pickle.loads(binary) + self.log.debug(f"{self.name}.{command}({kwargs}) got message: {message}") + # error handling + if self.check_error(message): + return + return message + + async def run_and_yield(self, command, **kwargs): + message = self.make_message(command, args=kwargs) + async with self.new_socket() as socket: + await socket.send(message) + while 1: + binary = await socket.recv() + # self.log.debug(f"{self.name}.{command}({kwargs}) got binary: {binary}") + message = pickle.loads(binary) + self.log.debug(f"{self.name}.{command}({kwargs}) got message: {message}") + # error handling + if self.check_error(message) or self.check_stop(message): + break + yield message + + def check_error(self, message): + if isinstance(message, dict) and len(message) == 1 and "_e" in message: + error, trace = message["_e"] + self.log.error(error) + self.log.trace(trace) + return True + return False + + def check_stop(self, message): + if isinstance(message, dict) and len(message) == 1 and "_s" in message: + return True + return False + + def make_message(self, command, args): + try: + cmd_id 
= self.CMDS[command] + except KeyError: + raise KeyError(f'Command "{command}" not found. Available commands: {",".join(self.available_commands)}') + return pickle.dumps(dict(c=cmd_id, a=args)) + + @property + def available_commands(self): + return [s for s in self.CMDS if isinstance(s, str)] + + def start_server(self): + process = CORE.create_process( + target=self.server_process, + args=( + self.SERVER_CLASS, + self.socket_path, + ), + kwargs=self.server_kwargs, + ) + process.start() + return process + + @staticmethod + def server_process(server_class, socket_path, **kwargs): + try: + engine_server = server_class(socket_path, **kwargs) + asyncio.run(engine_server.worker()) + except (asyncio.CancelledError, KeyboardInterrupt): + pass + except Exception: + import traceback + + log = logging.getLogger("bbot.core.engine.server") + log.critical(f"Unhandled error in {server_class.__name__} server process: {traceback.format_exc()}") + + @asynccontextmanager + async def new_socket(self): + if self._server_process is None: + self._server_process = self.start_server() + while not self.socket_path.exists(): + await asyncio.sleep(0.1) + socket = self.context.socket(zmq.DEALER) + socket.connect(f"ipc://{self.socket_path}") + try: + yield socket + finally: + with suppress(Exception): + socket.close() + + def cleanup(self): + # delete socket file on exit + self.socket_path.unlink(missing_ok=True) + + +class EngineServer: + + CMDS = {} + + def __init__(self, socket_path): + self.log = logging.getLogger(f"bbot.core.{self.__class__.__name__.lower()}") + self.name = f"EngineServer {self.__class__.__name__}" + if socket_path is not None: + # create ZeroMQ context + self.context = zmq.asyncio.Context() + # ROUTER socket can handle multiple concurrent requests + self.socket = self.context.socket(zmq.ROUTER) + # create socket file + self.socket.bind(f"ipc://{socket_path}") + + async def run_and_return(self, client_id, command_fn, **kwargs): + self.log.debug(f"{self.name} run-and-return {command_fn.__name__}({kwargs})") + try: + result = await command_fn(**kwargs) + except Exception as e: + error = f"Unhandled error in {self.name}.{command_fn.__name__}({kwargs}): {e}" + trace = traceback.format_exc() + result = {"_e": (error, trace)} + await self.send_socket_multipart([client_id, pickle.dumps(result)]) + + async def run_and_yield(self, client_id, command_fn, **kwargs): + self.log.debug(f"{self.name} run-and-yield {command_fn.__name__}({kwargs})") + try: + async for _ in command_fn(**kwargs): + await self.send_socket_multipart([client_id, pickle.dumps(_)]) + await self.send_socket_multipart([client_id, pickle.dumps({"_s": None})]) + except Exception as e: + error = f"Unhandled error in {self.name}.{command_fn.__name__}({kwargs}): {e}" + trace = traceback.format_exc() + result = {"_e": (error, trace)} + await self.send_socket_multipart([client_id, pickle.dumps(result)]) + + async def send_socket_multipart(self, *args, **kwargs): + try: + await self.socket.send_multipart(*args, **kwargs) + except Exception as e: + self.log.warning(f"Error sending ZMQ message: {e}") + self.log.trace(traceback.format_exc()) + + async def worker(self): + try: + while 1: + client_id, binary = await self.socket.recv_multipart() + message = pickle.loads(binary) + self.log.debug(f"{self.name} got message: {message}") + + cmd = message.get("c", None) + if not isinstance(cmd, int): + self.log.warning(f"No command sent in message: {message}") + continue + + kwargs = message.get("a", {}) + if not isinstance(kwargs, dict): + 
self.log.warning(f"{self.name}: received invalid message of type {type(kwargs)}, should be dict") + continue + + command_name = self.CMDS[cmd] + command_fn = getattr(self, command_name, None) + + if command_fn is None: + self.log.warning(f'{self.name} has no function named "{command_fn}"') + continue + + if inspect.isasyncgenfunction(command_fn): + coroutine = self.run_and_yield(client_id, command_fn, **kwargs) + else: + coroutine = self.run_and_return(client_id, command_fn, **kwargs) + + asyncio.create_task(coroutine) + except Exception as e: + self.log.error(f"Error in EngineServer worker: {e}") + self.log.trace(traceback.format_exc()) + finally: + with suppress(Exception): + self.socket.close() diff --git a/bbot/core/event/base.py b/bbot/core/event/base.py index 793015967..d7eabd6db 100644 --- a/bbot/core/event/base.py +++ b/bbot/core/event/base.py @@ -1,6 +1,5 @@ import re import json -import asyncio import logging import ipaddress import traceback @@ -9,14 +8,14 @@ from datetime import datetime from contextlib import suppress from urllib.parse import urljoin +from radixtarget import RadixTarget from pydantic import BaseModel, field_validator from .helpers import * -from bbot.core.errors import * +from bbot.errors import * from bbot.core.helpers import ( extract_words, get_file_extension, - host_in_host, is_domain, is_subdomain, is_ip, @@ -94,7 +93,7 @@ class BaseEvent: # Always emit this event type even if it's not in scope _always_emit = False # Always emit events with these tags even if they're not in scope - _always_emit_tags = ["affiliate"] + _always_emit_tags = ["affiliate", "target"] # Bypass scope checking and dns resolution, distribute immediately to modules # This is useful for "end-of-line" events like FINDING and VULNERABILITY _quick_emit = False @@ -152,8 +151,10 @@ def __init__( self._port = None self.__words = None self._priority = None + self._host_original = None self._module_priority = None self._resolved_hosts = set() + self.dns_children = dict() # keep track of whether this event has been recorded by the scan self._stats_recorded = False @@ -210,9 +211,6 @@ def __init__( if _internal: # or source._internal: self.internal = True - # an event indicating whether the event has undergone DNS resolution - self._resolved = asyncio.Event() - # inherit web spider distance from parent self.web_spider_distance = getattr(self.source, "web_spider_distance", 0) @@ -278,9 +276,24 @@ def host(self): E.g. for IP_ADDRESS, it could be an ipaddress.IPv4Address() or IPv6Address() object """ if self.__host is None: - self.__host = self._host() + self.host = self._host() return self.__host + @host.setter + def host(self, host): + if self._host_original is None: + self._host_original = host + self.__host = host + + @property + def host_original(self): + """ + Original host data, in case it was changed due to a wildcard DNS, etc. 
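Event containment (`other_event in event`) now leans on the `radixtarget` package instead of the old `host_in_host()` helper: the event's own host goes into a radix tree, and the other host is looked up against it. The same check in isolation, using only the calls shown above:

```python
from radixtarget import RadixTarget

rt = RadixTarget()
rt.insert("evilcorp.com")

# a subdomain falls inside its parent domain...
assert rt.search("www.evilcorp.com")
# ...an unrelated host does not
assert not rt.search("evilcorp.net")
```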
+ """ + if self._host_original is None: + return self.host + return self._host_original + @property def port(self): self.host @@ -567,7 +580,9 @@ def __contains__(self, other): if self.host == other.host: return True # hostnames and IPs - return host_in_host(other.host, self.host) + radixtarget = RadixTarget() + radixtarget.insert(self.host) + return bool(radixtarget.search(other.host)) return False def json(self, mode="json", siem_friendly=False): @@ -606,7 +621,7 @@ def json(self, mode="json", siem_friendly=False): j["scan"] = self.scan.id j["timestamp"] = self.timestamp.timestamp() if self.host: - j["resolved_hosts"] = [str(h) for h in self.resolved_hosts] + j["resolved_hosts"] = sorted(str(h) for h in self.resolved_hosts) source_id = self.source_id if source_id: j["source"] = source_id @@ -796,7 +811,7 @@ def __init__(self, *args, **kwargs): ip = ipaddress.ip_address(self.data) self.add_tag(f"ipv{ip.version}") if ip.is_private: - self.add_tag("private") + self.add_tag("private-ip") self.dns_resolve_distance = getattr(self.source, "dns_resolve_distance", 0) def sanitize_data(self, data): @@ -883,7 +898,7 @@ class URL_UNVERIFIED(BaseEvent): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # increment the web spider distance - if self.type == "URL_UNVERIFIED" and getattr(self.module, "name", "") != "TARGET": + if self.type == "URL_UNVERIFIED": self.web_spider_distance += 1 self.num_redirects = getattr(self.source, "num_redirects", 0) @@ -953,7 +968,8 @@ def sanitize_data(self, data): @property def resolved_hosts(self): - return [".".join(i.split("-")[1:]) for i in self.tags if i.startswith("ip-")] + # TODO: remove this when we rip out httpx + return set(".".join(i.split("-")[1:]) for i in self.tags if i.startswith("ip-")) @property def pretty_string(self): @@ -968,6 +984,11 @@ class _data_validator(BaseModel): url: str _validate_url = field_validator("url")(validators.validate_url) + def sanitize_data(self, data): + data = super().sanitize_data(data) + data["name"] = data["name"].lower() + return data + def _words(self): return self.data["name"] @@ -1000,6 +1021,7 @@ def __init__(self, *args, **kwargs): def sanitize_data(self, data): url = data.get("url", "") self.parsed = validators.validate_url_parsed(url) + data["url"] = self.parsed.geturl() header_dict = {} for i in data.get("raw_header", "").splitlines(): diff --git a/bbot/core/event/helpers.py b/bbot/core/event/helpers.py index d3ad3ee78..b363c9f66 100644 --- a/bbot/core/event/helpers.py +++ b/bbot/core/event/helpers.py @@ -2,7 +2,7 @@ import ipaddress from contextlib import suppress -from bbot.core.errors import ValidationError +from bbot.errors import ValidationError from bbot.core.helpers.regexes import event_type_regexes from bbot.core.helpers import sha1, smart_decode, smart_encode_punycode diff --git a/bbot/core/flags.py b/bbot/core/flags.py index d8dbf8566..f65dbad28 100644 --- a/bbot/core/flags.py +++ b/bbot/core/flags.py @@ -4,6 +4,7 @@ "aggressive": "Generates a large amount of network traffic", "baddns": "Runs all modules from the DNS auditing tool BadDNS", "cloud-enum": "Enumerates cloud resources", + "code-enum": "Find public code repositories and search them for secrets etc.", "deadly": "Highly aggressive", "email-enum": "Enumerates email addresses", "iis-shortnames": "Scans for IIS Shortname vulnerability", diff --git a/bbot/core/helpers/cloud.py b/bbot/core/helpers/cloud.py deleted file mode 100644 index 811ca070c..000000000 --- a/bbot/core/helpers/cloud.py +++ /dev/null @@ -1,104 +0,0 @@ 
-import asyncio -import logging - -from cloudcheck import cloud_providers - -log = logging.getLogger("bbot.helpers.cloud") - - -class CloudHelper: - def __init__(self, parent_helper): - self.parent_helper = parent_helper - self.providers = cloud_providers - self.dummy_modules = {} - for provider_name in self.providers.providers: - self.dummy_modules[provider_name] = self.parent_helper._make_dummy_module( - f"{provider_name}_cloud", _type="scan" - ) - self._updated = False - self._update_lock = asyncio.Lock() - - def excavate(self, event, s): - """ - Extract buckets, etc. from strings such as an HTTP responses - """ - for provider in self: - provider_name = provider.name.lower() - base_kwargs = {"source": event, "tags": [f"cloud-{provider_name}"], "_provider": provider_name} - for event_type, sigs in provider.signatures.items(): - found = set() - for sig in sigs: - for match in sig.findall(s): - kwargs = dict(base_kwargs) - kwargs["event_type"] = event_type - if not match in found: - found.add(match) - if event_type == "STORAGE_BUCKET": - self.emit_bucket(match, **kwargs) - else: - self.emit_event(**kwargs) - - def speculate(self, event): - """ - Look for DNS_NAMEs that are buckets or other cloud resources - """ - for provider in self: - provider_name = provider.name.lower() - base_kwargs = dict( - source=event, tags=[f"{provider.provider_type}-{provider_name}"], _provider=provider_name - ) - if event.type.startswith("DNS_NAME"): - for event_type, sigs in provider.signatures.items(): - found = set() - for sig in sigs: - match = sig.match(event.data) - if match: - kwargs = dict(base_kwargs) - kwargs["event_type"] = event_type - if not event.data in found: - found.add(event.data) - if event_type == "STORAGE_BUCKET": - self.emit_bucket(match.groups(), **kwargs) - else: - self.emit_event(**kwargs) - - def emit_bucket(self, match, **kwargs): - bucket_name, bucket_domain = match - kwargs["data"] = {"name": bucket_name, "url": f"https://{bucket_name}.{bucket_domain}"} - self.emit_event(**kwargs) - - def emit_event(self, *args, **kwargs): - provider_name = kwargs.pop("_provider") - dummy_module = self.dummy_modules[provider_name] - event = dummy_module.make_event(*args, **kwargs) - if event: - self.parent_helper.scan.manager.queue_event(event) - - async def tag_event(self, event): - """ - Tags an event according to cloud provider - """ - async with self._update_lock: - if not self._updated: - await self.providers.update() - self._updated = True - - if event.host: - for host in [event.host] + list(event.resolved_hosts): - provider_name, provider_type, source = self.providers.check(host) - if provider_name is not None: - provider = self.providers.providers[provider_name.lower()] - event.add_tag(f"{provider_type}-{provider_name.lower()}") - # if its host directly matches this cloud provider's domains - if not self.parent_helper.is_ip(host): - # tag as buckets, etc. 
- for event_type, sigs in provider.signatures.items(): - for sig in sigs: - if sig.match(host): - event.add_tag(f"{provider_type}-{event_type}") - - def __getitem__(self, item): - return self.providers.providers[item.lower()] - - def __iter__(self): - yield from self.providers diff --git a/bbot/core/helpers/command.py b/bbot/core/helpers/command.py index 14a788f8a..06fc8a91f 100644 --- a/bbot/core/helpers/command.py +++ b/bbot/core/helpers/command.py @@ -185,7 +185,8 @@ async def _write_proc_line(proc, chunk): proc.stdin.write(smart_encode(chunk) + b"\n") await proc.stdin.drain() except Exception as e: - command = " ".join([str(s) for s in proc.args]) + proc_args = [str(s) for s in getattr(proc, "args", [])] + command = " ".join(proc_args) log.warning(f"Error writing line to stdin for command: {command}: {e}") log.trace(traceback.format_exc()) diff --git a/bbot/core/helpers/depsinstaller/installer.py b/bbot/core/helpers/depsinstaller/installer.py index 049baef86..c386b6c3b 100644 --- a/bbot/core/helpers/depsinstaller/installer.py +++ b/bbot/core/helpers/depsinstaller/installer.py @@ -13,8 +13,6 @@ from ansible_runner.interface import run from subprocess import CalledProcessError -from bbot.core import configurator -from bbot.modules import module_loader from ..misc import can_sudo_without_password, os_platform log = logging.getLogger("bbot.core.helpers.depsinstaller") @@ -23,6 +21,8 @@ class DepsInstaller: def __init__(self, parent_helper): self.parent_helper = parent_helper + self.preset = self.parent_helper.preset + self.core = self.preset.core # respect BBOT's http timeout http_timeout = self.parent_helper.config.get("http_timeout", 30) @@ -32,8 +32,8 @@ def __init__(self, parent_helper): self._installed_sudo_askpass = False self._sudo_password = os.environ.get("BBOT_SUDO_PASS", None) if self._sudo_password is None: - if configurator.bbot_sudo_pass is not None: - self._sudo_password = configurator.bbot_sudo_pass + if self.core.bbot_sudo_pass is not None: + self._sudo_password = self.core.bbot_sudo_pass elif can_sudo_without_password(): self._sudo_password = "" self.data_dir = self.parent_helper.cache_dir / "depsinstaller" @@ -43,17 +43,12 @@ def __init__(self, parent_helper): self.parent_helper.mkdir(self.command_status) self.setup_status = self.read_setup_status() - self.no_deps = self.parent_helper.config.get("no_deps", False) + self.deps_behavior = self.parent_helper.config.get("deps_behavior", "abort_on_failure").lower() self.ansible_debug = True - self.force_deps = self.parent_helper.config.get("force_deps", False) - self.retry_deps = self.parent_helper.config.get("retry_deps", False) - self.ignore_failed_deps = self.parent_helper.config.get("ignore_failed_deps", False) self.venv = "" if sys.prefix != sys.base_prefix: self.venv = sys.prefix - self.all_modules_preloaded = module_loader.preloaded() - self.ensure_root_lock = Lock() async def install(self, *modules): @@ -64,7 +59,7 @@ async def install(self, *modules): notified = False for m in modules: # assume success if we're ignoring dependencies - if self.no_deps: + if self.deps_behavior == "disable": succeeded.append(m) continue # abort if module name is unknown @@ -73,6 +68,7 @@ async def install(self, *modules): failed.append(m) continue preloaded = self.all_modules_preloaded[m] + log.debug(f"Installing {m} - Preloaded Deps {preloaded['deps']}") # make a hash of the dependencies and check if it's already been handled # take into consideration whether the venv or bbot home directory changes module_hash = self.parent_helper.sha1( 
@@ -84,11 +80,15 @@ async def install(self, *modules): success = self.setup_status.get(module_hash, None) dependencies = list(chain(*preloaded["deps"].values())) if len(dependencies) <= 0: - log.debug(f'No setup to do for module "{m}"') + log.debug(f'No dependency work to do for module "{m}"') succeeded.append(m) continue else: - if success is None or (success is False and self.retry_deps) or self.force_deps: + if ( + success is None + or (success is False and self.deps_behavior == "retry_failed") + or self.deps_behavior == "force_install" + ): if not notified: log.hugeinfo(f"Installing module dependencies. Please be patient, this may take a while.") notified = True @@ -98,14 +98,14 @@ async def install(self, *modules): self.ensure_root(f'Module "{m}" needs root privileges to install its dependencies.') success = await self.install_module(m) self.setup_status[module_hash] = success - if success or self.ignore_failed_deps: + if success or self.deps_behavior == "ignore_failed": log.debug(f'Setup succeeded for module "{m}"') succeeded.append(m) else: log.warning(f'Setup failed for module "{m}"') failed.append(m) else: - if success or self.ignore_failed_deps: + if success or self.deps_behavior == "ignore_failed": log.debug( f'Skipping dependency install for module "{m}" because it\'s already done (--force-deps to re-run)' ) @@ -148,6 +148,20 @@ async def install_module(self, module): if deps_pip: success &= await self.pip_install(deps_pip, constraints=deps_pip_constraints) + # shared/common + deps_common = preloaded["deps"]["common"] + if deps_common: + for dep_common in deps_common: + if self.setup_status.get(dep_common, False) == True: + log.critical( + f'Skipping installation of dependency "{dep_common}" for module "{module}" since it is already installed' + ) + continue + ansible_tasks = self.preset.module_loader._shared_deps[dep_common] + result = self.tasks(module, ansible_tasks) + self.setup_status[dep_common] = result + success &= result + return success async def pip_install(self, packages, constraints=None): @@ -310,7 +324,7 @@ def ensure_root(self, message=""): if self.parent_helper.verify_sudo_password(password): log.success("Authentication successful") self._sudo_password = password - configurator.bbot_sudo_pass = password + self.core.bbot_sudo_pass = password else: log.warning("Incorrect password") @@ -336,3 +350,7 @@ def _install_sudo_askpass(self): askpass_dst = self.parent_helper.tools_dir / self.askpass_filename shutil.copy(askpass_src, askpass_dst) askpass_dst.chmod(askpass_dst.stat().st_mode | stat.S_IEXEC) + + @property + def all_modules_preloaded(self): + return self.preset.module_loader.preloaded() diff --git a/bbot/core/helpers/diff.py b/bbot/core/helpers/diff.py index 25b265bde..c21f43718 100644 --- a/bbot/core/helpers/diff.py +++ b/bbot/core/helpers/diff.py @@ -3,7 +3,7 @@ from deepdiff import DeepDiff from contextlib import suppress from xml.parsers.expat import ExpatError -from bbot.core.errors import HttpCompareError +from bbot.errors import HttpCompareError log = logging.getLogger("bbot.core.helpers.diff") diff --git a/bbot/core/helpers/dns/__init__.py b/bbot/core/helpers/dns/__init__.py new file mode 100644 index 000000000..75426cd26 --- /dev/null +++ b/bbot/core/helpers/dns/__init__.py @@ -0,0 +1 @@ +from .dns import DNSHelper diff --git a/bbot/core/helpers/dns/dns.py b/bbot/core/helpers/dns/dns.py new file mode 100644 index 000000000..7f775483c --- /dev/null +++ b/bbot/core/helpers/dns/dns.py @@ -0,0 +1,164 @@ +import dns +import logging +import dns.exception 
+import dns.asyncresolver +from radixtarget import RadixTarget + +from bbot.core.engine import EngineClient +from ..misc import clean_dns_record, is_ip, is_domain, is_dns_name + +from .engine import DNSEngine + +log = logging.getLogger("bbot.core.helpers.dns") + + +class DNSHelper(EngineClient): + + SERVER_CLASS = DNSEngine + + """Helper class for DNS-related operations within BBOT. + + This class provides mechanisms for host resolution, wildcard domain detection, event tagging, and more. + It centralizes all DNS-related activities in BBOT, offering both synchronous and asynchronous methods + for DNS resolution, as well as various utilities for batch resolution and DNS query filtering. + + Attributes: + parent_helper: A reference to the instantiated `ConfigAwareHelper` (typically `scan.helpers`). + resolver (BBOTAsyncResolver): An asynchronous DNS resolver tailored for BBOT with rate-limiting capabilities. + timeout (int): The timeout value for DNS queries. Defaults to 5 seconds. + retries (int): The number of retries for failed DNS queries. Defaults to 1. + abort_threshold (int): The threshold for aborting after consecutive failed queries. Defaults to 50. + max_dns_resolve_distance (int): Maximum allowed distance for DNS resolution. Defaults to 4. + all_rdtypes (list): A list of DNS record types to be considered during operations. + wildcard_ignore (tuple): Domains to be ignored during wildcard detection. + wildcard_tests (int): Number of tests to be run for wildcard detection. Defaults to 5. + _wildcard_cache (dict): Cache for wildcard detection results. + _dns_cache (LRUCache): Cache for DNS resolution results, limited in size. + resolver_file (Path): File containing system's current resolver nameservers. + filter_bad_ptrs (bool): Whether to filter out DNS names that appear to be auto-generated PTR records. Defaults to True. + + Args: + parent_helper: The parent helper object with configuration details and utilities. + + Raises: + DNSError: If an issue arises when creating the BBOTAsyncResolver instance. 
+ + Examples: + >>> dns_helper = DNSHelper(parent_config) + >>> resolved_host = dns_helper.resolver.resolve("example.com") + """ + + def __init__(self, parent_helper): + self.parent_helper = parent_helper + self.config = self.parent_helper.config + super().__init__(server_kwargs={"config": self.config}) + + # resolver + self.timeout = self.config.get("dns_timeout", 5) + self.resolver = dns.asyncresolver.Resolver() + self.resolver.rotate = True + self.resolver.timeout = self.timeout + self.resolver.lifetime = self.timeout + + self.max_dns_resolve_distance = self.config.get("max_dns_resolve_distance", 5) + + # wildcard handling + self.wildcard_disable = self.config.get("dns_wildcard_disable", False) + self.wildcard_ignore = RadixTarget() + for d in self.config.get("dns_wildcard_ignore", []): + self.wildcard_ignore.insert(d) + + # copy the system's current resolvers to a text file for tool use + self.system_resolvers = dns.resolver.Resolver().nameservers + # TODO: DNS server speed test (start in background task) + self.resolver_file = self.parent_helper.tempfile(self.system_resolvers, pipe=False) + + async def resolve(self, query, **kwargs): + return await self.run_and_return("resolve", query=query, **kwargs) + + async def resolve_batch(self, queries, **kwargs): + async for _ in self.run_and_yield("resolve_batch", queries=queries, **kwargs): + yield _ + + async def resolve_raw_batch(self, queries): + async for _ in self.run_and_yield("resolve_raw_batch", queries=queries): + yield _ + + async def is_wildcard(self, query, ips=None, rdtype=None): + """ + Use this method to check whether a *host* is a wildcard entry + + This can reliably tell the difference between a valid DNS record and a wildcard within a wildcard domain. + + If you want to know whether a domain is using wildcard DNS, use `is_wildcard_domain()` instead. + + Args: + query (str): The hostname to check for a wildcard entry. + ips (list, optional): List of IPs to compare against, typically obtained from a previous DNS resolution of the query. + rdtype (str, optional): The DNS record type (e.g., "A", "AAAA") to consider during the check. + + Returns: + dict: A dictionary indicating if the query is a wildcard for each checked DNS record type. + Keys are DNS record types like "A", "AAAA", etc. + Values are tuples where the first element is a boolean indicating if the query is a wildcard, + and the second element is the wildcard parent if it's a wildcard. + + Raises: + ValueError: If only one of `ips` or `rdtype` is specified or if no valid IPs are specified. 
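Each of these client methods is a thin wrapper that pickles the call, ships it to the DNS engine process over ZMQ, and unpickles the reply: `run_and_return()` for single answers, `run_and_yield()` for streamed batches. Typical usage, assuming `dns` is a `DNSHelper` instance (in a module this would normally be reached through the scan's helpers):

```python
# single query: returns a set of resolved hosts
ips = await dns.resolve("evilcorp.com")

# batched queries: results stream back as (query, answers) tuples
async for query, answers in dns.resolve_batch(["www.evilcorp.com", "mail.evilcorp.com"]):
    print(query, answers)
```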
+ + Examples: + >>> is_wildcard("www.github.io") + {"A": (True, "github.io"), "AAAA": (True, "github.io")} + + >>> is_wildcard("www.evilcorp.com", ips=["93.184.216.34"], rdtype="A") + {"A": (False, "evilcorp.com")} + + Note: + `is_wildcard` can be True, False, or None (indicating that wildcard detection was inconclusive) + """ + if [ips, rdtype].count(None) == 1: + raise ValueError("Both ips and rdtype must be specified") + + query = self._wildcard_prevalidation(query) + if not query: + return {} + + # skip check if the query is a domain + if is_domain(query): + return {} + + return await self.run_and_return("is_wildcard", query=query, ips=ips, rdtype=rdtype) + + async def is_wildcard_domain(self, domain, log_info=False): + domain = self._wildcard_prevalidation(domain) + if not domain: + return {} + + return await self.run_and_return("is_wildcard_domain", domain=domain, log_info=False) + + def _wildcard_prevalidation(self, host): + if self.wildcard_disable: + return False + + host = clean_dns_record(host) + # skip check if it's an IP or a plain hostname + if is_ip(host) or not "." in host: + return False + + # skip if query isn't a dns name + if not is_dns_name(host): + return False + + # skip check if the query's parent domain is excluded in the config + wildcard_ignore = self.wildcard_ignore.search(host) + if wildcard_ignore: + log.debug(f"Skipping wildcard detection on {host} because {wildcard_ignore} is excluded in the config") + return False + + return host + + async def _mock_dns(self, mock_data): + from .mock import MockResolver + + self.resolver = MockResolver(mock_data) + await self.run_and_return("_mock_dns", mock_data=mock_data) diff --git a/bbot/core/helpers/dns.py b/bbot/core/helpers/dns/engine.py similarity index 50% rename from bbot/core/helpers/dns.py rename to bbot/core/helpers/dns/engine.py index 63177756f..6018e0e3f 100644 --- a/bbot/core/helpers/dns.py +++ b/bbot/core/helpers/dns/engine.py @@ -1,103 +1,60 @@ +import os import dns import time import asyncio import logging -import ipaddress import traceback -import contextlib -import dns.exception -import dns.asyncresolver from cachetools import LRUCache from contextlib import suppress -from .regexes import dns_name_regex -from bbot.core.helpers.ratelimiter import RateLimiter +from ..regexes import dns_name_regex +from bbot.errors import DNSWildcardBreak +from bbot.core.engine import EngineServer from bbot.core.helpers.async_helpers import NamedLock -from bbot.core.errors import ValidationError, DNSError, DNSWildcardBreak -from .misc import is_ip, is_domain, is_dns_name, domain_parents, parent_domain, rand_string, cloudcheck - -log = logging.getLogger("bbot.core.helpers.dns") - - -class BBOTAsyncResolver(dns.asyncresolver.Resolver): - """Custom asynchronous resolver for BBOT with rate limiting. - - This class extends dnspython's async resolver and provides additional support for rate-limiting DNS queries. - The maximum number of queries allowed per second can be customized via BBOT's config. - - Attributes: - _parent_helper: A reference to the instantiated `ConfigAwareHelper` (typically `scan.helpers`). - _dns_rate_limiter (RateLimiter): An instance of the RateLimiter class for DNS query rate-limiting. - - Args: - *args: Positional arguments passed to the base resolver. - **kwargs: Keyword arguments. '_parent_helper' is expected among these to provide configuration data for - rate-limiting. All other keyword arguments are passed to the base resolver. 
- """ - - def __init__(self, *args, **kwargs): - self._parent_helper = kwargs.pop("_parent_helper") - dns_queries_per_second = self._parent_helper.config.get("dns_queries_per_second", 100) - self._dns_rate_limiter = RateLimiter(dns_queries_per_second, "DNS") - super().__init__(*args, **kwargs) - self.rotate = True - - async def resolve(self, *args, **kwargs): - async with self._dns_rate_limiter: - return await super().resolve(*args, **kwargs) - - -class DNSHelper: - """Helper class for DNS-related operations within BBOT. - - This class provides mechanisms for host resolution, wildcard domain detection, event tagging, and more. - It centralizes all DNS-related activities in BBOT, offering both synchronous and asynchronous methods - for DNS resolution, as well as various utilities for batch resolution and DNS query filtering. - - Attributes: - parent_helper: A reference to the instantiated `ConfigAwareHelper` (typically `scan.helpers`). - resolver (BBOTAsyncResolver): An asynchronous DNS resolver tailored for BBOT with rate-limiting capabilities. - timeout (int): The timeout value for DNS queries. Defaults to 5 seconds. - retries (int): The number of retries for failed DNS queries. Defaults to 1. - abort_threshold (int): The threshold for aborting after consecutive failed queries. Defaults to 50. - max_dns_resolve_distance (int): Maximum allowed distance for DNS resolution. Defaults to 4. - all_rdtypes (list): A list of DNS record types to be considered during operations. - wildcard_ignore (tuple): Domains to be ignored during wildcard detection. - wildcard_tests (int): Number of tests to be run for wildcard detection. Defaults to 5. - _wildcard_cache (dict): Cache for wildcard detection results. - _dns_cache (LRUCache): Cache for DNS resolution results, limited in size. - _event_cache (LRUCache): Cache for event resolution results, tags. Limited in size. - resolver_file (Path): File containing system's current resolver nameservers. - filter_bad_ptrs (bool): Whether to filter out DNS names that appear to be auto-generated PTR records. Defaults to True. - - Args: - parent_helper: The parent helper object with configuration details and utilities. - - Raises: - DNSError: If an issue arises when creating the BBOTAsyncResolver instance. 
- - Examples: - >>> dns_helper = DNSHelper(parent_config) - >>> resolved_host = dns_helper.resolver.resolve("example.com") - """ - - all_rdtypes = ["A", "AAAA", "SRV", "MX", "NS", "SOA", "CNAME", "TXT"] - - def __init__(self, parent_helper): - self.parent_helper = parent_helper - try: - self.resolver = BBOTAsyncResolver(_parent_helper=self.parent_helper) - except Exception as e: - raise DNSError(f"Failed to create BBOT DNS resolver: {e}") - self.timeout = self.parent_helper.config.get("dns_timeout", 5) - self.retries = self.parent_helper.config.get("dns_retries", 1) - self.abort_threshold = self.parent_helper.config.get("dns_abort_threshold", 50) - self.max_dns_resolve_distance = self.parent_helper.config.get("max_dns_resolve_distance", 5) +from bbot.core.helpers.misc import ( + is_ip, + rand_string, + smart_decode, + parent_domain, + domain_parents, + clean_dns_record, +) + + +log = logging.getLogger("bbot.core.helpers.dns.engine.server") + +all_rdtypes = ["A", "AAAA", "SRV", "MX", "NS", "SOA", "CNAME", "TXT"] + + +class DNSEngine(EngineServer): + + CMDS = { + 0: "resolve", + 1: "resolve_batch", + 2: "resolve_raw_batch", + 3: "is_wildcard", + 4: "is_wildcard_domain", + 99: "_mock_dns", + } + + def __init__(self, socket_path, config={}): + super().__init__(socket_path) + + self.config = config + # config values + self.timeout = self.config.get("dns_timeout", 5) + self.retries = self.config.get("dns_retries", 1) + self.abort_threshold = self.config.get("dns_abort_threshold", 50) + self.max_dns_resolve_distance = self.config.get("max_dns_resolve_distance", 5) + + # resolver + self.resolver = dns.asyncresolver.Resolver() + self.resolver.rotate = True self.resolver.timeout = self.timeout self.resolver.lifetime = self.timeout # skip certain queries - dns_omit_queries = self.parent_helper.config.get("dns_omit_queries", None) + dns_omit_queries = self.config.get("dns_omit_queries", None) if not dns_omit_queries: dns_omit_queries = [] self.dns_omit_queries = dict() @@ -112,35 +69,27 @@ def __init__(self, parent_helper): except KeyError: self.dns_omit_queries[rdtype] = {query} - self.wildcard_ignore = self.parent_helper.config.get("dns_wildcard_ignore", None) + # wildcard handling + self.wildcard_ignore = self.config.get("dns_wildcard_ignore", None) if not self.wildcard_ignore: self.wildcard_ignore = [] self.wildcard_ignore = tuple([str(d).strip().lower() for d in self.wildcard_ignore]) - self.wildcard_tests = self.parent_helper.config.get("dns_wildcard_tests", 5) + self.wildcard_tests = self.config.get("dns_wildcard_tests", 5) self._wildcard_cache = dict() # since wildcard detection takes some time, This is to prevent multiple # modules from kicking off wildcard detection for the same domain at the same time self._wildcard_lock = NamedLock() + self._dns_connectivity_lock = asyncio.Lock() self._last_dns_success = None self._last_connectivity_warning = time.time() # keeps track of warnings issued for wildcard detection to prevent duplicate warnings self._dns_warnings = set() self._errors = dict() - self.fallback_nameservers_file = self.parent_helper.wordlist_dir / "nameservers.txt" - self._debug = self.parent_helper.config.get("dns_debug", False) - self._dummy_modules = dict() + self._debug = self.config.get("dns_debug", False) self._dns_cache = LRUCache(maxsize=10000) - self._event_cache = LRUCache(maxsize=10000) - self._event_cache_locks = NamedLock() - # copy the system's current resolvers to a text file for tool use - self.system_resolvers = dns.resolver.Resolver().nameservers - if 
len(self.system_resolvers) == 1:
-            log.warning("BBOT performs better with multiple DNS servers. Your system currently only has one.")
-        self.resolver_file = self.parent_helper.tempfile(self.system_resolvers, pipe=False)
-
-        self.filter_bad_ptrs = self.parent_helper.config.get("dns_filter_ptrs", True)
+        self.filter_bad_ptrs = self.config.get("dns_filter_ptrs", True)
 
     async def resolve(self, query, **kwargs):
         """Resolve DNS names and IP addresses to their corresponding results.
@@ -164,14 +113,12 @@ async def resolve(self, query, **kwargs):
             {"1.2.3.4", "dead::beef"}
         """
         results = set()
+        errors = []
         try:
-            r = await self.resolve_raw(query, **kwargs)
-            if r:
-                raw_results, errors = r
-                for rdtype, answers in raw_results:
-                    for answer in answers:
-                        for _, t in self.extract_targets(answer):
-                            results.add(t)
+            answers, errors = await self.resolve_raw(query, **kwargs)
+            for answer in answers:
+                for _, host in self.extract_targets(answer):
+                    results.add(host)
         except BaseException:
             log.trace(f"Caught exception in resolve({query}, {kwargs}):")
             log.trace(traceback.format_exc())
@@ -205,39 +152,24 @@ async def resolve_raw(self, query, **kwargs):
             ([('PTR', <dns.resolver.Answer>)], [])
 
             >>> await resolve_raw("dns.google")
-            ([('A', <dns.resolver.Answer>), ('AAAA', <dns.resolver.Answer>)], [])
+            (<dns.resolver.Answer>, [])
         """
         # DNS over TCP is more reliable
         # But setting this breaks DNS resolution on Ubuntu because systemd-resolve doesn't support TCP
         # kwargs["tcp"] = True
-        results = []
-        errors = []
         try:
             query = str(query).strip()
+            kwargs.pop("rdtype", None)
+            rdtype = kwargs.pop("type", "A")
             if is_ip(query):
-                kwargs.pop("type", None)
-                kwargs.pop("rdtype", None)
-                results, errors = await self._resolve_ip(query, **kwargs)
-                return [("PTR", results)], [("PTR", e) for e in errors]
+                return await self._resolve_ip(query, **kwargs)
             else:
-                types = ["A", "AAAA"]
-                kwargs.pop("rdtype", None)
-                if "type" in kwargs:
-                    t = kwargs.pop("type")
-                    types = self._parse_rdtype(t, default=types)
-                for t in types:
-                    r, e = await self._resolve_hostname(query, rdtype=t, **kwargs)
-                    if r:
-                        results.append((t, r))
-                    for error in e:
-                        errors.append((t, error))
+                return await self._resolve_hostname(query, rdtype=rdtype, **kwargs)
         except BaseException:
             log.trace(f"Caught exception in resolve_raw({query}, {kwargs}):")
             log.trace(traceback.format_exc())
             raise
 
-        return (results, errors)
-
     async def _resolve_hostname(self, query, **kwargs):
         """Translate a hostname into its corresponding IP addresses.
 
@@ -273,7 +205,7 @@ async def _resolve_hostname(self, query, **kwargs):
             self.debug(f"Skipping {rdtype}:{query} because it's omitted in the config")
             return results, errors
 
-        parent = self.parent_helper.parent_domain(query)
+        parent = parent_domain(query)
         retries = kwargs.pop("retries", self.retries)
         use_cache = kwargs.pop("use_cache", True)
         tries_left = int(retries) + 1
@@ -397,259 +329,82 @@ async def _resolve_ip(self, query, **kwargs):
 
         return results, errors
 
-    async def handle_wildcard_event(self, event, children):
+    async def resolve_batch(self, queries, threads=10, **kwargs):
         """
-        Used within BBOT's scan manager to detect and tag DNS wildcard events.
-
-        Wildcards are detected for every major record type. If a wildcard is detected, its data
-        is overwritten, for example: `_wildcard.evilcorp.com`.
+        A helper to execute a batch of DNS requests.
 
         Args:
-            event (object): The event to check for wildcards.
-            children (list): A list of the event's resulting DNS children after resolution.
+            queries (list): List of queries to resolve.
+            threads (int, optional): Maximum number of queries to resolve concurrently. Defaults to 10.
+            **kwargs: Additional keyword arguments to pass to `resolve()`. 
- Returns: - None: This method modifies the `event` in place and does not return a value. + Yields: + tuple: A tuple containing the original query and its resolved value. Examples: - >>> handle_wildcard_event(event, children) - # The `event` might now have tags like ["wildcard", "a-wildcard", "aaaa-wildcard"] and - # its `data` attribute might be modified to "_wildcard.evilcorp.com" if it was detected - # as a wildcard. - """ - log.debug(f"Entering handle_wildcard_event({event}, children={children})") - try: - event_host = str(event.host) - # wildcard checks - if not is_ip(event.host): - # check if the dns name itself is a wildcard entry - wildcard_rdtypes = await self.is_wildcard(event_host) - for rdtype, (is_wildcard, wildcard_host) in wildcard_rdtypes.items(): - wildcard_tag = "error" - if is_wildcard == True: - event.add_tag("wildcard") - wildcard_tag = "wildcard" - event.add_tag(f"{rdtype.lower()}-{wildcard_tag}") - - # wildcard event modification (www.evilcorp.com --> _wildcard.evilcorp.com) - if not is_ip(event.host) and children: - if wildcard_rdtypes: - # these are the rdtypes that successfully resolve - resolved_rdtypes = set([c.upper() for c in children]) - # these are the rdtypes that have wildcards - wildcard_rdtypes_set = set(wildcard_rdtypes) - # consider the event a full wildcard if all its records are wildcards - event_is_wildcard = False - if resolved_rdtypes: - event_is_wildcard = all(r in wildcard_rdtypes_set for r in resolved_rdtypes) - - if event_is_wildcard: - if event.type in ("DNS_NAME",) and not "_wildcard" in event.data.split("."): - wildcard_parent = self.parent_helper.parent_domain(event_host) - for rdtype, (_is_wildcard, _parent_domain) in wildcard_rdtypes.items(): - if _is_wildcard: - wildcard_parent = _parent_domain - break - wildcard_data = f"_wildcard.{wildcard_parent}" - if wildcard_data != event.data: - log.debug( - f'Wildcard detected, changing event.data "{event.data}" --> "{wildcard_data}"' - ) - event.data = wildcard_data - # tag wildcard domains for convenience - elif is_domain(event_host) or hash(event_host) in self._wildcard_cache: - event_target = "target" in event.tags - wildcard_domain_results = await self.is_wildcard_domain(event_host, log_info=event_target) - for hostname, wildcard_domain_rdtypes in wildcard_domain_results.items(): - if wildcard_domain_rdtypes: - event.add_tag("wildcard-domain") - for rdtype, ips in wildcard_domain_rdtypes.items(): - event.add_tag(f"{rdtype.lower()}-wildcard-domain") - finally: - log.debug(f"Finished handle_wildcard_event({event}, children={children})") - - async def resolve_event(self, event, minimal=False): + >>> import asyncio + >>> async def example_usage(): + ... async for result in resolve_batch(['www.evilcorp.com', 'evilcorp.com']): + ... print(result) + ('www.evilcorp.com', {'1.1.1.1'}) + ('evilcorp.com', {'2.2.2.2'}) """ - Tag the given event with the appropriate DNS record types and optionally create child - events based on DNS resolutions. - - Args: - event (object): The event to be resolved and tagged. - minimal (bool, optional): If set to True, the function will perform minimal DNS - resolution. Defaults to False. + tasks = {} - Returns: - tuple: A 4-tuple containing the following items: - - event_tags (set): Set of tags for the event. - - event_whitelisted (bool): Whether the event is whitelisted. - - event_blacklisted (bool): Whether the event is blacklisted. - - dns_children (dict): Dictionary containing child events from DNS resolutions. 
- - Examples: - >>> event = make_event("evilcorp.com") - >>> resolve_event(event) - ({'resolved', 'ns-record', 'a-record',}, False, False, {'A': {IPv4Address('1.2.3.4'), IPv4Address('1.2.3.5')}, 'NS': {'ns1.evilcorp.com'}}) - - Note: - This method does not modify the passed in `event`. Instead, it returns data - that can be used to modify or act upon the `event`. - """ - log.debug(f"Resolving {event}") - event_host = str(event.host) - event_tags = set() - dns_children = dict() - event_whitelisted = False - event_blacklisted = False + def new_task(query): + task = asyncio.create_task(self.resolve(query, **kwargs)) + tasks[task] = query - try: - if (not event.host) or (event.type in ("IP_RANGE",)): - return event_tags, event_whitelisted, event_blacklisted, dns_children - - # lock to ensure resolution of the same host doesn't start while we're working here - async with self._event_cache_locks.lock(event_host): - # try to get data from cache - _event_tags, _event_whitelisted, _event_blacklisted, _dns_children = self.event_cache_get(event_host) - event_tags.update(_event_tags) - # if we found it, return it - if _event_whitelisted is not None: - return event_tags, _event_whitelisted, _event_blacklisted, _dns_children - - # then resolve - types = () - if self.parent_helper.is_ip(event.host): - if not minimal: - types = ("PTR",) - else: - if event.type == "DNS_NAME" and not minimal: - types = self.all_rdtypes - else: - types = ("A", "AAAA") - - if types: - for t in types: - resolved_raw, errors = await self.resolve_raw(event_host, type=t, use_cache=True) - for rdtype, e in errors: - if rdtype not in resolved_raw: - event_tags.add(f"{rdtype.lower()}-error") - for rdtype, records in resolved_raw: - rdtype = str(rdtype).upper() - if records: - event_tags.add("resolved") - event_tags.add(f"{rdtype.lower()}-record") - - # whitelisting and blacklisting of IPs - for r in records: - for _, t in self.extract_targets(r): - if t: - ip = self.parent_helper.make_ip_type(t) - - if rdtype in ("A", "AAAA", "CNAME"): - with contextlib.suppress(ValidationError): - if self.parent_helper.is_ip(ip): - if self.parent_helper.scan.whitelisted(ip): - event_whitelisted = True - with contextlib.suppress(ValidationError): - if self.parent_helper.scan.blacklisted(ip): - event_blacklisted = True - - if self.filter_bad_ptrs and rdtype in ("PTR") and self.parent_helper.is_ptr(t): - self.debug(f"Filtering out bad PTR: {t}") - continue - - try: - dns_children[rdtype].add(ip) - except KeyError: - dns_children[rdtype] = {ip} - - # tag with cloud providers - if not self.parent_helper.in_tests: - to_check = set() - if event.type == "IP_ADDRESS": - to_check.add(event.data) - for rdtype, ips in dns_children.items(): - if rdtype in ("A", "AAAA"): - for ip in ips: - to_check.add(ip) - for ip in to_check: - provider, provider_type, subnet = cloudcheck(ip) - if provider: - event_tags.add(f"{provider_type}-{provider}") - - # if needed, mark as unresolved - if not is_ip(event_host) and "resolved" not in event_tags: - event_tags.add("unresolved") - # check for private IPs - for rdtype, ips in dns_children.items(): - for ip in ips: - try: - ip = ipaddress.ip_address(ip) - if ip.is_private: - event_tags.add("private-ip") - except ValueError: - continue - - self._event_cache[event_host] = (event_tags, event_whitelisted, event_blacklisted, dns_children) - - return event_tags, event_whitelisted, event_blacklisted, dns_children - - finally: - log.debug(f"Finished resolving {event}") - - def event_cache_get(self, host): - """ - Retrieves cached event 
data based on the given host.
 
-        Args:
-            host (str): The host for which the event data is to be retrieved.
+        queries = list(queries)
+        for _ in range(threads):  # Start initial batch of tasks
+            if queries:  # Ensure there are queries to process
+                new_task(queries.pop(0))
 
-        Returns:
-            tuple: A 4-tuple containing the following items:
-                - event_tags (set): Set of tags for the event.
-                - event_whitelisted (bool or None): Whether the event is whitelisted. Returns None if not found.
-                - event_blacklisted (bool or None): Whether the event is blacklisted. Returns None if not found.
-                - dns_children (set): Set containing child events from DNS resolutions.
+        while tasks:  # While there are tasks pending
+            # Wait for the first task to complete
+            done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
 
-        Examples:
-            Assuming an event with host "www.evilcorp.com" has been cached:
+            for task in done:
+                results = task.result()
+                query = tasks.pop(task)
 
-            >>> event_cache_get("www.evilcorp.com")
-            ({"resolved", "a-record"}, False, False, {'1.2.3.4'})
+                if results:
+                    yield (query, results)
 
-            Assuming no event with host "www.notincache.com" has been cached:
+                if queries:  # Start a new task for each one completed, if queries remain
+                    new_task(queries.pop(0))
 
-            >>> event_cache_get("www.notincache.com")
-            (set(), None, None, set())
-        """
-        try:
-            event_tags, event_whitelisted, event_blacklisted, dns_children = self._event_cache[host]
-            return (event_tags, event_whitelisted, event_blacklisted, dns_children)
-        except KeyError:
-            return set(), None, None, set()
+    async def resolve_raw_batch(self, queries, threads=10):
+        tasks = {}
 
-    async def resolve_batch(self, queries, **kwargs):
-        """
-        A helper to execute a bunch of DNS requests.
+        def new_task(query, rdtype):
+            task = asyncio.create_task(self.resolve_raw(query, type=rdtype))
+            tasks[task] = (query, rdtype)
 
-        Args:
-            queries (list): List of queries to resolve.
-            **kwargs: Additional keyword arguments to pass to `resolve()`.
+        queries = list(queries)
+        for _ in range(threads):  # Start initial batch of tasks
+            if queries:  # Ensure there are queries to process
+                new_task(*queries.pop(0))
 
-        Yields:
-            tuple: A tuple containing the original query and its resolved value.
+        while tasks:  # While there are tasks pending
+            # Wait for the first task to complete
+            done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
 
-        Examples:
-            >>> import asyncio
-            >>> async def example_usage():
-            ...     async for result in resolve_batch(['www.evilcorp.com', 'evilcorp.com']):
-            ...         print(result)
-            ('www.evilcorp.com', {'1.1.1.1'})
-            ('evilcorp.com', {'2.2.2.2'})
+            for task in done:
+                answers, errors = task.result()
+                query, rdtype = tasks.pop(task)
 
+                results = set()
+                for answer in answers:
+                    for _rdtype, host in self.extract_targets(answer):
+                        results.add((host, _rdtype))
+                if results or errors:
+                    yield ((query, rdtype), (results, errors))
 
-        """
-        for q in queries:
-            yield (q, await self.resolve(q, **kwargs))
+                if queries:  # Start a new task for each one completed, if queries remain
+                    new_task(*queries.pop(0))
 
-    def extract_targets(self, record):
+    @staticmethod
+    def extract_targets(record):
         """
         Extracts hostnames or IP addresses from a given DNS record. 
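Both batch helpers above share the same replenishing-pool pattern: seed up to `threads` tasks, wait for the first completion, then start one replacement task per finished task, so at most `threads` resolutions are ever in flight. A minimal, self-contained sketch of the pattern follows; the `resolve_one()` coroutine is a hypothetical stand-in for the engine's resolver, not part of BBOT:

```python
import asyncio


async def resolve_one(query):
    # hypothetical stand-in for DNSEngine.resolve()
    await asyncio.sleep(0.01)
    return {f"answer-for-{query}"}


async def batch(queries, threads=10):
    queries = list(queries)
    tasks = {}

    def new_task(query):
        # map each task back to the query that spawned it
        tasks[asyncio.create_task(resolve_one(query))] = query

    # seed the pool with up to `threads` tasks
    for _ in range(threads):
        if queries:
            new_task(queries.pop(0))

    while tasks:
        # wait for at least one completion, then top the pool back up
        done, _ = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
        for task in done:
            query = tasks.pop(task)
            yield query, task.result()
            if queries:
                new_task(queries.pop(0))


async def main():
    async for query, answers in batch(["www.evilcorp.com", "evilcorp.com"], threads=2):
        print(query, answers)


asyncio.run(main())
```

The dict keyed by task is what lets the generator yield each result tagged with its original query, even though completions arrive out of order.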
@@ -675,55 +430,34 @@ def extract_targets(self, record): """ results = set() + + def add_result(rdtype, _record): + cleaned = clean_dns_record(_record) + if cleaned: + results.add((rdtype, cleaned)) + rdtype = str(record.rdtype.name).upper() if rdtype in ("A", "AAAA", "NS", "CNAME", "PTR"): - results.add((rdtype, self._clean_dns_record(record))) + add_result(rdtype, record) elif rdtype == "SOA": - results.add((rdtype, self._clean_dns_record(record.mname))) + add_result(rdtype, record.mname) elif rdtype == "MX": - results.add((rdtype, self._clean_dns_record(record.exchange))) + add_result(rdtype, record.exchange) elif rdtype == "SRV": - results.add((rdtype, self._clean_dns_record(record.target))) + add_result(rdtype, record.target) elif rdtype == "TXT": for s in record.strings: - s = self.parent_helper.smart_decode(s) + s = smart_decode(s) for match in dns_name_regex.finditer(s): start, end = match.span() host = s[start:end] - results.add((rdtype, host)) + add_result(rdtype, host) elif rdtype == "NSEC": - results.add((rdtype, self._clean_dns_record(record.next))) + add_result(rdtype, record.next) else: log.warning(f'Unknown DNS record type "{rdtype}"') return results - @staticmethod - def _clean_dns_record(record): - """ - Cleans and formats a given DNS record for further processing. - - This static method converts the DNS record to text format if it's not already a string. - It also removes any trailing dots and converts the record to lowercase. - - Args: - record (str or dns.rdata.Rdata): The DNS record to clean. - - Returns: - str: The cleaned and formatted DNS record. - - Examples: - >>> _clean_dns_record('www.evilcorp.com.') - 'www.evilcorp.com' - - >>> from dns.rrset import from_text - >>> record = from_text('www.evilcorp.com', 3600, 'IN', 'A', '1.2.3.4')[0] - >>> _clean_dns_record(record) - '1.2.3.4' - """ - if not isinstance(record, str): - record = str(record.to_text()) - return str(record).rstrip(".").lower() - async def _catch(self, callback, *args, **kwargs): """ Asynchronously catches exceptions thrown during DNS resolution and logs them. @@ -791,55 +525,31 @@ async def is_wildcard(self, query, ips=None, rdtype=None): """ result = {} - if [ips, rdtype].count(None) == 1: - raise ValueError("Both ips and rdtype must be specified") - - if not is_dns_name(query): - return {} - - # skip check if the query's parent domain is excluded in the config - for d in self.wildcard_ignore: - if self.parent_helper.host_in_host(query, d): - log.debug(f"Skipping wildcard detection on {query} because it is excluded in the config") - return {} - - query = self._clean_dns_record(query) - # skip check if it's an IP - if is_ip(query) or not "." 
in query: - return {} - # skip check if the query is a domain - if is_domain(query): - return {} - parent = parent_domain(query) parents = list(domain_parents(query)) - rdtypes_to_check = [rdtype] if rdtype is not None else self.all_rdtypes + rdtypes_to_check = [rdtype] if rdtype is not None else all_rdtypes - base_query_ips = dict() + query_baseline = dict() # if the caller hasn't already done the work of resolving the IPs if ips is None: # then resolve the query for all rdtypes - for t in rdtypes_to_check: - raw_results, errors = await self.resolve_raw(query, type=t, use_cache=True) - if errors and not raw_results: - self.debug(f"Failed to resolve {query} ({t}) during wildcard detection") - result[t] = (None, parent) - continue - for __rdtype, answers in raw_results: - base_query_results = set() - for answer in answers: - for _, t in self.extract_targets(answer): - base_query_results.add(t) - if base_query_results: - base_query_ips[__rdtype] = base_query_results + queries = [(query, t) for t in rdtypes_to_check] + async for (query, _rdtype), (answers, errors) in self.resolve_raw_batch(queries): + if answers: + query_baseline[_rdtype] = set([a[0] for a in answers]) + else: + if errors: + self.debug(f"Failed to resolve {query} ({_rdtype}) during wildcard detection") + result[_rdtype] = (None, parent) + continue else: # otherwise, we can skip all that - cleaned_ips = set([self._clean_dns_record(ip) for ip in ips]) + cleaned_ips = set([clean_dns_record(ip) for ip in ips]) if not cleaned_ips: raise ValueError("Valid IPs must be specified") - base_query_ips[rdtype] = cleaned_ips - if not base_query_ips: + query_baseline[rdtype] = cleaned_ips + if not query_baseline: return result # once we've resolved the base query and have IP addresses to work with @@ -852,9 +562,9 @@ async def is_wildcard(self, query, ips=None, rdtype=None): await self.is_wildcard_domain(host) # for every rdtype - for _rdtype in list(base_query_ips): + for _rdtype in list(query_baseline): # get the IPs from above - query_ips = base_query_ips.get(_rdtype, set()) + query_ips = query_baseline.get(_rdtype, set()) host_hash = hash(host) if host_hash in self._wildcard_cache: @@ -871,13 +581,14 @@ async def is_wildcard(self, query, ips=None, rdtype=None): result[_rdtype] = (True, host) # if we've reached a point where the dns name is a complete wildcard, class can be dismissed early - base_query_rdtypes = set(base_query_ips) + base_query_rdtypes = set(query_baseline) wildcard_rdtypes_set = set([k for k, v in result.items() if v[0] is True]) if base_query_rdtypes and wildcard_rdtypes_set and base_query_rdtypes == wildcard_rdtypes_set: log.debug( f"Breaking from wildcard detection for {query} at {host} because base query rdtypes ({base_query_rdtypes}) == wildcard rdtypes ({wildcard_rdtypes_set})" ) raise DNSWildcardBreak() + except DNSWildcardBreak: pass @@ -905,18 +616,8 @@ async def is_wildcard_domain(self, domain, log_info=False): {} """ wildcard_domain_results = {} - domain = self._clean_dns_record(domain) - - if not is_dns_name(domain): - return {} - - # skip check if the query's parent domain is excluded in the config - for d in self.wildcard_ignore: - if self.parent_helper.host_in_host(domain, d): - log.debug(f"Skipping wildcard detection on {domain} because it is excluded in the config") - return {} - rdtypes_to_check = set(self.all_rdtypes) + rdtypes_to_check = set(all_rdtypes) # make a list of its parents parents = list(domain_parents(domain, include_self=True)) @@ -937,22 +638,23 @@ async def is_wildcard_domain(self, 
domain, log_info=False): # resolve a bunch of random subdomains of the same parent is_wildcard = False wildcard_results = dict() - for rdtype in list(rdtypes_to_check): - # continue if a wildcard was already found for this rdtype - # if rdtype in self._wildcard_cache[host_hash]: - # continue + + queries = [] + for rdtype in rdtypes_to_check: for _ in range(self.wildcard_tests): rand_query = f"{rand_string(digits=False, length=10)}.{host}" - results = await self.resolve(rand_query, type=rdtype, use_cache=False) - if results: - is_wildcard = True - if not rdtype in wildcard_results: - wildcard_results[rdtype] = set() - wildcard_results[rdtype].update(results) - # we know this rdtype is a wildcard - # so we don't need to check it anymore - with suppress(KeyError): - rdtypes_to_check.remove(rdtype) + queries.append((rand_query, rdtype)) + + async for (query, rdtype), (answers, errors) in self.resolve_raw_batch(queries): + if answers: + is_wildcard = True + if not rdtype in wildcard_results: + wildcard_results[rdtype] = set() + wildcard_results[rdtype].update(set(a[0] for a in answers)) + # we know this rdtype is a wildcard + # so we don't need to check it anymore + with suppress(KeyError): + rdtypes_to_check.remove(rdtype) self._wildcard_cache.update({host_hash: wildcard_results}) wildcard_domain_results.update({host: wildcard_results}) @@ -998,25 +700,15 @@ async def _connectivity_check(self, interval=5): self._errors.clear() return False - def _parse_rdtype(self, t, default=None): - if isinstance(t, str): - if t.strip().lower() in ("any", "all", "*"): - return self.all_rdtypes - else: - return [t.strip().upper()] - elif any([isinstance(t, x) for x in (list, tuple)]): - return [str(_).strip().upper() for _ in t] - return default - def debug(self, *args, **kwargs): if self._debug: log.trace(*args, **kwargs) - def _get_dummy_module(self, name): - try: - dummy_module = self._dummy_modules[name] - except KeyError: - dummy_module = self.parent_helper._make_dummy_module(name=name, _type="DNS") - dummy_module.suppress_dupes = False - self._dummy_modules[name] = dummy_module - return dummy_module + @property + def in_tests(self): + return os.getenv("BBOT_TESTING", "") == "True" + + async def _mock_dns(self, mock_data): + from .mock import MockResolver + + self.resolver = MockResolver(mock_data) diff --git a/bbot/core/helpers/dns/mock.py b/bbot/core/helpers/dns/mock.py new file mode 100644 index 000000000..70d978aff --- /dev/null +++ b/bbot/core/helpers/dns/mock.py @@ -0,0 +1,56 @@ +import dns + + +class MockResolver: + + def __init__(self, mock_data=None): + self.mock_data = mock_data if mock_data else {} + self.nameservers = ["127.0.0.1"] + + async def resolve_address(self, ipaddr, *args, **kwargs): + modified_kwargs = {} + modified_kwargs.update(kwargs) + modified_kwargs["rdtype"] = "PTR" + return await self.resolve(str(dns.reversename.from_address(ipaddr)), *args, **modified_kwargs) + + def create_dns_response(self, query_name, rdtype): + query_name = query_name.strip(".") + answers = self.mock_data.get(query_name, {}).get(rdtype, []) + if not answers: + raise dns.resolver.NXDOMAIN(f"No answer found for {query_name} {rdtype}") + + message_text = f"""id 1234 +opcode QUERY +rcode NOERROR +flags QR AA RD +;QUESTION +{query_name}. IN {rdtype} +;ANSWER""" + for answer in answers: + message_text += f"\n{query_name}. 
1 IN {rdtype} {answer}"
+
+        message_text += "\n;AUTHORITY\n;ADDITIONAL\n"
+        message = dns.message.from_text(message_text)
+        return message
+
+    async def resolve(self, query_name, rdtype=None):
+        if rdtype is None:
+            rdtype = "A"
+        elif isinstance(rdtype, str):
+            rdtype = rdtype.upper()
+        else:
+            rdtype = str(rdtype.name).upper()
+
+        domain_name = dns.name.from_text(query_name)
+        rdtype_obj = dns.rdatatype.from_text(rdtype)
+
+        if "_NXDOMAIN" in self.mock_data and query_name in self.mock_data["_NXDOMAIN"]:
+            # Simulate the NXDOMAIN exception
+            raise dns.resolver.NXDOMAIN
+
+        try:
+            response = self.create_dns_response(query_name, rdtype)
+            answer = dns.resolver.Answer(domain_name, rdtype_obj, dns.rdataclass.IN, response)
+            return answer
+        except dns.resolver.NXDOMAIN:
+            return []
diff --git a/bbot/core/helpers/helper.py b/bbot/core/helpers/helper.py
index 899f3ab0b..16afc05cd 100644
--- a/bbot/core/helpers/helper.py
+++ b/bbot/core/helpers/helper.py
@@ -1,19 +1,22 @@
 import os
+import asyncio
 import logging
 from pathlib import Path
+import multiprocessing as mp
+from functools import partial
+from cloudcheck import cloud_providers
+from concurrent.futures import ProcessPoolExecutor
 
 from . import misc
 from .dns import DNSHelper
 from .web import WebHelper
 from .diff import HttpCompare
-from .cloud import CloudHelper
+from .regex import RegexHelper
 from .wordcloud import WordCloud
 from .interactsh import Interactsh
 from ...scanner.target import Target
-from ...modules.base import BaseModule
 from .depsinstaller import DepsInstaller
 
-
 log = logging.getLogger("bbot.core.helpers")
 
@@ -51,10 +54,9 @@ class ConfigAwareHelper:
     from .cache import cache_get, cache_put, cache_filename, is_cached
     from .command import run, run_live, _spawn_proc, _prepare_command_kwargs
 
-    def __init__(self, config, scan=None):
-        self.config = config
-        self._scan = scan
-        self.bbot_home = Path(self.config.get("home", "~/.bbot")).expanduser().resolve()
+    def __init__(self, preset):
+        self.preset = preset
+        self.bbot_home = self.preset.bbot_home
         self.cache_dir = self.bbot_home / "cache"
         self.temp_dir = self.bbot_home / "temp"
         self.tools_dir = self.bbot_home / "tools"
@@ -68,15 +70,27 @@ def __init__(self, config, scan=None):
         self.mkdir(self.tools_dir)
         self.mkdir(self.lib_dir)
 
+        self._loop = None
+
+        # multiprocessing process pool
+        start_method = mp.get_start_method()
+        if start_method != "spawn":
+            self.warning(f"Multiprocessing start method is set to {start_method}.")
+
+        # we spawn one fewer process than there are cores
+        # this helps to avoid locking up the system or competing with the main python process for cpu time
+        num_processes = max(1, mp.cpu_count() - 1)
+        self.process_pool = ProcessPoolExecutor(max_workers=num_processes)
+
+        self.cloud = cloud_providers
+
+        self.re = RegexHelper(self)
         self.dns = DNSHelper(self)
         self.web = WebHelper(self)
         self.depsinstaller = DepsInstaller(self)
         self.word_cloud = WordCloud(self)
         self.dummy_modules = {}
 
-        # cloud helpers
-        self.cloud = CloudHelper(self)
-
     def interactsh(self, *args, **kwargs):
         return Interactsh(self, *args, **kwargs)
 
@@ -97,30 +111,51 @@ def clean_old_scans(self):
         self.clean_old(self.scans_dir, keep=self.keep_old_scans, filter=_filter)
 
     def make_target(self, *events):
-        return Target(self.scan, *events)
+        return Target(*events)
 
     @property
-    def scan(self):
-        if self._scan is None:
-            from bbot.scanner import Scanner
+    def config(self):
+        return self.preset.config
 
-            self._scan = Scanner()
-        return self._scan
+    @property
+    def scan(self):
+        return self.preset.scan
 
     @property
-    def 
in_tests(self): - return os.environ.get("BBOT_TESTING", "") == "True" + def loop(self): + """ + Get the current event loop + """ + if self._loop is None: + self._loop = asyncio.get_running_loop() + return self._loop - def _make_dummy_module(self, name, _type="scan"): + def run_in_executor(self, callback, *args, **kwargs): """ - Construct a dummy module, for attachment to events + Run a synchronous task in the event loop's default thread pool executor + + Examples: + Execute callback: + >>> result = await self.helpers.run_in_executor(callback_fn, arg1, arg2) """ - try: - return self.dummy_modules[name] - except KeyError: - dummy = DummyModule(scan=self.scan, name=name, _type=_type) - self.dummy_modules[name] = dummy - return dummy + callback = partial(callback, **kwargs) + return self.loop.run_in_executor(None, callback, *args) + + def run_in_executor_mp(self, callback, *args, **kwargs): + """ + Same as run_in_executor() except with a process pool executor + Use only in cases where callback is CPU-bound + + Examples: + Execute callback: + >>> result = await self.helpers.run_in_executor_mp(callback_fn, arg1, arg2) + """ + callback = partial(callback, **kwargs) + return self.loop.run_in_executor(self.process_pool, callback, *args) + + @property + def in_tests(self): + return os.environ.get("BBOT_TESTING", "") == "True" def __getattribute__(self, attr): """ @@ -163,12 +198,3 @@ def __getattribute__(self, attr): except AttributeError: # then die raise AttributeError(f'Helper has no attribute "{attr}"') - - -class DummyModule(BaseModule): - _priority = 4 - - def __init__(self, *args, **kwargs): - self._name = kwargs.pop("name") - self._type = kwargs.pop("_type") - super().__init__(*args, **kwargs) diff --git a/bbot/core/helpers/interactsh.py b/bbot/core/helpers/interactsh.py index aad4a169f..f707fac93 100644 --- a/bbot/core/helpers/interactsh.py +++ b/bbot/core/helpers/interactsh.py @@ -11,7 +11,7 @@ from Crypto.PublicKey import RSA from Crypto.Cipher import AES, PKCS1_OAEP -from bbot.core.errors import InteractshError +from bbot.errors import InteractshError log = logging.getLogger("bbot.core.helpers.interactsh") diff --git a/bbot/core/helpers/misc.py b/bbot/core/helpers/misc.py index 3e8f5930d..206fc50f0 100644 --- a/bbot/core/helpers/misc.py +++ b/bbot/core/helpers/misc.py @@ -1,41 +1,21 @@ import os -import re import sys -import copy -import idna import json -import atexit -import codecs -import psutil import random -import shutil -import signal import string import asyncio -import difflib -import inspect import logging -import platform import ipaddress -import traceback +import regex as re import subprocess as sp from pathlib import Path -from itertools import islice -from datetime import datetime -from tabulate import tabulate -import wordninja as _wordninja from contextlib import suppress -import cloudcheck as _cloudcheck -import tldextract as _tldextract -import xml.etree.ElementTree as ET -from collections.abc import Mapping -from hashlib import sha1 as hashlib_sha1 +from unidecode import unidecode # noqa F401 from asyncio import create_task, gather, sleep, wait_for # noqa from urllib.parse import urlparse, quote, unquote, urlunparse, urljoin # noqa F401 from .url import * # noqa F401 -from .. import errors -from .logger import log_to_stderr +from ... import errors from . import regexes as bbot_regexes from .names_generator import random_name, names, adjectives # noqa F401 @@ -478,6 +458,8 @@ def tldextract(data): - Utilizes `smart_decode` to preprocess the data. 
- Makes use of the `tldextract` library for extraction.
     """
+    import tldextract as _tldextract
+
     return _tldextract.extract(smart_decode(data))
 
 
@@ -655,7 +637,7 @@ def is_ip_type(i):
         >>> is_ip_type("192.168.1.0/24")
         False
     """
-    return isinstance(i, ipaddress._BaseV4) or isinstance(i, ipaddress._BaseV6)
+    return ipaddress._IPAddressBase in i.__class__.__mro__
 
 
 def make_ip_type(s):
@@ -681,78 +663,17 @@ def make_ip_type(s):
         >>> make_ip_type("evilcorp.com")
         'evilcorp.com'
     """
+    if not s:
+        raise ValueError(f'Invalid hostname: "{s}"')
     # IP address
     with suppress(Exception):
-        return ipaddress.ip_address(str(s).strip())
+        return ipaddress.ip_address(s)
     # IP network
     with suppress(Exception):
-        return ipaddress.ip_network(str(s).strip(), strict=False)
+        return ipaddress.ip_network(s, strict=False)
     return s
 
 
-def host_in_host(host1, host2):
-    """
-    Checks if host1 is included within host2, either as a subdomain, IP, or IP network.
-    Used for scope calculations/decisions within BBOT.
-
-    Args:
-        host1 (str or ipaddress.IPv4Address or ipaddress.IPv6Address or ipaddress.IPv4Network or ipaddress.IPv6Network):
-            The host to check for inclusion within host2.
-        host2 (str or ipaddress.IPv4Address or ipaddress.IPv6Address or ipaddress.IPv4Network or ipaddress.IPv6Network):
-            The host within which to check for the inclusion of host1.
-
-    Returns:
-        bool: True if host1 is included in host2, otherwise False.
-
-    Examples:
-        >>> host_in_host("www.evilcorp.com", "evilcorp.com")
-        True
-        >>> host_in_host("evilcorp.com", "www.evilcorp.com")
-        False
-        >>> host_in_host(ipaddress.IPv6Address('dead::beef'), ipaddress.IPv6Network('dead::/64'))
-        True
-        >>> host_in_host(ipaddress.IPv4Address('192.168.1.1'), ipaddress.IPv4Network('10.0.0.0/8'))
-        False
-
-    Notes:
-        - If checking an IP address/network, you MUST FIRST convert your IP into an ipaddress object (e.g. via `make_ip_type()`) before passing it to this function.
-    """
-
-    """
-    Is host1 included in host2?
-    "www.evilcorp.com" in "evilcorp.com"? --> True
-    "evilcorp.com" in "www.evilcorp.com"? --> False
-    IPv6Address('dead::beef') in IPv6Network('dead::/64')? --> True
-    IPv4Address('192.168.1.1') in IPv4Network('10.0.0.0/8')? --> False
-
-    Very important! Used throughout BBOT for scope calculations/decisions.
-
-    Works with hostnames, IPs, and IP networks.
-    """
-
-    if not host1 or not host2:
-        return False
-
-    # check if hosts are IP types
-    host1_ip_type = is_ip_type(host1)
-    host2_ip_type = is_ip_type(host2)
-    # if both hosts are IP types
-    if host1_ip_type and host2_ip_type:
-        if not host1.version == host2.version:
-            return False
-        host1_net = ipaddress.ip_network(host1)
-        host2_net = ipaddress.ip_network(host2)
-        return host1_net.subnet_of(host2_net)
-
-    # else hostnames
-    elif not (host1_ip_type or host2_ip_type):
-        host2_len = len(host2.split("."))
-        host1_truncated = ".".join(host1.split(".")[-host2_len:])
-        return host1_truncated == host2
-
-    return False
-
-
 def sha1(data):
     """
     Computes the SHA-1 hash of the given data.
@@ -767,6 +688,8 @@ def sha1(data):
         >>> sha1("asdf").hexdigest()
         '3da541559918a808c2402bba5012f6c60b27661c'
     """
+    from hashlib import sha1 as hashlib_sha1
+
     if isinstance(data, dict):
         data = json.dumps(data, sort_keys=True)
     return hashlib_sha1(smart_encode(data))
@@ -840,6 +763,8 @@ def recursive_decode(data, max_depth=5):
         >>> recursive_decode("%5Cu0020%5Cu041f%5Cu0440%5Cu0438%5Cu0432%5Cu0435%5Cu0442%5Cu0021")
         " Привет!" 
""" + import codecs + # Decode newline and tab escapes data = backslash_regex.sub( lambda match: {"n": "\n", "t": "\t", "r": "\r", "b": "\b", "v": "\v"}.get(match.group("char")), data @@ -954,6 +879,8 @@ def extract_params_xml(xml_data): >>> extract_params_xml('') {'child1', 'child2', 'root'} """ + import xml.etree.ElementTree as ET + try: root = ET.fromstring(xml_data) except ET.ParseError: @@ -1046,6 +973,7 @@ def extract_words(data, acronyms=True, wordninja=True, model=None, max_length=10 >>> extract_words('blacklanternsecurity') {'black', 'lantern', 'security', 'bls', 'blacklanternsecurity'} """ + import wordninja as _wordninja if word_regexes is None: word_regexes = bbot_regexes.word_regexes @@ -1102,6 +1030,8 @@ def closest_match(s, choices, n=1, cutoff=0.0): >>> closest_match("asdf", ["asd", "fds", "asdff"], n=3) ['asdff', 'asd', 'fds'] """ + import difflib + matches = difflib.get_close_matches(s, choices, n=n, cutoff=cutoff) if not choices or not matches: return @@ -1110,8 +1040,8 @@ def closest_match(s, choices, n=1, cutoff=0.0): return matches -def match_and_exit(s, choices, msg=None, loglevel="HUGEWARNING", exitcode=2): - """Finds the closest match from a list of choices for a given string, logs a warning, and exits the program. +def get_closest_match(s, choices, msg=None): + """Finds the closest match from a list of choices for a given string. This function is particularly useful for CLI applications where you want to validate flags or modules. @@ -1123,27 +1053,32 @@ def match_and_exit(s, choices, msg=None, loglevel="HUGEWARNING", exitcode=2): exitcode (int, optional): The exit code to use when exiting the program. Defaults to 2. Examples: - >>> match_and_exit("some_module", ["some_mod", "some_other_mod"], msg="module") + >>> get_closest_match("some_module", ["some_mod", "some_other_mod"], msg="module") # Output: Could not find module "some_module". Did you mean "some_mod"? - # Exits with code 2 """ if msg is None: msg = "" else: msg += " " closest = closest_match(s, choices) - log_to_stderr(f'Could not find {msg}"{s}". Did you mean "{closest}"?', level="HUGEWARNING") - sys.exit(2) + return f'Could not find {msg}"{s}". Did you mean "{closest}"?' -def kill_children(parent_pid=None, sig=signal.SIGTERM): +def kill_children(parent_pid=None, sig=None): """ Forgive me father for I have sinned """ + import psutil + import signal + + if sig is None: + sig = signal.SIGTERM + try: parent = psutil.Process(parent_pid) except psutil.NoSuchProcess: log.debug(f"No such PID: {parent_pid}") + return log.debug(f"Killing children of process ID {parent.pid}") children = parent.children(recursive=True) for child in children: @@ -1155,6 +1090,7 @@ def kill_children(parent_pid=None, sig=signal.SIGTERM): log.debug(f"No such PID: {child.pid}") except psutil.AccessDenied: log.debug(f"Error killing PID: {child.pid} - access denied") + log.debug(f"Finished killing children of process ID {parent.pid}") def str_or_file(s): @@ -1262,6 +1198,8 @@ def rm_at_exit(path): Examples: >>> rm_at_exit("/tmp/test/file1.txt") """ + import atexit + atexit.register(delete_file, path) @@ -1375,6 +1313,8 @@ def which(*executables): >>> which("python", "python3") "/usr/bin/python" """ + import shutil + for e in executables: location = shutil.which(e) if location: @@ -1473,74 +1413,6 @@ def search_dict_values(d, *regexes): yield from search_dict_values(v, *regexes) -def filter_dict(d, *key_names, fuzzy=False, exclude_keys=None, _prev_key=None): - """ - Recursively filter a dictionary based on key names. 
- - Args: - d (dict): The input dictionary. - *key_names: Names of keys to filter for. - fuzzy (bool): Whether to perform fuzzy matching on keys. - exclude_keys (list, None): List of keys to be excluded from the final dict. - _prev_key (str, None): For internal recursive use; the previous key in the hierarchy. - - Returns: - dict: A dictionary containing only the keys specified in key_names. - - Examples: - >>> filter_dict({"key1": "test", "key2": "asdf"}, "key2") - {"key2": "asdf"} - >>> filter_dict({"key1": "test", "key2": {"key3": "asdf"}}, "key1", "key3", exclude_keys="key2") - {'key1': 'test'} - """ - if exclude_keys is None: - exclude_keys = [] - if isinstance(exclude_keys, str): - exclude_keys = [exclude_keys] - ret = {} - if isinstance(d, dict): - for key in d: - if key in key_names or (fuzzy and any(k in key for k in key_names)): - if not any(k in exclude_keys for k in [key, _prev_key]): - ret[key] = copy.deepcopy(d[key]) - elif isinstance(d[key], list) or isinstance(d[key], dict): - child = filter_dict(d[key], *key_names, fuzzy=fuzzy, _prev_key=key, exclude_keys=exclude_keys) - if child: - ret[key] = child - return ret - - -def clean_dict(d, *key_names, fuzzy=False, exclude_keys=None, _prev_key=None): - """ - Recursively clean unwanted keys from a dictionary. - Useful for removing secrets from a config. - - Args: - d (dict): The input dictionary. - *key_names: Names of keys to remove. - fuzzy (bool): Whether to perform fuzzy matching on keys. - exclude_keys (list, None): List of keys to be excluded from removal. - _prev_key (str, None): For internal recursive use; the previous key in the hierarchy. - - Returns: - dict: A dictionary cleaned of the keys specified in key_names. - - """ - if exclude_keys is None: - exclude_keys = [] - if isinstance(exclude_keys, str): - exclude_keys = [exclude_keys] - d = copy.deepcopy(d) - if isinstance(d, dict): - for key, val in list(d.items()): - if key in key_names or (fuzzy and any(k in key for k in key_names)): - if _prev_key not in exclude_keys: - d.pop(key) - else: - d[key] = clean_dict(val, *key_names, fuzzy=fuzzy, _prev_key=key, exclude_keys=exclude_keys) - return d - - def grouper(iterable, n): """ Grouper groups an iterable into chunks of a given size. @@ -1556,6 +1428,7 @@ def grouper(iterable, n): >>> list(grouper('ABCDEFG', 3)) [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']] """ + from itertools import islice iterable = iter(iterable) return iter(lambda: list(islice(iterable, n)), []) @@ -1634,6 +1507,8 @@ def make_date(d=None, microseconds=False): >>> make_date(microseconds=True) "20220707_1330_35167617" """ + from datetime import datetime + f = "%Y%m%d_%H%M_%S" if microseconds: f += "%f" @@ -1767,6 +1642,8 @@ def rm_rf(f): Examples: >>> rm_rf("/tmp/httpx98323849") """ + import shutil + shutil.rmtree(f) @@ -1886,6 +1763,8 @@ def smart_encode_punycode(text: str) -> str: """ ドメイン.テスト --> xn--eckwd4c7c.xn--zckzah """ + import idna + host, before, after = extract_host(text) if host is None: return text @@ -1902,6 +1781,8 @@ def smart_decode_punycode(text: str) -> str: """ xn--eckwd4c7c.xn--zckzah --> ドメイン.テスト """ + import idna + host, before, after = extract_host(text) if host is None: return text @@ -1967,7 +1848,7 @@ def verify_sudo_password(sudo_pass): return True -def make_table(rows, header, *args, **kwargs): +def make_table(rows, header, **kwargs): """Generate a formatted table from the given rows and headers. This function uses the `tabulate` package to generate a table with formatting options. 
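A pattern that repeats throughout this file's hunks: module-level imports of heavyweight dependencies (`tabulate`, `psutil`, `shutil`, `idna`, and so on) move inside the functions that use them, so importing `bbot.core.helpers.misc` stays cheap and only code paths that actually need a dependency pay its import cost. A rough sketch of the trade-off, with the module choice illustrative rather than a measured claim:

```python
import time


def make_table_lazy(rows, header):
    # deferred import: the cost is paid on the first call, not at module import
    from tabulate import tabulate

    return tabulate(rows, header, tablefmt="grid")


start = time.perf_counter()
print(make_table_lazy([["row1", "row2"]], ["header1", "header2"]))
# the first call includes tabulate's import time; later calls hit sys.modules
print(f"first call took {time.perf_counter() - start:.3f}s")
```

Python caches imports in `sys.modules`, so after the first call the function-local `import` statement costs only a dictionary lookup.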
@@ -1993,9 +1874,11 @@
     | row2      | row2      |
     +-----------+-----------+
     """
+    from tabulate import tabulate
+
     # fix IndexError: list index out of range
-    if args and not args[0]:
-        args = ([[]],) + args[1:]
+    if not rows:
+        rows = [[]]
     tablefmt = os.environ.get("BBOT_TABLE_FORMAT", None)
     defaults = {"tablefmt": "grid", "disable_numparse": True, "maxcolwidths": None}
     if tablefmt is None:
@@ -2015,7 +1898,7 @@ def markdown_escape(s):
         rows = [[markdown_escape(f) for f in row] for row in rows]
         header = [markdown_escape(h) for h in header]
 
-    return tabulate(rows, header, *args, **kwargs)
+    return tabulate(rows, header, **kwargs)
 
 
 def human_timedelta(d):
@@ -2142,6 +2025,8 @@ def cpu_architecture():
         >>> cpu_architecture()
         'amd64'
     """
+    import platform
+
     uname = platform.uname()
     arch = uname.machine.lower()
     if arch.startswith("aarch"):
@@ -2164,6 +2049,8 @@ def os_platform():
         >>> os_platform()
         'linux'
     """
+    import platform
+
     return platform.system().lower()
 
 
@@ -2232,6 +2119,8 @@ def memory_status():
         >>> mem.percent
         79.0
     """
+    import psutil
+
     return psutil.virtual_memory()
 
 
@@ -2254,6 +2143,8 @@ def swap_status():
         >>> swap.used
         2097152
     """
+    import psutil
+
     return psutil.swap_memory()
 
 
@@ -2276,6 +2167,8 @@ def get_size(obj, max_depth=5, seen=None):
         >>> get_size(my_dict, max_depth=3)
         8400
     """
+    from collections.abc import Mapping
+
     # If seen is not provided, initialize an empty set
     if seen is None:
         seen = set()
@@ -2351,6 +2244,8 @@ def cloudcheck(ip):
         >>> cloudcheck("168.62.20.37")
         ('Azure', 'cloud', IPv4Network('168.62.0.0/19'))
     """
+    import cloudcheck as _cloudcheck
+
     return _cloudcheck.check(ip)
 
 
@@ -2370,6 +2265,8 @@ def is_async_function(f):
         >>> is_async_function(foo)
         True
     """
+    import inspect
+
     return inspect.iscoroutinefunction(f)
 
 
@@ -2448,6 +2345,8 @@ def get_traceback_details(e):
         ...     print(f"File: {filename}, Line: {lineno}, Function: {funcname}")
         File: <stdin>, Line: 2, Function: <module>
     """
+    import traceback
+
     tb = traceback.extract_tb(e.__traceback__)
     last_frame = tb[-1]  # Get the last frame in the traceback (the one where the exception was raised)
     filename = last_frame.filename
@@ -2486,6 +2385,8 @@ async def cancel_tasks(tasks, ignore_errors=True):
                 await task
             except BaseException as e:
                 if not isinstance(e, asyncio.CancelledError):
+                    import traceback
+
                     log.trace(traceback.format_exc())
 
 
@@ -2663,3 +2564,30 @@ async def as_completed(coros):
         for task in done:
             tasks.pop(task)
             yield task
+
+
+def clean_dns_record(record):
+    """
+    Cleans and formats a given DNS record for further processing.
+
+    This function converts the DNS record to text format if it's not already a string.
+    It also removes any trailing dots and converts the record to lowercase.
+
+    Args:
+        record (str or dns.rdata.Rdata): The DNS record to clean.
+
+    Returns:
+        str: The cleaned and formatted DNS record. 
+ + Examples: + >>> clean_dns_record('www.evilcorp.com.') + 'www.evilcorp.com' + + >>> from dns.rrset import from_text + >>> record = from_text('www.evilcorp.com', 3600, 'IN', 'A', '1.2.3.4')[0] + >>> clean_dns_record(record) + '1.2.3.4' + """ + if not isinstance(record, str): + record = str(record.to_text()) + return str(record).rstrip(".").lower() diff --git a/bbot/core/helpers/ntlm.py b/bbot/core/helpers/ntlm.py index 8605ef34a..9d66b3ea7 100644 --- a/bbot/core/helpers/ntlm.py +++ b/bbot/core/helpers/ntlm.py @@ -5,7 +5,7 @@ import logging import collections -from bbot.core.errors import NTLMError +from bbot.errors import NTLMError log = logging.getLogger("bbot.core.helpers.ntlm") diff --git a/bbot/core/helpers/regex.py b/bbot/core/helpers/regex.py new file mode 100644 index 000000000..f85fb72a5 --- /dev/null +++ b/bbot/core/helpers/regex.py @@ -0,0 +1,72 @@ +import regex as re +from . import misc + + +class RegexHelper: + """ + Class for misc CPU-intensive regex operations + + Offloads regex processing to other CPU cores via GIL release + thread pool + + For quick, one-off regexes, you don't need to use this helper. + Only use this helper if you're searching large bodies of text + or if your regex is CPU-intensive + """ + + def __init__(self, parent_helper): + self.parent_helper = parent_helper + + def ensure_compiled_regex(self, r): + """ + Make sure a regex has been compiled + """ + if not isinstance(r, re.Pattern): + raise ValueError("Regex must be compiled first!") + + def compile(self, *args, **kwargs): + return re.compile(*args, **kwargs) + + async def search(self, compiled_regex, *args, **kwargs): + self.ensure_compiled_regex(compiled_regex) + return await self.parent_helper.run_in_executor(compiled_regex.search, *args, **kwargs) + + async def findall(self, compiled_regex, *args, **kwargs): + self.ensure_compiled_regex(compiled_regex) + return await self.parent_helper.run_in_executor(compiled_regex.findall, *args, **kwargs) + + async def finditer(self, compiled_regex, *args, **kwargs): + self.ensure_compiled_regex(compiled_regex) + return await self.parent_helper.run_in_executor(self._finditer, compiled_regex, *args, **kwargs) + + async def finditer_multi(self, compiled_regexes, *args, **kwargs): + """ + Same as finditer() but with multiple regexes + """ + for r in compiled_regexes: + self.ensure_compiled_regex(r) + return await self.parent_helper.run_in_executor(self._finditer_multi, compiled_regexes, *args, **kwargs) + + def _finditer_multi(self, compiled_regexes, *args, **kwargs): + matches = [] + for r in compiled_regexes: + for m in r.finditer(*args, **kwargs): + matches.append(m) + return matches + + def _finditer(self, compiled_regex, *args, **kwargs): + return list(compiled_regex.finditer(*args, **kwargs)) + + async def extract_params_html(self, *args, **kwargs): + return await self.parent_helper.run_in_executor(misc.extract_params_html, *args, **kwargs) + + async def extract_emails(self, *args, **kwargs): + return await self.parent_helper.run_in_executor(misc.extract_emails, *args, **kwargs) + + async def search_dict_values(self, *args, **kwargs): + def _search_dict_values(*_args, **_kwargs): + return list(misc.search_dict_values(*_args, **_kwargs)) + + return await self.parent_helper.run_in_executor(_search_dict_values, *args, **kwargs) + + async def recursive_decode(self, *args, **kwargs): + return await self.parent_helper.run_in_executor(misc.recursive_decode, *args, **kwargs) diff --git a/bbot/core/helpers/regexes.py b/bbot/core/helpers/regexes.py index 
6e80801a6..4e2ada0c2 100644
--- a/bbot/core/helpers/regexes.py
+++ b/bbot/core/helpers/regexes.py
@@ -1,4 +1,4 @@
-import re
+import regex as re
 from collections import OrderedDict
 
 # for extracting words from strings
@@ -104,3 +104,7 @@
 _extract_host_regex = r"(?:[a-z0-9]{1,20}://)?(?:[^?]*@)?(" + valid_netloc + ")"
 extract_host_regex = re.compile(_extract_host_regex, re.I)
+
+# for use in recursive_decode()
+encoded_regex = re.compile(r"%[0-9a-fA-F]{2}|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\[ntrbv]")
+backslash_regex = re.compile(r"(?P<slashes>\\+)(?P<char>[ntrvb])")
diff --git a/bbot/core/helpers/validators.py b/bbot/core/helpers/validators.py
index 0384a876e..c016ed8df 100644
--- a/bbot/core/helpers/validators.py
+++ b/bbot/core/helpers/validators.py
@@ -5,7 +5,7 @@ from contextlib import suppress
 
 from bbot.core.helpers import regexes
-from bbot.core.errors import ValidationError
+from bbot.errors import ValidationError
 from bbot.core.helpers.url import parse_url, hash_url
 from bbot.core.helpers.misc import smart_encode_punycode, split_host_port, make_netloc, is_ip
diff --git a/bbot/core/helpers/web.py b/bbot/core/helpers/web.py
index 1a442c7e3..26773bc9c 100644
--- a/bbot/core/helpers/web.py
+++ b/bbot/core/helpers/web.py
@@ -13,7 +13,7 @@ from httpx._models import Cookies
 from socksio.exceptions import SOCKSError
 
-from bbot.core.errors import WordlistError, CurlError
+from bbot.errors import WordlistError, CurlError
 from bbot.core.helpers.ratelimiter import RateLimiter
 
 from bs4 import MarkupResemblesLocatorWarning
@@ -50,18 +50,18 @@ class BBOTAsyncClient(httpx.AsyncClient):
     """
 
     def __init__(self, *args, **kwargs):
-        self._bbot_scan = kwargs.pop("_bbot_scan")
-        web_requests_per_second = self._bbot_scan.config.get("web_requests_per_second", 100)
+        self._preset = kwargs.pop("_preset")
+        web_requests_per_second = self._preset.config.get("web_requests_per_second", 100)
         self._rate_limiter = RateLimiter(web_requests_per_second, "Web")
 
-        http_debug = self._bbot_scan.config.get("http_debug", None)
+        http_debug = self._preset.config.get("http_debug", None)
         if http_debug:
             log.trace(f"Creating AsyncClient: {args}, {kwargs}")
 
         self._persist_cookies = kwargs.pop("persist_cookies", True)
 
         # timeout
-        http_timeout = self._bbot_scan.config.get("http_timeout", 20)
+        http_timeout = self._preset.config.get("http_timeout", 20)
         if not "timeout" in kwargs:
             kwargs["timeout"] = http_timeout
 
@@ -70,12 +70,12 @@ def __init__(self, *args, **kwargs):
         if headers is None:
             headers = {}
         # user agent
-        user_agent = self._bbot_scan.config.get("user_agent", "BBOT")
+        user_agent = self._preset.config.get("user_agent", "BBOT")
         if "User-Agent" not in headers:
             headers["User-Agent"] = user_agent
         kwargs["headers"] = headers
         # proxy
-        proxies = self._bbot_scan.config.get("http_proxy", None)
+        proxies = self._preset.config.get("http_proxy", None)
         kwargs["proxies"] = proxies
 
         super().__init__(*args, **kwargs)
@@ -89,8 +89,8 @@ async def request(self, *args, **kwargs):
     def build_request(self, *args, **kwargs):
         request = super().build_request(*args, **kwargs)
         # add custom headers if the URL is in-scope
-        if self._bbot_scan.in_scope(str(request.url)):
-            for hk, hv in self._bbot_scan.config.get("http_headers", {}).items():
+        if self._preset.in_scope(str(request.url)):
+            for hk, hv in self._preset.config.get("http_headers", {}).items():
                 # don't clobber headers
                 if hk not in request.headers:
                     request.headers[hk] = hv
@@ -141,7 +141,7 @@ def __init__(self, parent_helper):
         self.web_client = self.AsyncClient(persist_cookies=False)
 
     def AsyncClient(self, 
*args, **kwargs): - kwargs["_bbot_scan"] = self.parent_helper.scan + kwargs["_preset"] = self.parent_helper.preset retries = kwargs.pop("retries", self.parent_helper.config.get("http_retries", 1)) kwargs["transport"] = httpx.AsyncHTTPTransport(retries=retries, verify=self.ssl_verify) kwargs["verify"] = self.ssl_verify @@ -278,7 +278,9 @@ async def download(self, url, **kwargs): if not "method" in kwargs: kwargs["method"] = "GET" try: - async with self._acatch(url, raise_error), self.AsyncClient().stream(url=url, **kwargs) as response: + async with self._acatch(url, raise_error=True), self.AsyncClient().stream( + url=url, **kwargs + ) as response: status_code = getattr(response, "status_code", 0) log.debug(f"Download result: HTTP {status_code}") if status_code != 0: @@ -300,6 +302,8 @@ async def download(self, url, **kwargs): if warn: log_fn = log.warning log_fn(f"Failed to download {url}: {e}") + if raise_error: + raise return if success: @@ -475,8 +479,8 @@ async def curl(self, *args, **kwargs): headers["User-Agent"] = user_agent # only add custom headers if the URL is in-scope - if self.parent_helper.scan.in_scope(url): - for hk, hv in self.parent_helper.scan.config.get("http_headers", {}).items(): + if self.parent_helper.preset.in_scope(url): + for hk, hv in self.parent_helper.config.get("http_headers", {}).items(): headers[hk] = hv # add the timeout @@ -560,9 +564,9 @@ def is_spider_danger(self, source_event, url): False """ url_depth = self.parent_helper.url_depth(url) - web_spider_depth = self.parent_helper.scan.config.get("web_spider_depth", 1) + web_spider_depth = self.parent_helper.config.get("web_spider_depth", 1) spider_distance = getattr(source_event, "web_spider_distance", 0) + 1 - web_spider_distance = self.parent_helper.scan.config.get("web_spider_distance", 0) + web_spider_distance = self.parent_helper.config.get("web_spider_distance", 0) if (url_depth > web_spider_depth) or (spider_distance > web_spider_distance): return True return False @@ -651,36 +655,42 @@ async def _acatch(self, url, raise_error): try: yield except httpx.TimeoutException: - log.verbose(f"HTTP timeout to URL: {url}") if raise_error: raise + else: + log.verbose(f"HTTP timeout to URL: {url}") except httpx.ConnectError: - log.debug(f"HTTP connect failed to URL: {url}") if raise_error: raise - except httpx.RequestError as e: - log.trace(f"Error with request to URL: {url}: {e}") - log.trace(traceback.format_exc()) + else: + log.debug(f"HTTP connect failed to URL: {url}") + except httpx.HTTPError as e: if raise_error: raise + else: + log.trace(f"Error with request to URL: {url}: {e}") + log.trace(traceback.format_exc()) except ssl.SSLError as e: msg = f"SSL error with request to URL: {url}: {e}" - log.trace(msg) - log.trace(traceback.format_exc()) if raise_error: raise httpx.RequestError(msg) + else: + log.trace(msg) + log.trace(traceback.format_exc()) except anyio.EndOfStream as e: msg = f"AnyIO error with request to URL: {url}: {e}" - log.trace(msg) - log.trace(traceback.format_exc()) if raise_error: raise httpx.RequestError(msg) + else: + log.trace(msg) + log.trace(traceback.format_exc()) except SOCKSError as e: msg = f"SOCKS error with request to URL: {url}: {e}" - log.trace(msg) - log.trace(traceback.format_exc()) if raise_error: raise httpx.RequestError(msg) + else: + log.trace(msg) + log.trace(traceback.format_exc()) except BaseException as e: # don't log if the error is the result of an intentional cancellation if not any( diff --git a/bbot/core/helpers/wordcloud.py b/bbot/core/helpers/wordcloud.py 
index 26d050406..5eafb00c5 100644 --- a/bbot/core/helpers/wordcloud.py +++ b/bbot/core/helpers/wordcloud.py @@ -322,7 +322,7 @@ def json(self, limit=None): @property def default_filename(self): - return self.parent_helper.scan.home / f"wordcloud.tsv" + return self.parent_helper.preset.scan.home / f"wordcloud.tsv" def save(self, filename=None, limit=None): """ diff --git a/bbot/core/logger/__init__.py b/bbot/core/logger/__init__.py deleted file mode 100644 index 39f447d6a..000000000 --- a/bbot/core/logger/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .logger import ( - init_logging, - get_log_level, - set_log_level, - add_log_handler, - ColoredFormatter, - get_log_handlers, - toggle_log_level, - remove_log_handler, -) diff --git a/bbot/core/logger/logger.py b/bbot/core/logger/logger.py deleted file mode 100644 index eb8da4c55..000000000 --- a/bbot/core/logger/logger.py +++ /dev/null @@ -1,238 +0,0 @@ -import os -import sys -import logging -from copy import copy -import logging.handlers -from pathlib import Path - -from ..configurator import config -from ..helpers.misc import mkdir, error_and_exit -from ..helpers.logger import colorize, loglevel_mapping - - -_log_level_override = None - -bbot_loggers = None -bbot_log_handlers = None - -debug_format = logging.Formatter("%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)s %(message)s") - - -class ColoredFormatter(logging.Formatter): - """ - Pretty colors for terminal - """ - - formatter = logging.Formatter("%(levelname)s %(message)s") - module_formatter = logging.Formatter("%(levelname)s %(name)s: %(message)s") - - def format(self, record): - colored_record = copy(record) - levelname = colored_record.levelname - levelshort = loglevel_mapping.get(levelname, "INFO") - colored_record.levelname = colorize(f"[{levelshort}]", level=levelname) - if levelname == "CRITICAL" or levelname.startswith("HUGE"): - colored_record.msg = colorize(colored_record.msg, level=levelname) - # remove name - if colored_record.name.startswith("bbot.modules."): - colored_record.name = colored_record.name.split("bbot.modules.")[-1] - return self.module_formatter.format(colored_record) - return self.formatter.format(colored_record) - - -def addLoggingLevel(levelName, levelNum, methodName=None): - """ - Comprehensively adds a new logging level to the `logging` module and the - currently configured logging class. - - `levelName` becomes an attribute of the `logging` module with the value - `levelNum`. `methodName` becomes a convenience method for both `logging` - itself and the class returned by `logging.getLoggerClass()` (usually just - `logging.Logger`). If `methodName` is not specified, `levelName.lower()` is - used. 
- - To avoid accidental clobberings of existing attributes, this method will - raise an `AttributeError` if the level name is already an attribute of the - `logging` module or if the method name is already present - - Example - ------- - >>> addLoggingLevel('TRACE', logging.DEBUG - 5) - >>> logging.getLogger(__name__).setLevel('TRACE') - >>> logging.getLogger(__name__).trace('that worked') - >>> logging.trace('so did this') - >>> logging.TRACE - 5 - - """ - if not methodName: - methodName = levelName.lower() - - if hasattr(logging, levelName): - raise AttributeError(f"{levelName} already defined in logging module") - if hasattr(logging, methodName): - raise AttributeError(f"{methodName} already defined in logging module") - if hasattr(logging.getLoggerClass(), methodName): - raise AttributeError(f"{methodName} already defined in logger class") - - # This method was inspired by the answers to Stack Overflow post - # http://stackoverflow.com/q/2183233/2988730, especially - # http://stackoverflow.com/a/13638084/2988730 - def logForLevel(self, message, *args, **kwargs): - if self.isEnabledFor(levelNum): - self._log(levelNum, message, args, **kwargs) - - def logToRoot(message, *args, **kwargs): - logging.log(levelNum, message, *args, **kwargs) - - logging.addLevelName(levelNum, levelName) - setattr(logging, levelName, levelNum) - setattr(logging.getLoggerClass(), methodName, logForLevel) - setattr(logging, methodName, logToRoot) - - -# custom logging levels -addLoggingLevel("STDOUT", 100) -addLoggingLevel("TRACE", 49) -addLoggingLevel("HUGEWARNING", 31) -addLoggingLevel("HUGESUCCESS", 26) -addLoggingLevel("SUCCESS", 25) -addLoggingLevel("HUGEINFO", 21) -addLoggingLevel("HUGEVERBOSE", 16) -addLoggingLevel("VERBOSE", 15) - - -verbosity_levels_toggle = [logging.INFO, logging.VERBOSE, logging.DEBUG] - - -def get_bbot_loggers(): - global bbot_loggers - if bbot_loggers is None: - bbot_loggers = [ - logging.getLogger("bbot"), - logging.getLogger("asyncio"), - ] - return bbot_loggers - - -def add_log_handler(handler, formatter=None): - if handler.formatter is None: - handler.setFormatter(debug_format) - for logger in get_bbot_loggers(): - if handler not in logger.handlers: - logger.addHandler(handler) - - -def remove_log_handler(handler): - for logger in get_bbot_loggers(): - if handler in logger.handlers: - logger.removeHandler(handler) - - -def init_logging(): - # Don't do this more than once - if len(logging.getLogger("bbot").handlers) == 0: - for logger in get_bbot_loggers(): - include_logger(logger) - - -def include_logger(logger): - bbot_loggers = get_bbot_loggers() - if logger not in bbot_loggers: - bbot_loggers.append(logger) - logger.setLevel(get_log_level()) - for handler in get_log_handlers().values(): - logger.addHandler(handler) - - -def get_log_handlers(): - global bbot_log_handlers - - if bbot_log_handlers is None: - log_dir = Path(config["home"]) / "logs" - if not mkdir(log_dir, raise_error=False): - error_and_exit(f"Failure creating or error writing to BBOT logs directory ({log_dir})") - - # Main log file - main_handler = logging.handlers.TimedRotatingFileHandler( - f"{log_dir}/bbot.log", when="d", interval=1, backupCount=14 - ) - - # Separate log file for debugging - debug_handler = logging.handlers.TimedRotatingFileHandler( - f"{log_dir}/bbot.debug.log", when="d", interval=1, backupCount=14 - ) - - def stderr_filter(record): - log_level = get_log_level() - if record.levelno == logging.STDOUT or (record.levelno == logging.TRACE and log_level > logging.DEBUG): - return False - if 
record.levelno < log_level: - return False - return True - - # Log to stderr - stderr_handler = logging.StreamHandler(sys.stderr) - stderr_handler.addFilter(stderr_filter) - # Log to stdout - stdout_handler = logging.StreamHandler(sys.stdout) - stdout_handler.addFilter(lambda x: x.levelno == logging.STDOUT) - # log to files - debug_handler.addFilter( - lambda x: x.levelno == logging.TRACE or (x.levelno < logging.VERBOSE and x.levelno != logging.STDOUT) - ) - main_handler.addFilter( - lambda x: x.levelno not in (logging.STDOUT, logging.TRACE) and x.levelno >= logging.VERBOSE - ) - - # Set log format - debug_handler.setFormatter(debug_format) - main_handler.setFormatter(debug_format) - stderr_handler.setFormatter(ColoredFormatter("%(levelname)s %(name)s: %(message)s")) - stdout_handler.setFormatter(logging.Formatter("%(message)s")) - - bbot_log_handlers = { - "stderr": stderr_handler, - "stdout": stdout_handler, - "file_debug": debug_handler, - "file_main": main_handler, - } - return bbot_log_handlers - - -def get_log_level(): - if _log_level_override is not None: - return _log_level_override - - from bbot.core.configurator.args import cli_options - - if config.get("debug", False) or os.environ.get("BBOT_DEBUG", "").lower() in ("true", "yes"): - return logging.DEBUG - - loglevel = logging.INFO - if cli_options is not None: - if cli_options.verbose: - loglevel = logging.VERBOSE - if cli_options.debug: - loglevel = logging.DEBUG - return loglevel - - -def set_log_level(level, logger=None): - global _log_level_override - if logger is not None: - logger.hugeinfo(f"Setting log level to {logging.getLevelName(level)}") - config["silent"] = False - _log_level_override = level - for logger in bbot_loggers: - logger.setLevel(level) - - -def toggle_log_level(logger=None): - log_level = get_log_level() - if log_level in verbosity_levels_toggle: - for i, level in enumerate(verbosity_levels_toggle): - if log_level == level: - set_log_level(verbosity_levels_toggle[(i + 1) % len(verbosity_levels_toggle)], logger=logger) - else: - set_log_level(verbosity_levels_toggle[0], logger=logger) diff --git a/bbot/core/helpers/modules.py b/bbot/core/modules.py similarity index 60% rename from bbot/core/helpers/modules.py rename to bbot/core/modules.py index c6cc52f42..b9ae83af5 100644 --- a/bbot/core/helpers/modules.py +++ b/bbot/core/modules.py @@ -1,29 +1,98 @@ +import re import ast import sys +import atexit +import pickle +import logging import importlib +import omegaconf import traceback +from copy import copy from pathlib import Path from omegaconf import OmegaConf from contextlib import suppress -from ..flags import flag_descriptions -from .misc import list_files, sha1, search_dict_by_key, search_format_dict, make_table, os_platform +from bbot.core import CORE +from bbot.errors import BBOTError +from bbot.logger import log_to_stderr + +from .flags import flag_descriptions +from .shared_deps import SHARED_DEPS +from .helpers.misc import list_files, sha1, search_dict_by_key, search_format_dict, make_table, os_platform, mkdir + + +log = logging.getLogger("bbot.module_loader") + +bbot_code_dir = Path(__file__).parent.parent class ModuleLoader: """ - Main class responsible for loading BBOT modules. + Main class responsible for preloading BBOT modules. This class is in charge of preloading modules to determine their dependencies. Once dependencies are identified, they are installed before the actual module is imported. 
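A minimal sketch of the idea, assuming a module file that defines literal class attributes such as `flags` or `deps_pip`: the file is parsed with `ast` rather than imported, so its dependencies can be discovered and installed first.

```python
# Sketch: read literal class attributes from a module file via AST,
# without importing it (mirrors preload_module() later in this diff).
import ast

def preload_attrs(module_file, attrs=("flags", "watched_events", "deps_pip")):
    tree = ast.parse(open(module_file).read())
    found = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.ClassDef):
            for stmt in node.body:
                if not isinstance(stmt, ast.Assign):
                    continue
                for target in stmt.targets:
                    if getattr(target, "id", None) in attrs:
                        # literal_eval only evaluates constants -- no code runs
                        found[target.id] = ast.literal_eval(stmt.value)
    return found
```

Only once the pip/apt/shell dependencies gathered this way are installed does the module actually get imported.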
This ensures that all requisite libraries and components are available for the module to function correctly. """ + default_module_dir = bbot_code_dir / "modules" + + module_dir_regex = re.compile(r"^[a-z][a-z0-9_]*$") + + # if a module consumes these event types, automatically assume these dependencies + default_module_deps = {"HTTP_RESPONSE": "httpx", "URL": "httpx", "SOCIAL": "social"} + def __init__(self): - self._preloaded = {} - self._preloaded_orig = None + self.core = CORE + + self._shared_deps = dict(SHARED_DEPS) + + self.__preloaded = {} self._modules = {} self._configs = {} + self.flag_choices = set() + self.all_module_choices = set() + self.scan_module_choices = set() + self.output_module_choices = set() + self.internal_module_choices = set() + + self._preload_cache = None + + self._module_dirs = set() + self._module_dirs_preloaded = set() + self.add_module_dir(self.default_module_dir) + + # save preload cache before exiting + atexit.register(self.save_preload_cache) + + def copy(self): + module_loader_copy = copy(self) + module_loader_copy.__preloaded = dict(self.__preloaded) + return module_loader_copy + + @property + def preload_cache_file(self): + return self.core.cache_dir / "module_preload_cache" + + @property + def module_dirs(self): + return self._module_dirs + + def add_module_dir(self, module_dir): + module_dir = Path(module_dir).resolve() + if module_dir in self._module_dirs: + log.debug(f'Already added custom module dir "{module_dir}"') + return + if not module_dir.is_dir(): + log.warning(f'Failed to add custom module dir "{module_dir}", please make sure it exists') + return + new_module_dirs = set() + for _module_dir in self.get_recursive_dirs(module_dir): + _module_dir = Path(_module_dir).resolve() + if _module_dir not in self._module_dirs: + self._module_dirs.add(_module_dir) + new_module_dirs.add(_module_dir) + self.preload(module_dirs=new_module_dirs) def file_filter(self, file): file = file.resolve() @@ -31,11 +100,11 @@ def file_filter(self, file): return False return file.suffix.lower() == ".py" and file.stem not in ["base", "__init__"] - def preload(self, module_dir): - """Preloads all modules within a directory. + def preload(self, module_dirs=None): + """Preloads all BBOT modules. - This function recursively iterates through each file in the specified directory - and preloads the BBOT module to gather its meta-information and dependencies. + This function recursively iterates through each file in the module directories + and preloads each BBOT module to gather its meta-information and dependencies. Args: module_dir (str or Path): Directory containing BBOT modules to be preloaded. @@ -52,30 +121,120 @@ def preload(self, module_dir): ... 
} """ - module_dir = Path(module_dir) - for module_file in list_files(module_dir, filter=self.file_filter): - if module_dir.name == "modules": - namespace = f"bbot.modules" - else: - namespace = f"bbot.modules.{module_dir.name}" - try: - preloaded = self.preload_module(module_file) - module_type = "scan" - if module_dir.name in ("output", "internal"): - module_type = str(module_dir.name) - elif module_dir.name not in ("modules"): - preloaded["flags"] = list(set(preloaded["flags"] + [module_dir.name])) - preloaded["type"] = module_type - preloaded["namespace"] = namespace + new_modules = False + if module_dirs is None: + module_dirs = self.module_dirs + + for module_dir in module_dirs: + if module_dir in self._module_dirs_preloaded: + log.debug(f"Already preloaded modules from {module_dir}") + continue + + log.debug(f"Preloading modules from {module_dir}") + new_modules = True + for module_file in list_files(module_dir, filter=self.file_filter): + module_name = module_file.stem + module_file = module_file.resolve() + + # try to load from cache + module_cache_key = (str(module_file), tuple(module_file.stat())) + preloaded = self.preload_cache.get(module_name, {}) + cache_key = preloaded.get("cache_key", ()) + if preloaded and module_cache_key == cache_key: + log.debug(f"Preloading {module_name} from cache") + else: + log.debug(f"Preloading {module_name} from disk") + if module_dir.name == "modules": + namespace = f"bbot.modules" + else: + namespace = f"bbot.modules.{module_dir.name}" + try: + preloaded = self.preload_module(module_file) + module_type = "scan" + if module_dir.name in ("output", "internal"): + module_type = str(module_dir.name) + elif module_dir.name not in ("modules"): + flags = set(preloaded["flags"] + [module_dir.name]) + preloaded["flags"] = sorted(flags) + + # derive module dependencies from watched event types (only for scan modules) + if module_type == "scan": + for event_type in preloaded["watched_events"]: + if event_type in self.default_module_deps: + deps_modules = set(preloaded.get("deps", {}).get("modules", [])) + deps_modules.add(self.default_module_deps[event_type]) + preloaded["deps"]["modules"] = sorted(deps_modules) + + preloaded["type"] = module_type + preloaded["namespace"] = namespace + preloaded["cache_key"] = module_cache_key + + except Exception: + log_to_stderr(f"Error preloading {module_file}\n\n{traceback.format_exc()}", level="CRITICAL") + log_to_stderr(f"Error in {module_file.name}", level="CRITICAL") + sys.exit(1) + + self.all_module_choices.add(module_name) + module_type = preloaded.get("type", "scan") + if module_type == "scan": + self.scan_module_choices.add(module_name) + elif module_type == "output": + self.output_module_choices.add(module_name) + elif module_type == "internal": + self.internal_module_choices.add(module_name) + + flags = preloaded.get("flags", []) + self.flag_choices.update(set(flags)) + + self.__preloaded[module_name] = preloaded config = OmegaConf.create(preloaded.get("config", {})) - self._configs[module_file.stem] = config - self._preloaded[module_file.stem] = preloaded - except Exception: - print(f"[CRIT] Error preloading {module_file}\n\n{traceback.format_exc()}") - print(f"[CRIT] Error in {module_file.name}") - sys.exit(1) + self._configs[module_name] = config - return self._preloaded + self._module_dirs_preloaded.add(module_dir) + + # update default config with module defaults + module_config = omegaconf.OmegaConf.create( + { + "modules": self.configs(), + } + ) + self.core.merge_default(module_config) + + return 
new_modules + + @property + def preload_cache(self): + if self._preload_cache is None: + self._preload_cache = {} + if self.preload_cache_file.is_file(): + with suppress(Exception): + with open(self.preload_cache_file, "rb") as f: + self._preload_cache = pickle.load(f) + return self._preload_cache + + @preload_cache.setter + def preload_cache(self, value): + self._preload_cache = value + mkdir(self.preload_cache_file.parent) + with open(self.preload_cache_file, "wb") as f: + pickle.dump(self._preload_cache, f) + + def save_preload_cache(self): + self.preload_cache = self.__preloaded + + @property + def _preloaded(self): + return self.__preloaded + + def get_recursive_dirs(self, *dirs): + dirs = set(Path(d).resolve() for d in dirs) + for d in list(dirs): + if not d.is_dir(): + continue + for p in d.iterdir(): + if p.is_dir() and self.module_dir_regex.match(p.name): + dirs.update(self.get_recursive_dirs(p)) + return dirs def preloaded(self, type=None): preloaded = {} @@ -94,9 +253,8 @@ def configs(self, type=None): return OmegaConf.create(configs) def find_and_replace(self, **kwargs): - if self._preloaded_orig is None: - self._preloaded_orig = dict(self._preloaded) - self._preloaded = search_format_dict(self._preloaded_orig, **kwargs) + self.__preloaded = search_format_dict(self.__preloaded, **kwargs) + self._shared_deps = search_format_dict(self._shared_deps, **kwargs) def check_type(self, module, type): return self._preloaded[module]["type"] == type @@ -136,6 +294,9 @@ def preload_module(self, module_file): "options_desc": {}, "hash": "d5a88dd3866c876b81939c920bf4959716e2a374", "deps": { + "modules": [ + "httpx" + ] "pip": [ "python-Wappalyzer~=0.3.1" ], @@ -147,14 +308,16 @@ def preload_module(self, module_file): "sudo": false } """ - watched_events = [] - produced_events = [] - flags = [] + watched_events = set() + produced_events = set() + flags = set() meta = {} - pip_deps = [] - pip_deps_constraints = [] - shell_deps = [] - apt_deps = [] + deps_modules = set() + deps_pip = [] + deps_pip_constraints = [] + deps_shell = [] + deps_apt = [] + deps_common = [] ansible_tasks = [] python_code = open(module_file).read() # take a hash of the code so we can keep track of when it changes @@ -166,84 +329,109 @@ def preload_module(self, module_file): # look for classes if type(root_element) == ast.ClassDef: for class_attr in root_element.body: + # class attributes that are dictionaries if type(class_attr) == ast.Assign and type(class_attr.value) == ast.Dict: # module options if any([target.id == "options" for target in class_attr.targets]): config.update(ast.literal_eval(class_attr.value)) # module options - if any([target.id == "options_desc" for target in class_attr.targets]): + elif any([target.id == "options_desc" for target in class_attr.targets]): options_desc.update(ast.literal_eval(class_attr.value)) # module metadata - if any([target.id == "meta" for target in class_attr.targets]): + elif any([target.id == "meta" for target in class_attr.targets]): meta = ast.literal_eval(class_attr.value) + # class attributes that are lists if type(class_attr) == ast.Assign and type(class_attr.value) == ast.List: # flags if any([target.id == "flags" for target in class_attr.targets]): for flag in class_attr.value.elts: if type(flag.value) == str: - flags.append(flag.value) + flags.add(flag.value) # watched events - if any([target.id == "watched_events" for target in class_attr.targets]): + elif any([target.id == "watched_events" for target in class_attr.targets]): for event_type in class_attr.value.elts: 
if type(event_type.value) == str: - watched_events.append(event_type.value) + watched_events.add(event_type.value) # produced events - if any([target.id == "produced_events" for target in class_attr.targets]): + elif any([target.id == "produced_events" for target in class_attr.targets]): for event_type in class_attr.value.elts: if type(event_type.value) == str: - produced_events.append(event_type.value) - # python dependencies - if any([target.id == "deps_pip" for target in class_attr.targets]): - for python_dep in class_attr.value.elts: - if type(python_dep.value) == str: - pip_deps.append(python_dep.value) - - if any([target.id == "deps_pip_constraints" for target in class_attr.targets]): - for python_dep in class_attr.value.elts: - if type(python_dep.value) == str: - pip_deps_constraints.append(python_dep.value) + produced_events.add(event_type.value) + # bbot module dependencies + elif any([target.id == "deps_modules" for target in class_attr.targets]): + for dep_module in class_attr.value.elts: + if type(dep_module.value) == str: + deps_modules.add(dep_module.value) + # python dependencies + elif any([target.id == "deps_pip" for target in class_attr.targets]): + for dep_pip in class_attr.value.elts: + if type(dep_pip.value) == str: + deps_pip.append(dep_pip.value) + elif any([target.id == "deps_pip_constraints" for target in class_attr.targets]): + for dep_pip in class_attr.value.elts: + if type(dep_pip.value) == str: + deps_pip_constraints.append(dep_pip.value) # apt dependencies elif any([target.id == "deps_apt" for target in class_attr.targets]): - for apt_dep in class_attr.value.elts: - if type(apt_dep.value) == str: - apt_deps.append(apt_dep.value) + for dep_apt in class_attr.value.elts: + if type(dep_apt.value) == str: + deps_apt.append(dep_apt.value) # bash dependencies elif any([target.id == "deps_shell" for target in class_attr.targets]): - for shell_dep in class_attr.value.elts: - shell_deps.append(ast.literal_eval(shell_dep)) + for dep_shell in class_attr.value.elts: + deps_shell.append(ast.literal_eval(dep_shell)) # ansible playbook elif any([target.id == "deps_ansible" for target in class_attr.targets]): ansible_tasks = ast.literal_eval(class_attr.value) + # shared/common module dependencies + elif any([target.id == "deps_common" for target in class_attr.targets]): + for dep_common in class_attr.value.elts: + if type(dep_common.value) == str: + deps_common.append(dep_common.value) + for task in ansible_tasks: if not "become" in task: task["become"] = False # don't sudo brew elif os_platform() == "darwin" and ("package" in task and task.get("become", False) == True): task["become"] = False + preloaded_data = { - "watched_events": watched_events, - "produced_events": produced_events, - "flags": flags, + "watched_events": sorted(watched_events), + "produced_events": sorted(produced_events), + "flags": sorted(flags), "meta": meta, "config": config, "options_desc": options_desc, "hash": module_hash, "deps": { - "pip": pip_deps, - "pip_constraints": pip_deps_constraints, - "shell": shell_deps, - "apt": apt_deps, + "modules": sorted(deps_modules), + "pip": deps_pip, + "pip_constraints": deps_pip_constraints, + "shell": deps_shell, + "apt": deps_apt, "ansible": ansible_tasks, + "common": deps_common, }, - "sudo": len(apt_deps) > 0, + "sudo": len(deps_apt) > 0, } - if any(x == True for x in search_dict_by_key("become", ansible_tasks)) or any( - x == True for x in search_dict_by_key("ansible_become", ansible_tasks) - ): - preloaded_data["sudo"] = True + ansible_task_list = 
list(ansible_tasks) + for dep_common in deps_common: + try: + ansible_task_list.extend(self._shared_deps[dep_common]) + except KeyError: + common_choices = ",".join(self._shared_deps) + raise BBOTError( + f'Error while preloading module "{module_file}": No shared dependency named "{dep_common}" (choices: {common_choices})' + ) + for ansible_task in ansible_task_list: + if any(x == True for x in search_dict_by_key("become", ansible_task)) or any( + x == True for x in search_dict_by_key("ansible_become", ansible_tasks) + ): + preloaded_data["sudo"] = True return preloaded_data def load_modules(self, module_names): @@ -413,14 +601,10 @@ def modules_options(self, modules=None, mod_type=None): modules_options = {} for module_name, preloaded in self.filter_modules(modules, mod_type): modules_options[module_name] = [] - module_type = preloaded["type"] module_options = preloaded["config"] module_options_desc = preloaded["options_desc"] for k, v in sorted(module_options.items(), key=lambda x: x[0]): - module_key = "modules" - if module_type in ("internal", "output"): - module_key = f"{module_type}_modules" - option_name = f"{module_key}.{module_name}.{k}" + option_name = f"modules.{module_name}.{k}" option_type = type(v).__name__ option_description = module_options_desc[k] modules_options[module_name].append((option_name, option_type, option_description, str(v))) @@ -497,4 +681,4 @@ def filter_modules(self, modules=None, mod_type=None): return module_list -module_loader = ModuleLoader() +MODULE_LOADER = ModuleLoader() diff --git a/bbot/core/shared_deps.py b/bbot/core/shared_deps.py new file mode 100644 index 000000000..751117752 --- /dev/null +++ b/bbot/core/shared_deps.py @@ -0,0 +1,119 @@ +DEP_FFUF = [ + { + "name": "Download ffuf", + "unarchive": { + "src": "https://github.com/ffuf/ffuf/releases/download/v#{BBOT_DEPS_FFUF_VERSION}/ffuf_#{BBOT_DEPS_FFUF_VERSION}_#{BBOT_OS}_#{BBOT_CPU_ARCH}.tar.gz", + "include": "ffuf", + "dest": "#{BBOT_TOOLS}", + "remote_src": True, + }, + } +] + +DEP_DOCKER = [ + { + "name": "Check if Docker is already installed", + "command": "docker --version", + "register": "docker_installed", + "ignore_errors": True, + }, + { + "name": "Install Docker (Non-Debian)", + "package": {"name": "docker", "state": "present"}, + "become": True, + "when": "ansible_facts['os_family'] != 'Debian' and docker_installed.rc != 0", + }, + { + "name": "Install Docker (Debian)", + "package": { + "name": "docker.io", + "state": "present", + }, + "become": True, + "when": "ansible_facts['os_family'] == 'Debian' and docker_installed.rc != 0", + }, +] + +DEP_MASSDNS = [ + { + "name": "install dev tools", + "package": {"name": ["gcc", "git", "make"], "state": "present"}, + "become": True, + "ignore_errors": True, + }, + { + "name": "Download massdns source code", + "git": { + "repo": "https://github.com/blechschmidt/massdns.git", + "dest": "#{BBOT_TEMP}/massdns", + "single_branch": True, + "version": "master", + }, + }, + { + "name": "Build massdns (Linux)", + "command": {"chdir": "#{BBOT_TEMP}/massdns", "cmd": "make", "creates": "#{BBOT_TEMP}/massdns/bin/massdns"}, + "when": "ansible_facts['system'] == 'Linux'", + }, + { + "name": "Build massdns (non-Linux)", + "command": { + "chdir": "#{BBOT_TEMP}/massdns", + "cmd": "make nolinux", + "creates": "#{BBOT_TEMP}/massdns/bin/massdns", + }, + "when": "ansible_facts['system'] != 'Linux'", + }, + { + "name": "Install massdns", + "copy": {"src": "#{BBOT_TEMP}/massdns/bin/massdns", "dest": "#{BBOT_TOOLS}/", "mode": "u+x,g+x,o+x"}, + }, +] + +DEP_CHROMIUM 
= [ + { + "name": "Install Chromium (Non-Debian)", + "package": {"name": "chromium", "state": "present"}, + "become": True, + "when": "ansible_facts['os_family'] != 'Debian'", + "ignore_errors": True, + }, + { + "name": "Install Chromium dependencies (Debian)", + "package": { + "name": "libasound2,libatk-bridge2.0-0,libatk1.0-0,libcairo2,libcups2,libdrm2,libgbm1,libnss3,libpango-1.0-0,libxcomposite1,libxdamage1,libxfixes3,libxkbcommon0,libxrandr2", + "state": "present", + }, + "become": True, + "when": "ansible_facts['os_family'] == 'Debian'", + "ignore_errors": True, + }, + { + "name": "Get latest Chromium version (Debian)", + "uri": { + "url": "https://www.googleapis.com/download/storage/v1/b/chromium-browser-snapshots/o/Linux_x64%2FLAST_CHANGE?alt=media", + "return_content": True, + }, + "register": "chromium_version", + "when": "ansible_facts['os_family'] == 'Debian'", + "ignore_errors": True, + }, + { + "name": "Download Chromium (Debian)", + "unarchive": { + "src": "https://www.googleapis.com/download/storage/v1/b/chromium-browser-snapshots/o/Linux_x64%2F{{ chromium_version.content }}%2Fchrome-linux.zip?alt=media", + "remote_src": True, + "dest": "#{BBOT_TOOLS}", + "creates": "#{BBOT_TOOLS}/chrome-linux", + }, + "when": "ansible_facts['os_family'] == 'Debian'", + "ignore_errors": True, + }, +] + +# shared module dependencies -- ffuf, massdns, chromium, etc. +SHARED_DEPS = {} +for var, val in list(locals().items()): + if var.startswith("DEP_") and isinstance(val, list): + var = var.split("_", 1)[-1].lower() + SHARED_DEPS[var] = val diff --git a/bbot/defaults.yml b/bbot/defaults.yml index 1e5a1a080..4b9b5210d 100644 --- a/bbot/defaults.yml +++ b/bbot/defaults.yml @@ -8,8 +8,6 @@ home: ~/.bbot scope_report_distance: 0 # Generate new DNS_NAME and IP_ADDRESS events through DNS resolution dns_resolution: true -# Limit the number of BBOT threads -max_threads: 25 # Rate-limit DNS dns_queries_per_second: 1000 # Rate-limit HTTP @@ -21,6 +19,11 @@ http_proxy: # Web user-agent user_agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.2151.97 +# Tool dependencies +deps: + ffuf: + version: "2.1.0" + ### WEB SPIDER ### # Set the maximum number of HTTP links that can be followed in a row (0 == no spidering allowed) @@ -33,6 +36,8 @@ web_spider_links_per_page: 25 ### ADVANCED OPTIONS ### +module_paths: [] + # How far out from the main scope to search scope_search_distance: 0 # How far out from the main scope to resolve DNS names / IPs @@ -40,12 +45,27 @@ scope_dns_search_distance: 1 # Limit how many DNS records can be followed in a row (stop malicious/runaway DNS records) dns_resolve_distance: 5 +# Limit the number of scan manager workers +manager_tasks: 5 + # Infer certain events from others, e.g. IPs from IP ranges, DNS_NAMEs from URLs, etc. speculate: True # Passively search event data for URLs, hostnames, emails, etc. excavate: True # Summarize activity at the end of a scan aggregate: True +# DNS resolution +dnsresolve: True +# Cloud provider tagging +cloudcheck: True + +# How to handle installation of module dependencies +# Choices are: +# - abort_on_failure (default) - if a module dependency fails to install, abort the scan +# - retry_failed - try again to install failed dependencies +# - ignore_failed - run the scan regardless of what happens with dependency installation +# - disable - completely disable BBOT's dependency system (you are responsible for install tools, pip packages, etc.) 
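# For example, a user config could override this to keep scanning even when
# a tool fails to install (a hypothetical override, shown only to illustrate
# the choices listed above):
#   deps_behavior: ignore_failed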
+deps_behavior: abort_on_failure # HTTP timeout (for Python requests; API calls, etc.) http_timeout: 10 @@ -64,11 +84,14 @@ httpx_retries: 1 http_debug: false # Maximum number of HTTP redirects to follow http_max_redirects: 5 + # DNS query timeout dns_timeout: 5 # How many times to retry DNS queries dns_retries: 1 -# Disable BBOT's smart DNS wildcard handling for select domains +# Completely disable BBOT's DNS wildcard detection +dns_wildcard_disable: False +# Disable BBOT's DNS wildcard detection for select domains dns_wildcard_ignore: [] # How many sanity checks to make when verifying wildcard DNS # Increase this value if BBOT's wildcard detection isn't working @@ -80,6 +103,7 @@ dns_abort_threshold: 50 dns_filter_ptrs: true # Enable/disable debug messages for dns queries dns_debug: false + # Whether to verify SSL certificates ssl_verify: false # How many scan results to keep before cleaning up the older ones @@ -124,10 +148,6 @@ omit_event_types: - URL_UNVERIFIED - DNS_NAME_UNRESOLVED # - IP_ADDRESS -# URL of BBOT server -agent_url: '' -# Agent Bearer authentication token -agent_token: '' # Custom interactsh server settings interactsh_server: null diff --git a/bbot/core/errors.py b/bbot/errors.py similarity index 65% rename from bbot/core/errors.py rename to bbot/errors.py index 5e5f57aeb..e50e581cd 100644 --- a/bbot/core/errors.py +++ b/bbot/errors.py @@ -1,6 +1,3 @@ -from httpx import HTTPError, RequestError # noqa - - class BBOTError(Exception): pass @@ -51,3 +48,27 @@ class DNSWildcardBreak(DNSError): class CurlError(BBOTError): pass + + +class PresetNotFoundError(BBOTError): + pass + + +class EnableModuleError(BBOTError): + pass + + +class EnableFlagError(BBOTError): + pass + + +class BBOTArgumentError(BBOTError): + pass + + +class PresetConditionError(BBOTError): + pass + + +class PresetAbortError(PresetConditionError): + pass diff --git a/bbot/core/helpers/logger.py b/bbot/logger.py similarity index 100% rename from bbot/core/helpers/logger.py rename to bbot/logger.py diff --git a/bbot/modules/__init__.py b/bbot/modules/__init__.py index 6062b0170..e69de29bb 100644 --- a/bbot/modules/__init__.py +++ b/bbot/modules/__init__.py @@ -1,14 +0,0 @@ -import re -from pathlib import Path -from bbot.core.helpers.modules import module_loader - -dir_regex = re.compile(r"^[a-z][a-z0-9_]*$") - -parent_dir = Path(__file__).parent.resolve() -module_dirs = set([parent_dir]) -for e in parent_dir.iterdir(): - if e.is_dir() and dir_regex.match(e.name) and not e.name == "modules": - module_dirs.add(e) - -for d in module_dirs: - module_loader.preload(d) diff --git a/bbot/modules/ajaxpro.py b/bbot/modules/ajaxpro.py index 46d475cca..ba3e0eb3e 100644 --- a/bbot/modules/ajaxpro.py +++ b/bbot/modules/ajaxpro.py @@ -1,4 +1,4 @@ -import re +import regex as re from bbot.modules.base import BaseModule @@ -38,7 +38,7 @@ async def handle_event(self, event): elif event.type == "HTTP_RESPONSE": resp_body = event.data.get("body", None) if resp_body: - ajaxpro_regex_result = self.ajaxpro_regex.search(resp_body) + ajaxpro_regex_result = await self.helpers.re.search(self.ajaxpro_regex, resp_body) if ajaxpro_regex_result: ajax_pro_path = ajaxpro_regex_result.group(0) await self.emit_event( diff --git a/bbot/modules/anubisdb.py b/bbot/modules/anubisdb.py index 9864e3c6d..bf4c88e93 100644 --- a/bbot/modules/anubisdb.py +++ b/bbot/modules/anubisdb.py @@ -30,7 +30,7 @@ def abort_if_pre(self, hostname): async def abort_if(self, event): # abort if dns name is unresolved - if not "resolved" in event.tags: + if event.type == 
"DNS_NAME_UNRESOLVED": return True, "DNS name is unresolved" return await super().abort_if(event) diff --git a/bbot/modules/azure_realm.py b/bbot/modules/azure_realm.py index 33772921e..a3d6ad6ba 100644 --- a/bbot/modules/azure_realm.py +++ b/bbot/modules/azure_realm.py @@ -4,7 +4,7 @@ class azure_realm(BaseModule): watched_events = ["DNS_NAME"] produced_events = ["URL_UNVERIFIED"] - flags = ["affiliates", "subdomain-enum", "cloud-enum", "web-basic", "web-thorough", "passive", "safe"] + flags = ["affiliates", "subdomain-enum", "cloud-enum", "web-basic", "passive", "safe"] meta = {"description": 'Retrieves the "AuthURL" from login.microsoftonline.com/getuserrealm'} async def setup(self): diff --git a/bbot/modules/azure_tenant.py b/bbot/modules/azure_tenant.py index 909acbe20..a15bbb68f 100644 --- a/bbot/modules/azure_tenant.py +++ b/bbot/modules/azure_tenant.py @@ -1,4 +1,4 @@ -import re +import regex as re from contextlib import suppress from bbot.modules.base import BaseModule @@ -25,7 +25,7 @@ async def handle_event(self, event): tenant_id = None authorization_endpoint = openid_config.get("authorization_endpoint", "") - matches = self.helpers.regexes.uuid_regex.findall(authorization_endpoint) + matches = await self.helpers.re.findall(self.helpers.regexes.uuid_regex, authorization_endpoint) if matches: tenant_id = matches[0] @@ -86,7 +86,7 @@ async def query(self, domain): if status_code not in (200, 421): self.verbose(f'Error retrieving azure_tenant domains for "{domain}" (status code: {status_code})') return set(), dict() - found_domains = list(set(self.d_xml_regex.findall(r.text))) + found_domains = list(set(await self.helpers.re.findall(self.d_xml_regex, r.text))) domains = set() for d in found_domains: diff --git a/bbot/modules/baddns.py b/bbot/modules/baddns.py index 9abfebc84..992ae5c0d 100644 --- a/bbot/modules/baddns.py +++ b/bbot/modules/baddns.py @@ -4,9 +4,6 @@ import asyncio import logging -from bbot.core.logger.logger import include_logger - -include_logger(logging.getLogger("baddns")) class baddns(BaseModule): @@ -30,6 +27,7 @@ def select_modules(self): return selected_modules async def setup(self): + self.preset.core.logger.include_logger(logging.getLogger("baddns")) self.custom_nameservers = self.config.get("custom_nameservers", []) or None if self.custom_nameservers: self.custom_nameservers = self.helpers.chain_lists(self.custom_nameservers) diff --git a/bbot/modules/baddns_zone.py b/bbot/modules/baddns_zone.py index a42fe2e21..ac0fc3c57 100644 --- a/bbot/modules/baddns_zone.py +++ b/bbot/modules/baddns_zone.py @@ -1,11 +1,6 @@ from baddns.base import get_all_modules from .baddns import baddns as baddns_module -import logging -from bbot.core.logger.logger import include_logger - -include_logger(logging.getLogger("baddns_zone")) - class baddns_zone(baddns_module): watched_events = ["DNS_NAME"] diff --git a/bbot/modules/badsecrets.py b/bbot/modules/badsecrets.py index 7fde4a8e3..01cc36ed8 100644 --- a/bbot/modules/badsecrets.py +++ b/bbot/modules/badsecrets.py @@ -8,7 +8,7 @@ class badsecrets(BaseModule): watched_events = ["HTTP_RESPONSE"] produced_events = ["FINDING", "VULNERABILITY", "TECHNOLOGY"] - flags = ["active", "safe", "web-basic", "web-thorough"] + flags = ["active", "safe", "web-basic"] meta = {"description": "Library for detecting known or weak secrets across many web frameworks"} deps_pip = ["badsecrets~=0.4.490"] @@ -33,7 +33,7 @@ async def handle_event(self, event): resp_cookies[c2[0]] = c2[1] if resp_body or resp_cookies: try: - r_list = await 
self.scan.run_in_executor_mp( + r_list = await self.helpers.run_in_executor_mp( carve_all_modules, body=resp_body, headers=resp_headers, diff --git a/bbot/modules/base.py b/bbot/modules/base.py index c0bf6d63b..c102b138d 100644 --- a/bbot/modules/base.py +++ b/bbot/modules/base.py @@ -4,8 +4,8 @@ from sys import exc_info from contextlib import suppress +from ..errors import ValidationError from ..core.helpers.misc import get_size # noqa -from ..core.errors import ValidationError from ..core.helpers.async_helpers import TaskCounter, ShuffleQueue @@ -21,6 +21,8 @@ class BaseModule: flags (List): Flags indicating the type of module (must have at least "safe" or "aggressive" and "passive" or "active"). + deps_modules (List): Other BBOT modules this module depends on. Empty list by default. + deps_pip (List): Python dependencies to install via pip. Empty list by default. deps_apt (List): APT package dependencies to install. Empty list by default. @@ -83,6 +85,7 @@ class BaseModule: options = {} options_desc = {} + deps_modules = [] deps_pip = [] deps_apt = [] deps_shell = [] @@ -108,6 +111,7 @@ class BaseModule: _priority = 3 _name = "base" _type = "scan" + _intercept = False def __init__(self, scan): """Initializes a module instance. @@ -391,8 +395,7 @@ async def _handle_batch(self): self.verbose(f"Handling batch of {len(events):,} events") submitted = True async with self.scan._acatch(f"{self.name}.handle_batch()"): - handle_batch_task = asyncio.create_task(self.handle_batch(*events)) - await handle_batch_task + await self.handle_batch(*events) self.verbose(f"Finished handling batch of {len(events):,} events") if finish: context = f"{self.name}.finish()" @@ -471,7 +474,7 @@ async def emit_event(self, *args, **kwargs): if event: await self.queue_outgoing_event(event, **emit_kwargs) - async def _events_waiting(self): + async def _events_waiting(self, batch_size=None): """ Asynchronously fetches events from the incoming_event_queue, up to a specified batch size. @@ -489,10 +492,12 @@ async def _events_waiting(self): - "FINISHED" events are handled differently and the finish flag is set to True. - If the queue is empty or the batch size is reached, the loop breaks. """ + if batch_size is None: + batch_size = self.batch_size events = [] finish = False while self.incoming_event_queue: - if len(events) > self.batch_size: + if batch_size != -1 and len(events) > self.batch_size: break try: event = self.incoming_event_queue.get_nowait() @@ -549,8 +554,7 @@ async def _setup(self): status = False self.debug(f"Setting up module {self.name}") try: - setup_task = asyncio.create_task(self.setup()) - result = await setup_task + result = await self.setup() if type(result) == tuple and len(result) == 2: status, msg = result else: @@ -558,10 +562,10 @@ async def _setup(self): msg = status_codes[status] self.debug(f"Finished setting up module {self.name}") except Exception as e: - self.set_error_state() + self.set_error_state(f"Unexpected error during module setup: {e}", critical=True) msg = f"{e}" self.trace() - return self.name, status, str(msg) + return self, status, str(msg) async def _worker(self): """ @@ -587,7 +591,7 @@ async def _worker(self): - Each event is subject to a post-check via '_event_postcheck()' to decide whether it should be handled. - Special 'FINISHED' events trigger the 'finish()' method of the module. 
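To make the new `batch_size` parameter above concrete, here is a simplified sketch of the drain loop `_events_waiting` implements: pull events without blocking until the queue is empty or the cap is hit, flagging `FINISHED` separately (with `-1` meaning "no cap"):

```python
# Simplified sketch of _events_waiting(): drain up to batch_size events.
import asyncio

async def events_waiting(queue: asyncio.Queue, batch_size: int):
    events, finish = [], False
    while True:
        if batch_size != -1 and len(events) > batch_size:
            break  # batch is full
        try:
            event = queue.get_nowait()
        except asyncio.QueueEmpty:
            break  # queue is empty
        if event.type == "FINISHED":
            finish = True  # handled specially by the worker
        else:
            events.append(event)
    return events, finish
```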
""" - async with self.scan._acatch(context=self._worker): + async with self.scan._acatch(context=self._worker, unhandled_is_critical=True): try: while not self.scan.stopping and not self.errored: # hold the reigns if our outgoing queue is full @@ -617,16 +621,13 @@ async def _worker(self): if event.type == "FINISHED": context = f"{self.name}.finish()" async with self.scan._acatch(context), self._task_counter.count(context): - finish_task = asyncio.create_task(self.finish()) - await finish_task + await self.finish() else: context = f"{self.name}.handle_event({event})" self.scan.stats.event_consumed(event, self) self.debug(f"Handling {event}") async with self.scan._acatch(context), self._task_counter.count(context): - task_name = f"{self.name}.handle_event({event})" - handle_event_task = asyncio.create_task(self.handle_event(event), name=task_name) - await handle_event_task + await self.handle_event(event) self.debug(f"Finished handling {event}") else: self.debug(f"Not accepting {event} because {reason}") @@ -639,6 +640,8 @@ async def _worker(self): def max_scope_distance(self): if self.in_scope_only or self.target_only: return 0 + if self.scope_distance_modifier is None: + return 999 return max(0, self.scan.scope_search_distance + self.scope_distance_modifier) def _event_precheck(self, event): @@ -682,6 +685,7 @@ def _event_precheck(self, event): if "target" not in event.tags: return False, "it did not meet target_only filter criteria" # exclude certain URLs (e.g. javascript): + # TODO: revisit this after httpx rework if event.type.startswith("URL") and self.name != "httpx" and "httpx-only" in event.tags: return False, "its extension was listed in url_extension_httpx_only" @@ -691,7 +695,10 @@ async def _event_postcheck(self, event): """ A simple wrapper for dup tracking """ - acceptable, reason = await self.__event_postcheck(event) + # special exception for "FINISHED" event + if event.type in ("FINISHED",): + return True, "" + acceptable, reason = await self._event_postcheck_inner(event) if acceptable: # check duplicates is_incoming_duplicate, reason = self.is_incoming_duplicate(event, add=True) @@ -700,7 +707,7 @@ async def _event_postcheck(self, event): return acceptable, reason - async def __event_postcheck(self, event): + async def _event_postcheck_inner(self, event): """ Post-checks an event to determine if it should be accepted by the module for handling. @@ -718,10 +725,6 @@ async def __event_postcheck(self, event): - This method also maintains host-based tracking when the `per_host_only` or similar flags are set. - The method will also update event production stats for output modules. """ - # special exception for "FINISHED" event - if event.type in ("FINISHED",): - return True, "" - # force-output certain events to the graph if self._is_graph_important(event): return True, "event is critical to the graph" @@ -774,7 +777,7 @@ async def _cleanup(self): async with self.scan._acatch(context), self._task_counter.count(context): await self.helpers.execute_sync_or_async(callback) - async def queue_event(self, event, precheck=True): + async def queue_event(self, event): """ Asynchronously queues an incoming event to the module's event queue for further processing. 
@@ -797,9 +800,7 @@ async def queue_event(self, event, precheck=True): if self.incoming_event_queue is False: self.debug(f"Not in an acceptable state to queue incoming event") return - acceptable, reason = True, "precheck was skipped" - if precheck: - acceptable, reason = self._event_precheck(event) + acceptable, reason = self._event_precheck(event) if not acceptable: if reason and reason != "its type is not in watched_events": self.debug(f"Not queueing {event} because {reason}") @@ -811,7 +812,7 @@ async def queue_event(self, event, precheck=True): async with self._event_received: self._event_received.notify() if event.type != "FINISHED": - self.scan.manager._new_activity = True + self.scan._new_activity = True except AttributeError: self.debug(f"Not in an acceptable state to queue incoming event") @@ -840,7 +841,7 @@ async def queue_outgoing_event(self, event, **kwargs): except AttributeError: self.debug(f"Not in an acceptable state to queue outgoing event") - def set_error_state(self, message=None, clear_outgoing_queue=False): + def set_error_state(self, message=None, clear_outgoing_queue=False, critical=False): """ Puts the module into an errored state where it cannot accept new events. Optionally logs a warning message. @@ -865,7 +866,11 @@ def set_error_state(self, message=None, clear_outgoing_queue=False): log_msg = "Setting error state" if message is not None: log_msg += f": {message}" - self.warning(log_msg) + if critical: + log_fn = self.error + else: + log_fn = self.warning + log_fn(log_msg) self.errored = True # clear incoming queue if self.incoming_event_queue is not False: @@ -1068,6 +1073,10 @@ async def request_with_fail_count(self, *args, **kwargs): self.set_error_state(f"Setting error state due to {self._request_failures:,} failed HTTP requests") return r + @property + def preset(self): + return self.scan.preset + @property def config(self): """Property that provides easy access to the module's configuration in the scan's config. @@ -1391,3 +1400,119 @@ def critical(self, *args, trace=True, **kwargs): self.log.critical(*args, extra={"scan_id": self.scan.id}, **kwargs) if trace: self.trace() + + +class InterceptModule(BaseModule): + """ + An Intercept Module is a special type of high-priority module that gets early access to events. + + If you want your module to tag or modify an event before it's distributed to the scan, it should + probably be an intercept module. + + Examples of intercept modules include `dns` (for DNS resolution and wildcard detection) + and `cloud` (for detection and tagging of cloud assets). 
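A hypothetical example (the module name and tag are invented; the return convention matches the worker loop below, where `(False, reason)` stops the event from being forwarded):

```python
class tagger(InterceptModule):
    """Hypothetical intercept module: tag every event before the scan sees it."""

    watched_events = ["*"]

    async def handle_event(self, event, kwargs):
        event.tags.add("seen-by-tagger")
        if "out-of-scope" in event.tags:
            # (False, reason) tells the worker not to forward the event
            return False, "event is out of scope"
        # returning nothing lets the event pass to the next intercept module
```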
+ """ + + accept_dupes = True + suppress_dupes = False + _intercept = True + + async def _worker(self): + async with self.scan._acatch(context=self._worker, unhandled_is_critical=True): + try: + while not self.scan.stopping and not self.errored: + try: + if self.incoming_event_queue is not False: + incoming = await self.get_incoming_event() + try: + event, kwargs = incoming + except ValueError: + event = incoming + kwargs = {} + else: + self.debug(f"Event queue is in bad state") + break + except asyncio.queues.QueueEmpty: + await asyncio.sleep(0.1) + continue + + if event.type == "FINISHED": + context = f"{self.name}.finish()" + async with self.scan._acatch(context), self._task_counter.count(context): + await self.finish() + continue + + self.debug(f"Got {event} from {getattr(event, 'module', 'unknown_module')}") + + acceptable = True + async with self._task_counter.count(f"event_precheck({event})"): + precheck_pass, reason = self._event_precheck(event) + if not precheck_pass: + self.debug(f"Not hooking {event} because precheck failed ({reason})") + acceptable = False + async with self._task_counter.count(f"event_postcheck({event})"): + postcheck_pass, reason = await self._event_postcheck(event) + if not postcheck_pass: + self.debug(f"Not hooking {event} because postcheck failed ({reason})") + acceptable = False + + # whether to pass the event on to the rest of the scan + # defaults to true, unless handle_event returns False + forward_event = True + forward_event_reason = "" + + if acceptable: + context = f"{self.name}.handle_event({event, kwargs})" + self.scan.stats.event_consumed(event, self) + self.debug(f"Hooking {event}") + async with self.scan._acatch(context), self._task_counter.count(context): + forward_event = await self.handle_event(event, kwargs) + with suppress(ValueError, TypeError): + forward_event, forward_event_reason = forward_event + + self.debug(f"Finished hooking {event}") + + if forward_event is False: + self.debug(f"Not forwarding {event} because {forward_event_reason}") + continue + + await self.forward_event(event, kwargs) + + except asyncio.CancelledError: + self.log.trace("Worker cancelled") + raise + self.log.trace(f"Worker stopped") + + async def get_incoming_event(self): + """ + Get an event from this module's incoming event queue + """ + return await self.incoming_event_queue.get() + + async def forward_event(self, event, kwargs): + """ + Used for forwarding the event on to the next intercept module + """ + await self.outgoing_event_queue.put((event, kwargs)) + + async def queue_outgoing_event(self, event, **kwargs): + """ + Used by emit_event() to raise new events to the scan + """ + # if this was a normal module, we'd put it in the outgoing queue + # but because it's a intercept module, we need to queue it with the first intercept module + await self.scan.ingress_module.queue_event(event, kwargs) + + async def queue_event(self, event, kwargs=None): + """ + Put an event in this module's incoming event queue + """ + if kwargs is None: + kwargs = {} + try: + self.incoming_event_queue.put_nowait((event, kwargs)) + except AttributeError: + self.debug(f"Not in an acceptable state to queue incoming event") + + async def _event_postcheck(self, event): + return await self._event_postcheck_inner(event) diff --git a/bbot/modules/bevigil.py b/bbot/modules/bevigil.py index 435ceae08..bbf339b08 100644 --- a/bbot/modules/bevigil.py +++ b/bbot/modules/bevigil.py @@ -34,7 +34,7 @@ async def handle_event(self, event): if self.urls: urls = await self.query(query, 
request_fn=self.request_urls, parse_fn=self.parse_urls) if urls: - for parsed_url in await self.scan.run_in_executor_mp(self.helpers.validators.collapse_urls, urls): + for parsed_url in await self.helpers.run_in_executor_mp(self.helpers.validators.collapse_urls, urls): await self.emit_event(parsed_url.geturl(), "URL_UNVERIFIED", source=event) async def request_subdomains(self, query): diff --git a/bbot/modules/bucket_amazon.py b/bbot/modules/bucket_amazon.py index ddb6d05f3..3e17b186a 100644 --- a/bbot/modules/bucket_amazon.py +++ b/bbot/modules/bucket_amazon.py @@ -4,7 +4,7 @@ class bucket_amazon(bucket_template): watched_events = ["DNS_NAME", "STORAGE_BUCKET"] produced_events = ["STORAGE_BUCKET", "FINDING"] - flags = ["active", "safe", "cloud-enum", "web-basic", "web-thorough"] + flags = ["active", "safe", "cloud-enum", "web-basic"] meta = {"description": "Check for S3 buckets related to target"} options = {"permutations": False} options_desc = { diff --git a/bbot/modules/bucket_azure.py b/bbot/modules/bucket_azure.py index 81e441b5f..6c828afed 100644 --- a/bbot/modules/bucket_azure.py +++ b/bbot/modules/bucket_azure.py @@ -4,7 +4,7 @@ class bucket_azure(bucket_template): watched_events = ["DNS_NAME", "STORAGE_BUCKET"] produced_events = ["STORAGE_BUCKET", "FINDING"] - flags = ["active", "safe", "cloud-enum", "web-basic", "web-thorough"] + flags = ["active", "safe", "cloud-enum", "web-basic"] meta = {"description": "Check for Azure storage blobs related to target"} options = {"permutations": False} options_desc = { diff --git a/bbot/modules/bucket_firebase.py b/bbot/modules/bucket_firebase.py index 9c883bc9c..01b1fc213 100644 --- a/bbot/modules/bucket_firebase.py +++ b/bbot/modules/bucket_firebase.py @@ -4,7 +4,7 @@ class bucket_firebase(bucket_template): watched_events = ["DNS_NAME", "STORAGE_BUCKET"] produced_events = ["STORAGE_BUCKET", "FINDING"] - flags = ["active", "safe", "cloud-enum", "web-basic", "web-thorough"] + flags = ["active", "safe", "cloud-enum", "web-basic"] meta = {"description": "Check for open Firebase databases related to target"} options = {"permutations": False} options_desc = { diff --git a/bbot/modules/bucket_google.py b/bbot/modules/bucket_google.py index 0bd22f0f1..9e63ddc8b 100644 --- a/bbot/modules/bucket_google.py +++ b/bbot/modules/bucket_google.py @@ -8,7 +8,7 @@ class bucket_google(bucket_template): watched_events = ["DNS_NAME", "STORAGE_BUCKET"] produced_events = ["STORAGE_BUCKET", "FINDING"] - flags = ["active", "safe", "cloud-enum", "web-basic", "web-thorough"] + flags = ["active", "safe", "cloud-enum", "web-basic"] meta = {"description": "Check for Google object storage related to target"} options = {"permutations": False} options_desc = { diff --git a/bbot/modules/bypass403.py b/bbot/modules/bypass403.py index c58463401..0ce3df899 100644 --- a/bbot/modules/bypass403.py +++ b/bbot/modules/bypass403.py @@ -1,5 +1,5 @@ +from bbot.errors import HttpCompareError from bbot.modules.base import BaseModule -from bbot.core.errors import HttpCompareError """ Port of https://github.com/iamj0ker/bypass-403/ and https://portswigger.net/bappstore/444407b96d9c4de0adb7aed89e826122 diff --git a/bbot/modules/deadly/dastardly.py b/bbot/modules/deadly/dastardly.py index c419f67d9..837b4a2c2 100644 --- a/bbot/modules/deadly/dastardly.py +++ b/bbot/modules/deadly/dastardly.py @@ -9,29 +9,7 @@ class dastardly(BaseModule): meta = {"description": "Lightweight web application security scanner"} deps_pip = ["lxml~=4.9.2"] - deps_ansible = [ - { - "name": "Check if Docker is 
already installed", - "command": "docker --version", - "register": "docker_installed", - "ignore_errors": True, - }, - { - "name": "Install Docker (Non-Debian)", - "package": {"name": "docker", "state": "present"}, - "become": True, - "when": "ansible_facts['os_family'] != 'Debian' and docker_installed.rc != 0", - }, - { - "name": "Install Docker (Debian)", - "package": { - "name": "docker.io", - "state": "present", - }, - "become": True, - "when": "ansible_facts['os_family'] == 'Debian' and docker_installed.rc != 0", - }, - ] + deps_common = ["docker"] per_hostport_only = True async def setup(self): @@ -107,7 +85,9 @@ def parse_dastardly_xml(self, xml_file): et = etree.parse(f) for testsuite in et.iter("testsuite"): yield TestSuite(testsuite) - except Exception as e: + except FileNotFoundError: + pass + except etree.ParseError as e: self.warning(f"Error parsing Dastardly XML at {xml_file}: {e}") diff --git a/bbot/modules/deadly/ffuf.py b/bbot/modules/deadly/ffuf.py index 8382f1e66..a56c73506 100644 --- a/bbot/modules/deadly/ffuf.py +++ b/bbot/modules/deadly/ffuf.py @@ -17,7 +17,6 @@ class ffuf(BaseModule): "wordlist": "https://raw.githubusercontent.com/danielmiessler/SecLists/master/Discovery/Web-Content/raft-small-directories.txt", "lines": 5000, "max_depth": 0, - "version": "2.0.0", "extensions": "", } @@ -25,21 +24,10 @@ class ffuf(BaseModule): "wordlist": "Specify wordlist to use when finding directories", "lines": "take only the first N lines from the wordlist when finding directories", "max_depth": "the maximum directory depth to attempt to solve", - "version": "ffuf version", "extensions": "Optionally include a list of extensions to extend the keyword with (comma separated)", } - deps_ansible = [ - { - "name": "Download ffuf", - "unarchive": { - "src": "https://github.com/ffuf/ffuf/releases/download/v#{BBOT_MODULES_FFUF_VERSION}/ffuf_#{BBOT_MODULES_FFUF_VERSION}_#{BBOT_OS}_#{BBOT_CPU_ARCH}.tar.gz", - "include": "ffuf", - "dest": "#{BBOT_TOOLS}", - "remote_src": True, - }, - } - ] + deps_common = ["ffuf"] banned_characters = [" "] diff --git a/bbot/modules/deadly/nuclei.py b/bbot/modules/deadly/nuclei.py index e11ef5af5..9234f7494 100644 --- a/bbot/modules/deadly/nuclei.py +++ b/bbot/modules/deadly/nuclei.py @@ -338,13 +338,15 @@ def get_yaml_request_attr(self, yf, attr): raw = r.get("raw") if not raw: res = r.get(attr) - yield res + if res is not None: + yield res def get_yaml_info_attr(self, yf, attr): p = self.parse_yaml(yf) info = p.get("info", []) res = info.get(attr) - yield res + if res is not None: + yield res # Parse through all templates and locate those which match the conditions necessary to collapse down to the budget setting def find_collapsible_templates(self): diff --git a/bbot/modules/deadly/vhost.py b/bbot/modules/deadly/vhost.py index e2908dbbe..cf7be1f67 100644 --- a/bbot/modules/deadly/vhost.py +++ b/bbot/modules/deadly/vhost.py @@ -22,17 +22,7 @@ class vhost(ffuf): "lines": "take only the first N lines from the wordlist when finding directories", } - deps_ansible = [ - { - "name": "Download ffuf", - "unarchive": { - "src": "https://github.com/ffuf/ffuf/releases/download/v#{BBOT_MODULES_FFUF_VERSION}/ffuf_#{BBOT_MODULES_FFUF_VERSION}_#{BBOT_OS}_#{BBOT_CPU_ARCH}.tar.gz", - "include": "ffuf", - "dest": "#{BBOT_TOOLS}", - "remote_src": True, - }, - } - ] + deps_common = ["ffuf"] in_scope_only = True diff --git a/bbot/modules/dehashed.py b/bbot/modules/dehashed.py index c1a35c419..caa5fb662 100644 --- a/bbot/modules/dehashed.py +++ b/bbot/modules/dehashed.py @@ 
-33,7 +33,7 @@ async def handle_event(self, event): for entry in entries: # we have to clean up the email field because dehashed does a poor job of it email_str = entry.get("email", "").replace("\\", "") - found_emails = list(self.helpers.extract_emails(email_str)) + found_emails = list(await self.helpers.re.extract_emails(email_str)) if not found_emails: self.debug(f"Invalid email from dehashed.com: {email_str}") continue diff --git a/bbot/modules/dockerhub.py b/bbot/modules/dockerhub.py index 1cf8aafa5..0427d6f22 100644 --- a/bbot/modules/dockerhub.py +++ b/bbot/modules/dockerhub.py @@ -4,7 +4,7 @@ class dockerhub(BaseModule): watched_events = ["SOCIAL", "ORG_STUB"] produced_events = ["SOCIAL", "CODE_REPOSITORY", "URL_UNVERIFIED"] - flags = ["active", "safe"] + flags = ["active", "safe", "code-enum"] meta = {"description": "Search for docker repositories of discovered orgs/usernames"} site_url = "https://hub.docker.com" diff --git a/bbot/modules/dotnetnuke.py b/bbot/modules/dotnetnuke.py index ea3d7b920..cd3753dc2 100644 --- a/bbot/modules/dotnetnuke.py +++ b/bbot/modules/dotnetnuke.py @@ -1,5 +1,5 @@ +from bbot.errors import InteractshError from bbot.modules.base import BaseModule -from bbot.core.errors import InteractshError class dotnetnuke(BaseModule): diff --git a/bbot/modules/emailformat.py b/bbot/modules/emailformat.py index 000c3d5cf..31cff1468 100644 --- a/bbot/modules/emailformat.py +++ b/bbot/modules/emailformat.py @@ -17,6 +17,6 @@ async def handle_event(self, event): r = await self.request_with_fail_count(url) if not r: return - for email in self.helpers.extract_emails(r.text): + for email in await self.helpers.re.extract_emails(r.text): if email.endswith(query): await self.emit_event(email, "EMAIL_ADDRESS", source=event) diff --git a/bbot/modules/ffuf_shortnames.py b/bbot/modules/ffuf_shortnames.py index 562ae681f..cfc58cba4 100644 --- a/bbot/modules/ffuf_shortnames.py +++ b/bbot/modules/ffuf_shortnames.py @@ -59,17 +59,7 @@ class ffuf_shortnames(ffuf): "find_delimiters": "Attempt to detect common delimiters and make additional ffuf runs against them", } - deps_ansible = [ - { - "name": "Download ffuf", - "unarchive": { - "src": "https://github.com/ffuf/ffuf/releases/download/v#{BBOT_MODULES_FFUF_VERSION}/ffuf_#{BBOT_MODULES_FFUF_VERSION}_#{BBOT_OS_PLATFORM}_#{BBOT_CPU_ARCH}.tar.gz", - "include": "ffuf", - "dest": "#{BBOT_TOOLS}", - "remote_src": True, - }, - } - ] + deps_common = ["ffuf"] in_scope_only = True @@ -92,8 +82,7 @@ async def setup(self): self.extensions = parse_list_string(self.config.get("extensions", "")) self.debug(f"Using custom extensions: [{','.join(self.extensions)}]") except ValueError as e: - self.warning(f"Error parsing extensions: {e}") - return False + return False, f"Error parsing extensions: {e}" self.ignore_redirects = self.config.get("ignore_redirects") @@ -123,78 +112,73 @@ def find_delimiter(self, hint): return None async def filter_event(self, event): + if event.source.type != "URL": + return False, "its source event is not of type URL" return True async def handle_event(self, event): - if event.source.type == "URL": - filename_hint = re.sub(r"~\d", "", event.parsed.path.rsplit(".", 1)[0].split("/")[-1]).lower() + filename_hint = re.sub(r"~\d", "", event.parsed.path.rsplit(".", 1)[0].split("/")[-1]).lower() - host = f"{event.source.parsed.scheme}://{event.source.parsed.netloc}/" - if host not in self.per_host_collection.keys(): - self.per_host_collection[host] = [(filename_hint, event.source.data)] + host = 
f"{event.source.parsed.scheme}://{event.source.parsed.netloc}/" + if host not in self.per_host_collection.keys(): + self.per_host_collection[host] = [(filename_hint, event.source.data)] - else: - self.per_host_collection[host].append((filename_hint, event.source.data)) + else: + self.per_host_collection[host].append((filename_hint, event.source.data)) - self.shortname_to_event[filename_hint] = event + self.shortname_to_event[filename_hint] = event - root_stub = "/".join(event.parsed.path.split("/")[:-1]) - root_url = f"{event.parsed.scheme}://{event.parsed.netloc}{root_stub}/" + root_stub = "/".join(event.parsed.path.split("/")[:-1]) + root_url = f"{event.parsed.scheme}://{event.parsed.netloc}{root_stub}/" - if "shortname-file" in event.tags: - used_extensions = self.build_extension_list(event) - - if len(filename_hint) == 6: - tempfile, tempfile_len = self.generate_templist(prefix=filename_hint) - self.verbose( - f"generated temp word list of size [{str(tempfile_len)}] for filename hint: [{filename_hint}]" - ) - - else: - tempfile = self.helpers.tempfile([filename_hint], pipe=False) - tempfile_len = 1 - - if tempfile_len > 0: - if "shortname-file" in event.tags: - for ext in used_extensions: - async for r in self.execute_ffuf(tempfile, root_url, suffix=f".{ext}"): - await self.emit_event( - r["url"], "URL_UNVERIFIED", source=event, tags=[f"status-{r['status']}"] - ) + if "shortname-file" in event.tags: + used_extensions = self.build_extension_list(event) - elif "shortname-directory" in event.tags: - async for r in self.execute_ffuf(tempfile, root_url, exts=["/"]): - r_url = f"{r['url'].rstrip('/')}/" - await self.emit_event(r_url, "URL_UNVERIFIED", source=event, tags=[f"status-{r['status']}"]) + if len(filename_hint) == 6: + tempfile, tempfile_len = self.generate_templist(prefix=filename_hint) + self.verbose( + f"generated temp word list of size [{str(tempfile_len)}] for filename hint: [{filename_hint}]" + ) - if self.config.get("find_delimiters"): - if "shortname-directory" in event.tags: + else: + tempfile = self.helpers.tempfile([filename_hint], pipe=False) + tempfile_len = 1 + + if tempfile_len > 0: + if "shortname-file" in event.tags: + for ext in used_extensions: + async for r in self.execute_ffuf(tempfile, root_url, suffix=f".{ext}"): + await self.emit_event(r["url"], "URL_UNVERIFIED", source=event, tags=[f"status-{r['status']}"]) + + elif "shortname-directory" in event.tags: + async for r in self.execute_ffuf(tempfile, root_url, exts=["/"]): + r_url = f"{r['url'].rstrip('/')}/" + await self.emit_event(r_url, "URL_UNVERIFIED", source=event, tags=[f"status-{r['status']}"]) + + if self.config.get("find_delimiters"): + if "shortname-directory" in event.tags: + delimiter_r = self.find_delimiter(filename_hint) + if delimiter_r: + delimiter, prefix, partial_hint = delimiter_r + self.verbose(f"Detected delimiter [{delimiter}] in hint [{filename_hint}]") + tempfile, tempfile_len = self.generate_templist(prefix=partial_hint) + async for r in self.execute_ffuf(tempfile, root_url, prefix=f"{prefix}{delimiter}", exts=["/"]): + await self.emit_event(r["url"], "URL_UNVERIFIED", source=event, tags=[f"status-{r['status']}"]) + + elif "shortname-file" in event.tags: + for ext in used_extensions: delimiter_r = self.find_delimiter(filename_hint) if delimiter_r: delimiter, prefix, partial_hint = delimiter_r self.verbose(f"Detected delimiter [{delimiter}] in hint [{filename_hint}]") tempfile, tempfile_len = self.generate_templist(prefix=partial_hint) async for r in self.execute_ffuf( - tempfile, 
root_url, prefix=f"{prefix}{delimiter}", exts=["/"] + tempfile, root_url, prefix=f"{prefix}{delimiter}", suffix=f".{ext}" ): await self.emit_event( r["url"], "URL_UNVERIFIED", source=event, tags=[f"status-{r['status']}"] ) - elif "shortname-file" in event.tags: - for ext in used_extensions: - delimiter_r = self.find_delimiter(filename_hint) - if delimiter_r: - delimiter, prefix, partial_hint = delimiter_r - self.verbose(f"Detected delimiter [{delimiter}] in hint [{filename_hint}]") - tempfile, tempfile_len = self.generate_templist(prefix=partial_hint) - async for r in self.execute_ffuf( - tempfile, root_url, prefix=f"{prefix}{delimiter}", suffix=f".{ext}" - ): - await self.emit_event( - r["url"], "URL_UNVERIFIED", source=event, tags=[f"status-{r['status']}"] - ) - async def finish(self): if self.config.get("find_common_prefixes"): per_host_collection = dict(self.per_host_collection) diff --git a/bbot/modules/filedownload.py b/bbot/modules/filedownload.py index 5cf190d1f..22cddabee 100644 --- a/bbot/modules/filedownload.py +++ b/bbot/modules/filedownload.py @@ -14,7 +14,7 @@ class filedownload(BaseModule): watched_events = ["URL_UNVERIFIED", "HTTP_RESPONSE"] produced_events = [] - flags = ["active", "safe", "web-basic", "web-thorough"] + flags = ["active", "safe", "web-basic"] meta = {"description": "Download common filetypes such as PDF, DOCX, PPTX, etc."} options = { "extensions": [ @@ -120,7 +120,8 @@ async def handle_event(self, event): if extension_matches or filedownload_requested: await self.download_file(event.data) elif event.type == "HTTP_RESPONSE": - content_type = event.data["header"].get("content_type", "") + headers = event.data.get("header", {}) + content_type = headers.get("content_type", "") if content_type: url = event.data["url"] await self.download_file(url, content_type=content_type) diff --git a/bbot/modules/generic_ssrf.py b/bbot/modules/generic_ssrf.py index 9d75f4a9e..42efa5050 100644 --- a/bbot/modules/generic_ssrf.py +++ b/bbot/modules/generic_ssrf.py @@ -1,5 +1,5 @@ +from bbot.errors import InteractshError from bbot.modules.base import BaseModule -from bbot.core.errors import InteractshError ssrf_params = [ diff --git a/bbot/modules/git.py b/bbot/modules/git.py index 0b19f7e6a..5ffb91331 100644 --- a/bbot/modules/git.py +++ b/bbot/modules/git.py @@ -6,7 +6,7 @@ class git(BaseModule): watched_events = ["URL"] produced_events = ["FINDING"] - flags = ["active", "safe", "web-basic", "web-thorough"] + flags = ["active", "safe", "web-basic", "code-enum"] meta = {"description": "Check for exposed .git repositories"} in_scope_only = True diff --git a/bbot/modules/github_codesearch.py b/bbot/modules/github_codesearch.py index 6a3ea57a5..634b38f58 100644 --- a/bbot/modules/github_codesearch.py +++ b/bbot/modules/github_codesearch.py @@ -4,7 +4,7 @@ class github_codesearch(github): watched_events = ["DNS_NAME"] produced_events = ["CODE_REPOSITORY", "URL_UNVERIFIED"] - flags = ["passive", "subdomain-enum", "safe"] + flags = ["passive", "subdomain-enum", "safe", "code-enum"] meta = {"description": "Query Github's API for code containing the target domain name", "auth_required": True} options = {"api_key": "", "limit": 100} options_desc = {"api_key": "Github token", "limit": "Limit code search to this many results"} diff --git a/bbot/modules/github_org.py b/bbot/modules/github_org.py index 70cba4560..cf836af3e 100644 --- a/bbot/modules/github_org.py +++ b/bbot/modules/github_org.py @@ -4,7 +4,7 @@ class github_org(github): watched_events = ["ORG_STUB", "SOCIAL"] 
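The ffuf_shortnames hunks above standardize a convention this changeset leans on throughout: `setup()` and `filter_event()` return a `(False, reason)` tuple instead of logging a warning and returning a bare `False`, so the scanner can surface *why* a module failed to start or an event was skipped. A minimal sketch of the pattern (the `example` module and its `api_key` option are illustrative, not taken from this patch):

```python
from bbot.modules.base import BaseModule


class example(BaseModule):
    watched_events = ["URL"]
    produced_events = ["FINDING"]

    async def setup(self):
        # hypothetical option, for illustration only
        api_key = self.config.get("api_key", "")
        if not api_key:
            # (False, reason) aborts module setup with an explanatory message
            return False, "no API key configured"
        return True

    async def filter_event(self, event):
        # (False, reason) rejects this event; the reason is surfaced in debug logs
        if event.source.type != "URL":
            return False, "its source event is not of type URL"
        return True
```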
produced_events = ["CODE_REPOSITORY"] - flags = ["passive", "subdomain-enum", "safe"] + flags = ["passive", "subdomain-enum", "safe", "code-enum"] meta = {"description": "Query Github's API for organization and member repositories"} options = {"api_key": "", "include_members": True, "include_member_repos": False} options_desc = { @@ -76,6 +76,8 @@ async def handle_event(self, event): user = event.data self.verbose(f"Validating whether the organization {user} is within our scope...") is_org, in_scope = await self.validate_org(user) + if "target" in event.tags: + in_scope = True if not is_org or not in_scope: self.verbose(f"Unable to validate that {user} is in-scope, skipping...") return diff --git a/bbot/modules/gitlab.py b/bbot/modules/gitlab.py index 6464daa2b..6f4892580 100644 --- a/bbot/modules/gitlab.py +++ b/bbot/modules/gitlab.py @@ -4,7 +4,7 @@ class gitlab(BaseModule): watched_events = ["HTTP_RESPONSE", "TECHNOLOGY", "SOCIAL"] produced_events = ["TECHNOLOGY", "SOCIAL", "CODE_REPOSITORY", "FINDING"] - flags = ["active", "safe"] + flags = ["active", "safe", "code-enum"] meta = {"description": "Detect GitLab instances and query them for repositories"} options = {"api_key": ""} options_desc = {"api_key": "Gitlab access token"} @@ -45,7 +45,8 @@ async def handle_http_response(self, event): # identify gitlab instances from HTTP responses # HTTP_RESPONSE --> TECHNOLOGY # HTTP_RESPONSE --> FINDING - if "x_gitlab_meta" in event.data["header"]: + headers = event.data.get("header", {}) + if "x_gitlab_meta" in headers: url = event.parsed._replace(path="/").geturl() await self.emit_event( {"host": str(event.host), "technology": "GitLab", "url": url}, "TECHNOLOGY", source=event diff --git a/bbot/modules/gowitness.py b/bbot/modules/gowitness.py index 3271ef93f..ea8663bb7 100644 --- a/bbot/modules/gowitness.py +++ b/bbot/modules/gowitness.py @@ -29,45 +29,8 @@ class gowitness(BaseModule): "output_path": "where to save screenshots", "social": "Whether to screenshot social media webpages", } + deps_common = ["chromium"] deps_ansible = [ - { - "name": "Install Chromium (Non-Debian)", - "package": {"name": "chromium", "state": "present"}, - "become": True, - "when": "ansible_facts['os_family'] != 'Debian'", - "ignore_errors": True, - }, - { - "name": "Install Chromium dependencies (Debian)", - "package": { - "name": "libasound2,libatk-bridge2.0-0,libatk1.0-0,libcairo2,libcups2,libdrm2,libgbm1,libnss3,libpango-1.0-0,libxcomposite1,libxdamage1,libxfixes3,libxkbcommon0,libxrandr2", - "state": "present", - }, - "become": True, - "when": "ansible_facts['os_family'] == 'Debian'", - "ignore_errors": True, - }, - { - "name": "Get latest Chromium version (Debian)", - "uri": { - "url": "https://www.googleapis.com/download/storage/v1/b/chromium-browser-snapshots/o/Linux_x64%2FLAST_CHANGE?alt=media", - "return_content": True, - }, - "register": "chromium_version", - "when": "ansible_facts['os_family'] == 'Debian'", - "ignore_errors": True, - }, - { - "name": "Download Chromium (Debian)", - "unarchive": { - "src": "https://www.googleapis.com/download/storage/v1/b/chromium-browser-snapshots/o/Linux_x64%2F{{ chromium_version.content }}%2Fchrome-linux.zip?alt=media", - "remote_src": True, - "dest": "#{BBOT_TOOLS}", - "creates": "#{BBOT_TOOLS}/chrome-linux", - }, - "when": "ansible_facts['os_family'] == 'Debian'", - "ignore_errors": True, - }, { "name": "Download gowitness", "get_url": { diff --git a/bbot/modules/host_header.py b/bbot/modules/host_header.py index 4adaa766a..3e0f8069f 100644 --- 
a/bbot/modules/host_header.py +++ b/bbot/modules/host_header.py @@ -1,5 +1,5 @@ +from bbot.errors import InteractshError from bbot.modules.base import BaseModule -from bbot.core.errors import InteractshError class host_header(BaseModule): diff --git a/bbot/modules/httpx.py b/bbot/modules/httpx.py index 2e5dc0ffc..0f74fbcfc 100644 --- a/bbot/modules/httpx.py +++ b/bbot/modules/httpx.py @@ -10,7 +10,7 @@ class httpx(BaseModule): watched_events = ["OPEN_TCP_PORT", "URL_UNVERIFIED", "URL"] produced_events = ["URL", "HTTP_RESPONSE"] - flags = ["active", "safe", "web-basic", "web-thorough", "social-enum", "subdomain-enum", "cloud-enum"] + flags = ["active", "safe", "web-basic", "social-enum", "subdomain-enum", "cloud-enum"] meta = {"description": "Visit webpages. Many other modules rely on httpx"} options = { @@ -173,8 +173,6 @@ async def handle_batch(self, *events): if url_event: if url_event != source_event: await self.emit_event(url_event) - else: - url_event._resolved.set() # HTTP response await self.emit_event(j, "HTTP_RESPONSE", url_event, tags=url_event.tags) diff --git a/bbot/modules/hunt.py b/bbot/modules/hunt.py index add45b665..0a759f2cf 100644 --- a/bbot/modules/hunt.py +++ b/bbot/modules/hunt.py @@ -1,7 +1,6 @@ # adapted from https://github.com/bugcrowd/HUNT from bbot.modules.base import BaseModule -from bbot.core.helpers.misc import extract_params_html hunt_param_dict = { "Command Injection": [ @@ -281,7 +280,7 @@ class hunt(BaseModule): async def handle_event(self, event): body = event.data.get("body", "") - for p in extract_params_html(body): + for p in await self.helpers.re.extract_params_html(body): for k in hunt_param_dict.keys(): if p.lower() in hunt_param_dict[k]: description = f"Found potential {k.upper()} parameter [{p}]" diff --git a/bbot/modules/iis_shortnames.py b/bbot/modules/iis_shortnames.py index 7d558a23a..94d325df8 100644 --- a/bbot/modules/iis_shortnames.py +++ b/bbot/modules/iis_shortnames.py @@ -16,7 +16,7 @@ class IISShortnamesError(Exception): class iis_shortnames(BaseModule): watched_events = ["URL"] produced_events = ["URL_HINT"] - flags = ["active", "safe", "web-basic", "web-thorough", "iis-shortnames"] + flags = ["active", "safe", "web-basic", "iis-shortnames"] meta = {"description": "Check for IIS shortname vulnerability"} options = {"detect_only": True, "max_node_count": 50} options_desc = { diff --git a/bbot/modules/internal/base.py b/bbot/modules/internal/base.py index 9e7967b42..8ef1b7fd9 100644 --- a/bbot/modules/internal/base.py +++ b/bbot/modules/internal/base.py @@ -9,13 +9,6 @@ class BaseInternalModule(BaseModule): # Priority, 1-5, lower numbers == higher priority _priority = 3 - @property - def config(self): - config = self.scan.config.get("internal_modules", {}).get(self.name, {}) - if config is None: - config = {} - return config - @property def log(self): if self._log is None: diff --git a/bbot/modules/internal/cloud.py b/bbot/modules/internal/cloud.py new file mode 100644 index 000000000..7939487fd --- /dev/null +++ b/bbot/modules/internal/cloud.py @@ -0,0 +1,76 @@ +from bbot.modules.base import InterceptModule + + +class cloud(InterceptModule): + watched_events = ["*"] + meta = {"description": "Tag events by cloud provider, identify cloud resources like storage buckets"} + scope_distance_modifier = 1 + _priority = 3 + + async def setup(self): + self.dummy_modules = {} + for provider_name, provider in self.helpers.cloud.providers.items(): + self.dummy_modules[provider_name] = self.scan._make_dummy_module(f"cloud_{provider_name}", 
_type="scan") + + return True + + async def filter_event(self, event): + if (not event.host) or (event.type in ("IP_RANGE",)): + return False, "event does not have host attribute" + return True + + async def handle_event(self, event, kwargs): + # cloud tagging by hosts + hosts_to_check = set(str(s) for s in event.resolved_hosts) + hosts_to_check.add(str(event.host_original)) + for host in hosts_to_check: + for provider, provider_type, subnet in self.helpers.cloudcheck(host): + if provider: + event.add_tag(f"{provider_type}-{provider}") + + found = set() + # look for cloud assets in hosts, http responses + # loop through each provider + for provider in self.helpers.cloud.providers.values(): + provider_name = provider.name.lower() + base_kwargs = dict( + source=event, tags=[f"{provider.provider_type}-{provider_name}"], _provider=provider_name + ) + # loop through the provider's regex signatures, if any + for event_type, sigs in provider.signatures.items(): + if event_type != "STORAGE_BUCKET": + raise ValueError(f'Unknown cloudcheck event type "{event_type}"') + base_kwargs["event_type"] = event_type + for sig in sigs: + matches = [] + if event.type == "HTTP_RESPONSE": + matches = await self.helpers.re.findall(sig, event.data.get("body", "")) + elif event.type.startswith("DNS_NAME"): + for host in hosts_to_check: + match = sig.match(host) + if match: + matches.append(match.groups()) + for match in matches: + if not match in found: + found.add(match) + + _kwargs = dict(base_kwargs) + event_type_tag = f"cloud-{event_type}" + _kwargs["tags"].append(event_type_tag) + if event.type.startswith("DNS_NAME"): + event.add_tag(event_type_tag) + + if event_type == "STORAGE_BUCKET": + bucket_name, bucket_domain = match + _kwargs["data"] = { + "name": bucket_name, + "url": f"https://{bucket_name}.{bucket_domain}", + } + await self.emit_event(**_kwargs) + + async def emit_event(self, *args, **kwargs): + provider_name = kwargs.pop("_provider") + dummy_module = self.dummy_modules[provider_name] + event = dummy_module.make_event(*args, **kwargs) + if event: + await super().emit_event(event) diff --git a/bbot/modules/internal/dns.py b/bbot/modules/internal/dns.py new file mode 100644 index 000000000..b96b9b19c --- /dev/null +++ b/bbot/modules/internal/dns.py @@ -0,0 +1,246 @@ +import ipaddress +from contextlib import suppress +from cachetools import LRUCache + +from bbot.errors import ValidationError +from bbot.core.helpers.dns.engine import all_rdtypes +from bbot.core.helpers.async_helpers import NamedLock +from bbot.modules.base import InterceptModule, BaseModule + + +class DNS(InterceptModule): + watched_events = ["*"] + options = {"max_event_handlers": 25} + options_desc = {"max_event_handlers": "Number of concurrent DNS workers"} + _priority = 1 + _max_event_handlers = 25 + scope_distance_modifier = None + + class HostModule(BaseModule): + _name = "host" + _type = "internal" + + def _outgoing_dedup_hash(self, event): + return hash((event, self.name, event.always_emit)) + + async def setup(self): + self.dns_resolution = True + # you can disable DNS resolution with either the "dns" or "dns_resolution" config options + for key in ("dns", "dns_resolution"): + if self.scan.config.get(key, None) is False: + self.dns_resolution = False + self.scope_search_distance = max(0, int(self.scan.config.get("scope_search_distance", 0))) + self.scope_dns_search_distance = max(0, int(self.scan.config.get("scope_dns_search_distance", 1))) + + # event resolution cache + self._event_cache = LRUCache(maxsize=10000) + 
self._event_cache_locks = NamedLock() + + self.host_module = self.HostModule(self.scan) + + return True + + @property + def _dns_search_distance(self): + return max(self.scope_search_distance, self.scope_dns_search_distance) + + async def filter_event(self, event): + if (not event.host) or (event.type in ("IP_RANGE",)): + return False, "event does not have host attribute" + return True + + async def handle_event(self, event, kwargs): + dns_tags = set() + dns_children = dict() + event_whitelisted = False + event_blacklisted = False + + event_host = str(event.host) + event_host_hash = hash(str(event.host)) + event_is_ip = self.helpers.is_ip(event.host) + + # only emit DNS children if we haven't seen this host before + emit_children = self.dns_resolution and event_host_hash not in self._event_cache + + # we do DNS resolution inside a lock to make sure we don't duplicate work + # once the resolution happens, it will be cached so it doesn't need to happen again + async with self._event_cache_locks.lock(event_host_hash): + try: + # try to get from cache + dns_tags, dns_children, event_whitelisted, event_blacklisted = self._event_cache[event_host_hash] + except KeyError: + if event_is_ip: + rdtypes_to_resolve = ["PTR"] + else: + if self.dns_resolution: + rdtypes_to_resolve = all_rdtypes + else: + rdtypes_to_resolve = ("A", "AAAA", "CNAME") + + # if missing from cache, do DNS resolution + queries = [(event_host, rdtype) for rdtype in rdtypes_to_resolve] + error_rdtypes = [] + async for (query, rdtype), (answers, errors) in self.helpers.dns.resolve_raw_batch(queries): + if errors: + error_rdtypes.append(rdtype) + for answer, _rdtype in answers: + dns_tags.add(f"{rdtype.lower()}-record") + try: + dns_children[_rdtype].add(answer) + except KeyError: + dns_children[_rdtype] = {answer} + + for rdtype in error_rdtypes: + if rdtype not in dns_children: + dns_tags.add(f"{rdtype.lower()}-error") + + if not dns_children and not event_is_ip: + dns_tags.add("unresolved") + + for rdtype, children in dns_children.items(): + if event_blacklisted: + break + for host in children: + # whitelisting / blacklisting based on resolved hosts + if rdtype in ("A", "AAAA", "CNAME"): + # having a CNAME to an in-scope resource doesn't make you in-scope + if not event_whitelisted and rdtype != "CNAME": + with suppress(ValidationError): + if self.scan.whitelisted(host): + event_whitelisted = True + # CNAME to a blacklisted resources, means you're blacklisted + with suppress(ValidationError): + if self.scan.blacklisted(host): + dns_tags.add("blacklisted") + event_blacklisted = True + break + + # check for private IPs + try: + ip = ipaddress.ip_address(host) + if ip.is_private: + dns_tags.add("private-ip") + except ValueError: + continue + + # store results in cache + self._event_cache[event_host_hash] = dns_tags, dns_children, event_whitelisted, event_blacklisted + + # abort if the event resolves to something blacklisted + if event_blacklisted: + event.add_tag("blacklisted") + return False, f"it has a blacklisted DNS record" + + # set resolved_hosts attribute + for rdtype, children in dns_children.items(): + if rdtype in ("A", "AAAA", "CNAME"): + for host in children: + event.resolved_hosts.add(host) + + # set dns_children attribute + event.dns_children = dns_children + + # if the event resolves to an in-scope IP, set its scope distance to 0 + if event_whitelisted: + self.debug(f"Making {event} in-scope because it resolves to an in-scope resource") + event.scope_distance = 0 + + # check for wildcards, only if the event resolves 
to something that isn't an IP + if (not event_is_ip) and (dns_children): + if event.scope_distance <= self.scan.scope_search_distance: + await self.handle_wildcard_event(event) + + # kill runaway DNS chains + dns_resolve_distance = getattr(event, "dns_resolve_distance", 0) + if dns_resolve_distance >= self.helpers.dns.max_dns_resolve_distance: + self.debug( + f"Skipping DNS children for {event} because their DNS resolve distances would be greater than the configured value for this scan ({self.helpers.dns.max_dns_resolve_distance})" + ) + dns_children = {} + + # if the event is a DNS_NAME or IP, tag with "a-record", "ptr-record", etc. + if event.type in ("DNS_NAME", "IP_ADDRESS"): + for tag in dns_tags: + event.add_tag(tag) + + # If the event is unresolved, change its type to DNS_NAME_UNRESOLVED + if event.type == "DNS_NAME" and "unresolved" in event.tags and not "target" in event.tags: + event.type = "DNS_NAME_UNRESOLVED" + + # speculate DNS_NAMES and IP_ADDRESSes from other event types + source_event = event + if ( + event.host + and event.type not in ("DNS_NAME", "DNS_NAME_UNRESOLVED", "IP_ADDRESS", "IP_RANGE") + and not ((event.type in ("OPEN_TCP_PORT", "URL_UNVERIFIED") and str(event.module) == "speculate")) + ): + source_event = self.scan.make_event(event.host, "DNS_NAME", module=self.host_module, source=event) + # only emit the event if it's not already in the parent chain + if source_event is not None and (source_event.always_emit or source_event not in event.get_sources()): + source_event.scope_distance = event.scope_distance + if "target" in event.tags: + source_event.add_tag("target") + await self.emit_event(source_event) + + # emit DNS children + if emit_children: + in_dns_scope = -1 < event.scope_distance < self._dns_search_distance + dns_child_events = [] + if dns_children: + for rdtype, records in dns_children.items(): + module = self.scan._make_dummy_module_dns(rdtype) + module._priority = 4 + for record in records: + try: + child_event = self.scan.make_event(record, "DNS_NAME", module=module, source=source_event) + # if it's a hostname and it's only one hop away, mark it as affiliate + if child_event.type == "DNS_NAME" and child_event.scope_distance == 1: + child_event.add_tag("affiliate") + if in_dns_scope or self.preset.in_scope(child_event): + dns_child_events.append(child_event) + except ValidationError as e: + self.warning( + f'Event validation failed for DNS child of {source_event}: "{record}" ({rdtype}): {e}' + ) + for child_event in dns_child_events: + self.debug(f"Queueing DNS child for {event}: {child_event}") + await self.emit_event(child_event) + + async def handle_wildcard_event(self, event): + self.debug(f"Entering handle_wildcard_event({event}, children={event.dns_children})") + try: + event_host = str(event.host) + # check if the dns name itself is a wildcard entry + wildcard_rdtypes = await self.helpers.is_wildcard(event_host) + for rdtype, (is_wildcard, wildcard_host) in wildcard_rdtypes.items(): + wildcard_tag = "error" + if is_wildcard == True: + event.add_tag("wildcard") + wildcard_tag = "wildcard" + event.add_tag(f"{rdtype.lower()}-{wildcard_tag}") + + # wildcard event modification (www.evilcorp.com --> _wildcard.evilcorp.com) + if wildcard_rdtypes: + # these are the rdtypes that successfully resolve + resolved_rdtypes = set([c.upper() for c in event.dns_children]) + # these are the rdtypes that have wildcards + wildcard_rdtypes_set = set(wildcard_rdtypes) + # consider the event a full wildcard if all its records are wildcards + event_is_wildcard = 
False + if resolved_rdtypes: + event_is_wildcard = all(r in wildcard_rdtypes_set for r in resolved_rdtypes) + + if event_is_wildcard: + if event.type in ("DNS_NAME",) and not "_wildcard" in event.data.split("."): + wildcard_parent = self.helpers.parent_domain(event_host) + for rdtype, (_is_wildcard, _parent_domain) in wildcard_rdtypes.items(): + if _is_wildcard: + wildcard_parent = _parent_domain + break + wildcard_data = f"_wildcard.{wildcard_parent}" + if wildcard_data != event.data: + self.debug(f'Wildcard detected, changing event.data "{event.data}" --> "{wildcard_data}"') + event.data = wildcard_data + + finally: + self.debug(f"Finished handle_wildcard_event({event}, children={event.dns_children})") diff --git a/bbot/modules/internal/excavate.py b/bbot/modules/internal/excavate.py index 24322d06b..6b819c0d2 100644 --- a/bbot/modules/internal/excavate.py +++ b/bbot/modules/internal/excavate.py @@ -1,7 +1,7 @@ -import re import html import base64 import jwt as j +import regex as re from urllib.parse import urljoin from bbot.core.helpers.regexes import _email_regex, dns_name_regex @@ -14,6 +14,7 @@ class BaseExtractor: def __init__(self, excavate): self.excavate = excavate + self.helpers = excavate.helpers self.compiled_regexes = {} for rname, r in self.regexes.items(): self.compiled_regexes[rname] = re.compile(r) @@ -29,7 +30,7 @@ async def _search(self, content, event, **kwargs): for name, regex in self.compiled_regexes.items(): # yield to event loop await self.excavate.helpers.sleep(0) - for result in regex.findall(content): + for result in await self.helpers.re.findall(regex, content): yield result, name async def report(self, result, name, event): @@ -39,14 +40,14 @@ async def report(self, result, name, event): class CSPExtractor(BaseExtractor): regexes = {"CSP": r"(?i)(?m)Content-Security-Policy:.+$"} - def extract_domains(self, csp): - domains = dns_name_regex.findall(csp) + async def extract_domains(self, csp): + domains = await self.helpers.re.findall(dns_name_regex, csp) unique_domains = set(domains) return unique_domains async def search(self, content, event, **kwargs): async for csp, name in self._search(content, event, **kwargs): - extracted_domains = self.extract_domains(csp) + extracted_domains = await self.extract_domains(csp) for domain in extracted_domains: await self.report(domain, event, **kwargs) @@ -125,7 +126,7 @@ async def _search(self, content, event, **kwargs): for name, regex in self.compiled_regexes.items(): # yield to event loop await self.excavate.helpers.sleep(0) - for result in regex.findall(content): + for result in await self.helpers.re.findall(regex, content): if name.startswith("full"): protocol, other = result result = f"{protocol}://{other}" @@ -386,9 +387,8 @@ async def handle_event(self, event): else: self.verbose(f"Exceeded max HTTP redirects ({self.max_redirects}): {location}") - body = self.helpers.recursive_decode(event.data.get("body", "")) - # Cloud extractors - self.helpers.cloud.excavate(event, body) + body = await self.helpers.re.recursive_decode(event.data.get("body", "")) + await self.search( body, [ @@ -405,7 +405,7 @@ async def handle_event(self, event): consider_spider_danger=True, ) - headers = self.helpers.recursive_decode(event.data.get("raw_header", "")) + headers = await self.helpers.re.recursive_decode(event.data.get("raw_header", "")) await self.search( headers, [self.hostname, self.url, self.email, self.error_extractor, self.jwt, self.serialization, self.csp], diff --git a/bbot/modules/internal/speculate.py 
b/bbot/modules/internal/speculate.py index bb92c09a8..9f39598d2 100644 --- a/bbot/modules/internal/speculate.py +++ b/bbot/modules/internal/speculate.py @@ -142,9 +142,6 @@ async def handle_event(self, event): quick=True, ) - # storage buckets etc. - self.helpers.cloud.speculate(event) - # ORG_STUB from TLD, SOCIAL, AZURE_TENANT org_stubs = set() if event.type == "DNS_NAME" and event.scope_distance == 0: @@ -153,7 +150,9 @@ async def handle_event(self, event): if registered_domain: tld_stub = getattr(tldextracted, "domain", "") if tld_stub: - org_stubs.add(tld_stub) + decoded_tld_stub = self.helpers.smart_decode_punycode(tld_stub) + org_stubs.add(decoded_tld_stub) + org_stubs.add(self.helpers.unidecode(decoded_tld_stub)) elif event.type == "SOCIAL": stub = event.data.get("stub", "") if stub: diff --git a/bbot/modules/massdns.py b/bbot/modules/massdns.py index e453ee8d4..ffacb8c64 100644 --- a/bbot/modules/massdns.py +++ b/bbot/modules/massdns.py @@ -1,8 +1,7 @@ -import re import json import random -import asyncio import subprocess +import regex as re from bbot.modules.templates.subdomain_enum import subdomain_enum @@ -39,41 +38,7 @@ class massdns(subdomain_enum): "max_depth": "How many subdomains deep to brute force, i.e. 5.4.3.2.1.evilcorp.com", } subdomain_file = None - deps_ansible = [ - { - "name": "install dev tools", - "package": {"name": ["gcc", "git", "make"], "state": "present"}, - "become": True, - "ignore_errors": True, - }, - { - "name": "Download massdns source code", - "git": { - "repo": "https://github.com/blechschmidt/massdns.git", - "dest": "#{BBOT_TEMP}/massdns", - "single_branch": True, - "version": "master", - }, - }, - { - "name": "Build massdns (Linux)", - "command": {"chdir": "#{BBOT_TEMP}/massdns", "cmd": "make", "creates": "#{BBOT_TEMP}/massdns/bin/massdns"}, - "when": "ansible_facts['system'] == 'Linux'", - }, - { - "name": "Build massdns (non-Linux)", - "command": { - "chdir": "#{BBOT_TEMP}/massdns", - "cmd": "make nolinux", - "creates": "#{BBOT_TEMP}/massdns/bin/massdns", - }, - "when": "ansible_facts['system'] != 'Linux'", - }, - { - "name": "Install massdns", - "copy": {"src": "#{BBOT_TEMP}/massdns/bin/massdns", "dest": "#{BBOT_TOOLS}/", "mode": "u+x,g+x,o+x"}, - }, - ] + deps_common = ["massdns"] reject_wildcards = "strict" _qsize = 10000 @@ -101,10 +66,8 @@ async def setup(self): cache_hrs=24 * 7, ) self.devops_mutations = list(self.helpers.word_cloud.devops_mutations) - self.mutation_run = 1 + self._mutation_run = 1 - self.resolve_and_emit_queue = asyncio.Queue() - self.resolve_and_emit_task = asyncio.create_task(self.resolve_and_emit()) return await super().setup() async def filter_event(self, event): @@ -137,8 +100,8 @@ async def handle_event(self, event): query = self.make_query(event) self.source_events.add_target(event) self.info(f"Brute-forcing subdomains for {query} (source: {event.data})") - results = await self.massdns(query, self.subdomain_list) - await self.resolve_and_emit_queue.put((results, event, None)) + for hostname in await self.massdns(query, self.subdomain_list): + await self.emit_result(hostname, event, query) def abort_if(self, event): if not event.scope_distance == 0: @@ -149,6 +112,13 @@ def abort_if(self, event): return True, "event is unresolved" return False, "" + async def emit_result(self, result, source_event, query, tags=None): + if not result == source_event: + kwargs = {"abort_if": self.abort_if} + if tags is not None: + kwargs["tags"] = tags + await self.emit_event(result, "DNS_NAME", source_event, **kwargs) + def 
already_processed(self, hostname): if hash(hostname) in self.processed: return True @@ -221,35 +191,6 @@ async def massdns(self, domain, subdomains): # everything checks out return results - async def resolve_and_emit(self): - """ - When results are found, they are placed into self.resolve_and_emit_queue. - The purpose of this function (which is started as a task in the module's setup()) is to consume results from - the queue, resolve them, and if they resolve, emit them. - - This exists to prevent disrupting the scan with huge batches of DNS resolutions. - """ - while 1: - results, source_event, tags = await self.resolve_and_emit_queue.get() - self.verbose(f"Resolving batch of {len(results):,} results") - async with self._task_counter.count(f"{self.name}.resolve_and_emit()"): - async for hostname, r in self.helpers.resolve_batch(results, type=("A", "CNAME")): - if not r: - self.debug(f"Discarding {hostname} because it didn't resolve") - continue - self.add_found(hostname) - if source_event is None: - source_event = self.source_events.get(hostname) - if source_event is None: - self.warning(f"Could not correlate source event from: {hostname}") - source_event = self.scan.root_event - kwargs = {"abort_if": self.abort_if, "tags": tags} - await self.emit_event(hostname, "DNS_NAME", source_event, **kwargs) - - @property - def running(self): - return super().running or self.resolve_and_emit_queue.qsize() > 0 - async def _canary_check(self, domain, num_checks=50): random_subdomains = list(self.gen_random_subdomains(num_checks)) self.verbose(f"Testing {len(random_subdomains):,} canaries against {domain}") @@ -378,9 +319,6 @@ def add_mutation(_domain_hash, m): self.mutations_tried.add(h) mutations.add(m) - num_base_mutations = len(base_mutations) - self.debug(f"Base mutations for {domain}: {num_base_mutations:,}") - # try every subdomain everywhere else for _domain, _subdomains in found: if _domain == domain: @@ -388,7 +326,10 @@ def add_mutation(_domain_hash, m): for s in _subdomains: first_segment = s.split(".")[0] # skip stuff with lots of numbers (e.g. 
PTRs) - if self.has_excessive_digits(first_segment): + digits = self.digit_regex.findall(first_segment) + excessive_digits = len(digits) > 2 + long_digits = any(len(d) > 3 for d in digits) + if excessive_digits or long_digits: continue add_mutation(domain_hash, first_segment) for word in self.helpers.extract_words( @@ -396,9 +337,6 @@ def add_mutation(_domain_hash, m): ): add_mutation(domain_hash, word) - num_massdns_mutations = len(mutations) - num_base_mutations - self.debug(f"Mutations from previous subdomains for {domain}: {num_massdns_mutations:,}") - # numbers + devops mutations for mutation in self.helpers.word_cloud.mutations( subdomains, cloud=False, numbers=3, number_padding=1 @@ -407,26 +345,24 @@ def add_mutation(_domain_hash, m): m = delimiter.join(mutation).lower() add_mutation(domain_hash, m) - num_word_cloud_mutations = len(mutations) - num_massdns_mutations - self.debug(f"Mutations added by word cloud for {domain}: {num_word_cloud_mutations:,}") - # special dns mutator - self.debug( - f"DNS Mutator size: {len(self.helpers.word_cloud.dns_mutator):,} (limited to {self.max_mutations:,})" - ) for subdomain in self.helpers.word_cloud.dns_mutator.mutations( subdomains, max_mutations=self.max_mutations ): add_mutation(domain_hash, subdomain) - num_mutations = len(mutations) - num_word_cloud_mutations - self.debug(f"Mutations added by DNS Mutator: {num_mutations:,}") - if mutations: self.info(f"Trying {len(mutations):,} mutations against {domain} ({i+1}/{len(found)})") results = list(await self.massdns(query, mutations)) + for hostname in results: + source_event = self.source_events.get(hostname) + if source_event is None: + self.warning(f"Could not correlate source event from: {hostname}") + source_event = self.scan.root_event + await self.emit_result( + hostname, source_event, query, tags=[f"mutation-{self._mutation_run}"] + ) if results: - await self.resolve_and_emit_queue.put((results, None, [f"mutation-{self.mutation_run}"])) found_mutations = True continue break @@ -434,7 +370,7 @@ def add_mutation(_domain_hash, m): self.warning(e) if found_mutations: - self.mutation_run += 1 + self._mutation_run += 1 def add_found(self, host): if not isinstance(host, str): diff --git a/bbot/modules/ntlm.py b/bbot/modules/ntlm.py index c69beb941..93f622566 100644 --- a/bbot/modules/ntlm.py +++ b/bbot/modules/ntlm.py @@ -1,4 +1,4 @@ -from bbot.core.errors import NTLMError +from bbot.errors import NTLMError from bbot.modules.base import BaseModule ntlm_discovery_endpoints = [ @@ -68,7 +68,7 @@ class ntlm(BaseModule): watched_events = ["URL", "HTTP_RESPONSE"] produced_events = ["FINDING", "DNS_NAME"] - flags = ["active", "safe", "web-basic", "web-thorough"] + flags = ["active", "safe", "web-basic"] meta = {"description": "Watch for HTTP endpoints that support NTLM authentication"} options = {"try_all": False} options_desc = {"try_all": "Try every NTLM endpoint"} diff --git a/bbot/modules/oauth.py b/bbot/modules/oauth.py index 13a483aad..fd6188acd 100644 --- a/bbot/modules/oauth.py +++ b/bbot/modules/oauth.py @@ -6,7 +6,7 @@ class OAUTH(BaseModule): watched_events = ["DNS_NAME", "URL_UNVERIFIED"] produced_events = ["DNS_NAME"] - flags = ["affiliates", "subdomain-enum", "cloud-enum", "web-basic", "web-thorough", "active", "safe"] + flags = ["affiliates", "subdomain-enum", "cloud-enum", "web-basic", "active", "safe"] meta = {"description": "Enumerate OAUTH and OpenID Connect services"} options = {"try_all": False} options_desc = {"try_all": "Check for OAUTH/IODC on every subdomain and URL."} @@ 
-119,7 +119,7 @@ async def getoidc(self, url): return url, token_endpoint, results if json and isinstance(json, dict): token_endpoint = json.get("token_endpoint", "") - for found in self.helpers.search_dict_values(json, *self.regexes): + for found in await self.helpers.re.search_dict_values(json, *self.regexes): results.add(found) results -= {token_endpoint} return url, token_endpoint, results diff --git a/bbot/modules/output/base.py b/bbot/modules/output/base.py index 98c3f0cbc..7be0e7e40 100644 --- a/bbot/modules/output/base.py +++ b/bbot/modules/output/base.py @@ -73,13 +73,6 @@ def file(self): self._file = open(self.output_file, mode="a") return self._file - @property - def config(self): - config = self.scan.config.get("output_modules", {}).get(self.name, {}) - if config is None: - config = {} - return config - @property def log(self): if self._log is None: diff --git a/bbot/modules/output/emails.py b/bbot/modules/output/emails.py index e96c5d97c..1798f0135 100644 --- a/bbot/modules/output/emails.py +++ b/bbot/modules/output/emails.py @@ -4,7 +4,6 @@ class Emails(Human): watched_events = ["EMAIL_ADDRESS"] - flags = ["email-enum"] meta = {"description": "Output any email addresses found belonging to the target domain"} options = {"output_file": ""} options_desc = {"output_file": "Output to file"} diff --git a/bbot/modules/output/http.py b/bbot/modules/output/http.py index 014610736..18182056a 100644 --- a/bbot/modules/output/http.py +++ b/bbot/modules/output/http.py @@ -1,4 +1,4 @@ -from bbot.core.errors import RequestError +from httpx import RequestError from bbot.modules.output.base import BaseOutputModule diff --git a/bbot/modules/output/human.py b/bbot/modules/output/human.py index e1f4746c4..389a4bd84 100644 --- a/bbot/modules/output/human.py +++ b/bbot/modules/output/human.py @@ -1,6 +1,6 @@ from contextlib import suppress -from bbot.core.helpers.logger import log_to_stderr +from bbot.logger import log_to_stderr from bbot.modules.output.base import BaseOutputModule diff --git a/bbot/modules/output/json.py b/bbot/modules/output/json.py index bf8517db9..0b7a16a5f 100644 --- a/bbot/modules/output/json.py +++ b/bbot/modules/output/json.py @@ -16,7 +16,7 @@ class JSON(BaseOutputModule): _preserve_graph = True async def setup(self): - self._prep_output_dir("output.ndjson") + self._prep_output_dir("output.json") self.siem_friendly = self.config.get("siem_friendly", False) return True diff --git a/bbot/modules/output/neo4j.py b/bbot/modules/output/neo4j.py index 2cc083544..2b0548ea9 100644 --- a/bbot/modules/output/neo4j.py +++ b/bbot/modules/output/neo4j.py @@ -1,3 +1,4 @@ +from contextlib import suppress from neo4j import AsyncGraphDatabase from bbot.modules.output.base import BaseOutputModule @@ -78,5 +79,7 @@ async def merge_event(self, event, id_only=False): return (await result.single()).get("id(_)") async def cleanup(self): - await self.session.close() - await self.driver.close() + with suppress(Exception): + await self.session.close() + with suppress(Exception): + await self.driver.close() diff --git a/bbot/modules/output/splunk.py b/bbot/modules/output/splunk.py index 242f1759e..00d70876b 100644 --- a/bbot/modules/output/splunk.py +++ b/bbot/modules/output/splunk.py @@ -1,4 +1,4 @@ -from bbot.core.errors import RequestError +from httpx import RequestError from bbot.modules.output.base import BaseOutputModule diff --git a/bbot/modules/output/subdomains.py b/bbot/modules/output/subdomains.py index bfb7174ac..b0ec08aeb 100644 --- a/bbot/modules/output/subdomains.py +++ 
b/bbot/modules/output/subdomains.py @@ -4,7 +4,6 @@ class Subdomains(Human): watched_events = ["DNS_NAME", "DNS_NAME_UNRESOLVED"] - flags = ["subdomain-enum"] meta = {"description": "Output only resolved, in-scope subdomains"} options = {"output_file": "", "include_unresolved": False} options_desc = {"output_file": "Output to file", "include_unresolved": "Include unresolved subdomains in output"} diff --git a/bbot/modules/paramminer_headers.py b/bbot/modules/paramminer_headers.py index 3458edaa9..561a05fe2 100644 --- a/bbot/modules/paramminer_headers.py +++ b/bbot/modules/paramminer_headers.py @@ -1,6 +1,6 @@ +from bbot.errors import HttpCompareError from bbot.modules.base import BaseModule -from bbot.core.errors import HttpCompareError -from bbot.core.helpers.misc import extract_params_json, extract_params_xml, extract_params_html +from bbot.core.helpers.misc import extract_params_json, extract_params_xml class paramminer_headers(BaseModule): @@ -158,7 +158,7 @@ async def handle_event(self, event): wl = set(self.wl) if self.config.get("http_extract"): - extracted_words = self.load_extracted_words(event.data.get("body"), event.data.get("content_type")) + extracted_words = await self.load_extracted_words(event.data.get("body"), event.data.get("content_type")) if extracted_words: self.debug(f"Extracted {str(len(extracted_words))} words from {url}") self.extracted_words_master.update(extracted_words - wl) @@ -195,7 +195,7 @@ def gen_count_args(self, url): yield header_count, (url,), {"headers": fake_headers} header_count -= 5 - def load_extracted_words(self, body, content_type): + async def load_extracted_words(self, body, content_type): if not body: return None if content_type and "json" in content_type.lower(): @@ -203,7 +203,7 @@ def load_extracted_words(self, body, content_type): elif content_type and "xml" in content_type.lower(): return extract_params_xml(body) else: - return set(extract_params_html(body)) + return set(await self.helpers.re.extract_params_html(body)) async def binary_search(self, compare_helper, url, group, reasons=None, reflection=False): if reasons is None: @@ -218,7 +218,9 @@ async def binary_search(self, compare_helper, url, group, reasons=None, reflecti async for r in self.binary_search(compare_helper, url, group_slice, reasons, reflection): yield r else: - self.warning(f"Submitted group of size 0 to binary_search()") + self.debug( + f"binary_search() failed to start with group of size {str(len(group))} and {str(len(reasons))} length reasons" + ) async def check_batch(self, compare_helper, url, header_list): rand = self.rand_string() diff --git a/bbot/modules/pgp.py b/bbot/modules/pgp.py index 2c378f585..78becbf0e 100644 --- a/bbot/modules/pgp.py +++ b/bbot/modules/pgp.py @@ -28,7 +28,7 @@ async def query(self, query): url = url.replace("", self.helpers.quote(query)) response = await self.helpers.request(url) if response is not None: - for email in self.helpers.extract_emails(response.text): + for email in await self.helpers.re.extract_emails(response.text): email = email.lower() if email.endswith(query): results.add(email) diff --git a/bbot/modules/postman.py b/bbot/modules/postman.py index 5a63f824e..348d0dc28 100644 --- a/bbot/modules/postman.py +++ b/bbot/modules/postman.py @@ -4,7 +4,7 @@ class postman(subdomain_enum): watched_events = ["DNS_NAME"] produced_events = ["URL_UNVERIFIED"] - flags = ["passive", "subdomain-enum", "safe"] + flags = ["passive", "subdomain-enum", "safe", "code-enum"] meta = {"description": "Query Postman's API for related workspaces, 
collections, requests"} base_url = "https://www.postman.com/_api" diff --git a/bbot/modules/report/asn.py b/bbot/modules/report/asn.py index f906c785e..982c76584 100644 --- a/bbot/modules/report/asn.py +++ b/bbot/modules/report/asn.py @@ -149,7 +149,7 @@ async def get_asn_metadata_ripe(self, asn_number): for item in record: key = item.get("key", "") value = item.get("value", "") - for email in self.helpers.extract_emails(value): + for email in await self.helpers.re.extract_emails(value): emails.add(email.lower()) if not key: continue diff --git a/bbot/modules/robots.py b/bbot/modules/robots.py index 717900bee..d801a755e 100644 --- a/bbot/modules/robots.py +++ b/bbot/modules/robots.py @@ -4,7 +4,7 @@ class robots(BaseModule): watched_events = ["URL"] produced_events = ["URL_UNVERIFIED"] - flags = ["active", "safe", "web-basic", "web-thorough"] + flags = ["active", "safe", "web-basic"] meta = {"description": "Look for and parse robots.txt"} options = {"include_sitemap": False, "include_allow": True, "include_disallow": True} diff --git a/bbot/modules/secretsdb.py b/bbot/modules/secretsdb.py index 3fc8ad539..d94a3b0a2 100644 --- a/bbot/modules/secretsdb.py +++ b/bbot/modules/secretsdb.py @@ -7,7 +7,7 @@ class secretsdb(BaseModule): watched_events = ["HTTP_RESPONSE"] produced_events = ["FINDING"] - flags = ["active", "safe", "web-basic", "web-thorough"] + flags = ["active", "safe", "web-basic"] meta = {"description": "Detect common secrets with secrets-patterns-db"} options = { "min_confidence": 99, @@ -46,7 +46,7 @@ async def setup(self): async def handle_event(self, event): resp_body = event.data.get("body", "") resp_headers = event.data.get("raw_header", "") - all_matches = await self.scan.run_in_executor(self.search_data, resp_body, resp_headers) + all_matches = await self.helpers.run_in_executor(self.search_data, resp_body, resp_headers) for matches, name in all_matches: matches = [m.string[m.start() : m.end()] for m in matches] description = f"Possible secret ({name}): {matches}" diff --git a/bbot/modules/sitedossier.py b/bbot/modules/sitedossier.py index 86872c052..e6571ea85 100644 --- a/bbot/modules/sitedossier.py +++ b/bbot/modules/sitedossier.py @@ -36,12 +36,11 @@ async def query(self, query, parse_fn=None, request_fn=None): if response.status_code == 302: self.verbose("Hit rate limit captcha") break - for regex in self.scan.dns_regexes: - for match in regex.finditer(response.text): - hostname = match.group().lower() - if hostname and hostname not in results: - results.add(hostname) - yield hostname + for match in await self.helpers.re.finditer_multi(self.scan.dns_regexes, response.text): + hostname = match.group().lower() + if hostname and hostname not in results: + results.add(hostname) + yield hostname if '= self.scan.helpers.dns.max_dns_resolve_distance: - log.debug( - f"Skipping DNS children for {event} because their DNS resolve distances would be greater than the configured value for this scan ({self.scan.helpers.dns.max_dns_resolve_distance})" - ) - dns_children = {} - - if event.type in ("DNS_NAME", "IP_ADDRESS"): - event._dns_children = dns_children - for tag in dns_tags: - event.add_tag(tag) - - event._resolved_hosts = resolved_hosts - - event_whitelisted = event_whitelisted_dns | self.scan.whitelisted(event) - event_blacklisted = event_blacklisted_dns | self.scan.blacklisted(event) - if event_blacklisted: - event.add_tag("blacklisted") - reason = "event host" - if event_blacklisted_dns: - reason = "DNS associations" - log.debug(f"Omitting due to blacklisted {reason}: 
{event}") - return - - # other blacklist rejections - URL extensions, etc. - if "blacklisted" in event.tags: - log.debug(f"Omitting blacklisted event: {event}") - return - - # DNS_NAME --> DNS_NAME_UNRESOLVED - if event.type == "DNS_NAME" and "unresolved" in event.tags and not "target" in event.tags: - event.type = "DNS_NAME_UNRESOLVED" - - # Cloud tagging - await self.scan.helpers.cloud.tag_event(event) - - # Scope shepherding - # here is where we make sure in-scope events are set to their proper scope distance - if event.host and event_whitelisted: - log.debug(f"Making {event} in-scope") - event.scope_distance = 0 - - # check for wildcards - if event.scope_distance <= self.scan.scope_search_distance: - if not "unresolved" in event.tags: - if not self.scan.helpers.is_ip_type(event.host): - await self.scan.helpers.dns.handle_wildcard_event(event, dns_children) - - # For DNS_NAMEs, we've waited to do this until now, in case event.data changed during handle_wildcard_event() - if event.type == "DNS_NAME": - acceptable = self._event_precheck(event) - if not acceptable: - return - - # now that the event is properly tagged, we can finally make decisions about it - abort_result = False - if callable(abort_if): - async with self.scan._acatch(context=abort_if): - abort_result = await self.scan.helpers.execute_sync_or_async(abort_if, event) - msg = f"{event.module}: not raising event {event} due to custom criteria in abort_if()" - with suppress(ValueError, TypeError): - abort_result, reason = abort_result - msg += f": {reason}" - if abort_result: - log.verbose(msg) - return - - # run success callback before distributing event (so it can add tags, etc.) - if callable(on_success_callback): - async with self.scan._acatch(context=on_success_callback): - await self.scan.helpers.execute_sync_or_async(on_success_callback, event) - - await self.distribute_event(event) - - # speculate DNS_NAMES and IP_ADDRESSes from other event types - source_event = event - if ( - event.host - and event.type not in ("DNS_NAME", "DNS_NAME_UNRESOLVED", "IP_ADDRESS", "IP_RANGE") - and not (event.type in ("OPEN_TCP_PORT", "URL_UNVERIFIED") and str(event.module) == "speculate") - ): - source_module = self.scan.helpers._make_dummy_module("host", _type="internal") - source_module._priority = 4 - source_event = self.scan.make_event(event.host, "DNS_NAME", module=source_module, source=event) - # only emit the event if it's not already in the parent chain - if source_event is not None and source_event not in source_event.get_sources(): - source_event.scope_distance = event.scope_distance - if "target" in event.tags: - source_event.add_tag("target") - self.queue_event(source_event) - - ### Emit DNS children ### - if self.dns_resolution: - emit_children = True - in_dns_scope = -1 < event.scope_distance < self.scan.scope_dns_search_distance - # only emit DNS children once for each unique host - host_hash = hash(str(event.host)) - if host_hash in self.outgoing_dup_tracker: - emit_children = False - self.outgoing_dup_tracker.add(host_hash) - - if emit_children: - dns_child_events = [] - if dns_children: - for rdtype, records in dns_children.items(): - module = self.scan.helpers.dns._get_dummy_module(rdtype) - module._priority = 4 - for record in records: - try: - child_event = self.scan.make_event( - record, "DNS_NAME", module=module, source=source_event - ) - # if it's a hostname and it's only one hop away, mark it as affiliate - if child_event.type == "DNS_NAME" and child_event.scope_distance == 1: - child_event.add_tag("affiliate") - 
host_hash = hash(str(child_event.host)) - if in_dns_scope or self.scan.in_scope(child_event): - dns_child_events.append(child_event) - except ValidationError as e: - log.warning( - f'Event validation failed for DNS child of {source_event}: "{record}" ({rdtype}): {e}' - ) - for child_event in dns_child_events: - log.debug(f"Queueing DNS child for {event}: {child_event}") - self.queue_event(child_event) - - except ValidationError as e: - log.warning(f"Event validation failed with kwargs={kwargs}: {e}") - log.trace(traceback.format_exc()) - - finally: - event._resolved.set() - log.debug(f"{event.module}.emit_event() finished for {event}") + async def get_incoming_event(self): + for q in self.helpers.weighted_shuffle(self.incoming_queues, self.module_priority_weights): + try: + return q.get_nowait() + except (asyncio.queues.QueueEmpty, AttributeError): + continue + raise asyncio.queues.QueueEmpty() def is_incoming_duplicate(self, event, add=False): """ @@ -331,7 +127,8 @@ def is_incoming_duplicate(self, event, add=False): try: event_hash = event.module._outgoing_dedup_hash(event) except AttributeError: - event_hash = hash((event, str(getattr(event, "module", "")))) + module_name = str(getattr(event, "module", "")) + event_hash = hash((event, module_name)) is_dup = event_hash in self.incoming_dup_tracker if add: self.incoming_dup_tracker.add(event_hash) @@ -340,6 +137,89 @@ def is_incoming_duplicate(self, event, add=False): return True return False + +class ScanEgress(InterceptModule): + """ + This is always the last intercept module in the chain, responsible for executing and acting on the + `abort_if` and `on_success_callback` functions. + """ + + watched_events = ["*"] + # accept all events regardless of scope distance + scope_distance_modifier = None + _name = "_scan_egress" + + @property + def priority(self): + # we are the lowest priority + return 99 + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # track outgoing duplicates (for `accept_dupes` attribute of modules) + self.outgoing_dup_tracker = set() + + async def handle_event(self, event, kwargs): + abort_if = kwargs.pop("abort_if", None) + on_success_callback = kwargs.pop("on_success_callback", None) + + # make event internal if it's above our configured report distance + event_in_report_distance = event.scope_distance <= self.scan.scope_report_distance + event_will_be_output = event.always_emit or event_in_report_distance + if not event_will_be_output: + log.debug( + f"Making {event} internal because its scope_distance ({event.scope_distance}) > scope_report_distance ({self.scan.scope_report_distance})" + ) + event.internal = True + + # if we discovered something interesting from an internal event, + # make sure we preserve its chain of parents + source = event.source + if source.internal and ((not event.internal) or event._graph_important): + source_in_report_distance = source.scope_distance <= self.scan.scope_report_distance + if source_in_report_distance: + source.internal = False + if not source._graph_important: + source._graph_important = True + log.debug(f"Re-queuing internal event {source} with parent {event}") + await self.emit_event(source) + + abort_result = False + if callable(abort_if): + async with self.scan._acatch(context=abort_if): + abort_result = await self.scan.helpers.execute_sync_or_async(abort_if, event) + msg = f"{event.module}: not raising event {event} due to custom criteria in abort_if()" + with suppress(ValueError, TypeError): + abort_result, reason = abort_result + msg += f": 
{reason}" + if abort_result: + return False, msg + + # run success callback before distributing event (so it can add tags, etc.) + if callable(on_success_callback): + async with self.scan._acatch(context=on_success_callback): + await self.scan.helpers.execute_sync_or_async(on_success_callback, event) + + async def forward_event(self, event, kwargs): + """ + Queue event with modules + """ + is_outgoing_duplicate = self.is_outgoing_duplicate(event) + if is_outgoing_duplicate: + self.verbose(f"{event.module}: Duplicate event: {event}") + # absorb event into the word cloud if it's in scope + if not is_outgoing_duplicate and -1 < event.scope_distance < 1: + self.scan.word_cloud.absorb_event(event) + + for mod in self.scan.modules.values(): + # don't distribute events to intercept modules + if mod._intercept: + continue + acceptable_dup = (not is_outgoing_duplicate) or mod.accept_dupes + graph_important = mod._is_graph_important(event) + if acceptable_dup or graph_important: + await mod.queue_event(event) + def is_outgoing_duplicate(self, event, add=False): """ Calculate whether an event is a duplicate in the context of the whole scan, @@ -352,272 +232,3 @@ def is_outgoing_duplicate(self, event, add=False): if add: self.outgoing_dup_tracker.add(event_hash) return is_dup - - async def distribute_event(self, event): - """ - Queue event with modules - """ - async with self.scan._acatch(context=self.distribute_event): - # make event internal if it's above our configured report distance - event_in_report_distance = event.scope_distance <= self.scan.scope_report_distance - event_will_be_output = event.always_emit or event_in_report_distance - if not event_will_be_output: - log.debug( - f"Making {event} internal because its scope_distance ({event.scope_distance}) > scope_report_distance ({self.scan.scope_report_distance})" - ) - event.internal = True - - # if we discovered something interesting from an internal event, - # make sure we preserve its chain of parents - source = event.source - if source.internal and ((not event.internal) or event._graph_important): - source_in_report_distance = source.scope_distance <= self.scan.scope_report_distance - if source_in_report_distance: - source.internal = False - if not source._graph_important: - source._graph_important = True - log.debug(f"Re-queuing internal event {source} with parent {event}") - self.queue_event(source) - - is_outgoing_duplicate = self.is_outgoing_duplicate(event) - if is_outgoing_duplicate: - self.scan.verbose(f"{event.module}: Duplicate event: {event}") - # absorb event into the word cloud if it's in scope - if not is_outgoing_duplicate and -1 < event.scope_distance < 1: - self.scan.word_cloud.absorb_event(event) - for mod in self.scan.modules.values(): - acceptable_dup = (not is_outgoing_duplicate) or mod.accept_dupes - # graph_important = mod._type == "output" and event._graph_important == True - graph_important = mod._is_graph_important(event) - if acceptable_dup or graph_important: - await mod.queue_event(event) - - async def _worker_loop(self): - try: - while not self.scan.stopped: - try: - async with self._task_counter.count("get_event_from_modules()"): - event, kwargs = self.get_event_from_modules() - except asyncio.queues.QueueEmpty: - await asyncio.sleep(0.1) - continue - async with self._task_counter.count(f"emit_event({event})"): - emit_event_task = asyncio.create_task( - self.emit_event(event, **kwargs), name=f"emit_event({event})" - ) - await emit_event_task - - except Exception: - log.critical(traceback.format_exc()) - - def 
kill_module(self, module_name, message=None): - from signal import SIGINT - - module = self.scan.modules[module_name] - module.set_error_state(message=message, clear_outgoing_queue=True) - for proc in module._proc_tracker: - with suppress(Exception): - proc.send_signal(SIGINT) - self.scan.helpers.cancel_tasks_sync(module._tasks) - - @property - def modules_by_priority(self): - if not self._modules_by_priority: - self._modules_by_priority = sorted(list(self.scan.modules.values()), key=lambda m: m.priority) - return self._modules_by_priority - - @property - def incoming_queues(self): - if not self._incoming_queues: - queues_by_priority = [m.outgoing_event_queue for m in self.modules_by_priority] - self._incoming_queues = [self.incoming_event_queue] + queues_by_priority - return self._incoming_queues - - @property - def incoming_qsize(self): - incoming_events = 0 - for q in self.incoming_queues: - incoming_events += q.qsize() - return incoming_events - - @property - def module_priority_weights(self): - if not self._module_priority_weights: - # we subtract from six because lower priorities == higher weights - priorities = [5] + [6 - m.priority for m in self.modules_by_priority] - self._module_priority_weights = priorities - return self._module_priority_weights - - def get_event_from_modules(self): - for q in self.scan.helpers.weighted_shuffle(self.incoming_queues, self.module_priority_weights): - try: - return q.get_nowait() - except (asyncio.queues.QueueEmpty, AttributeError): - continue - raise asyncio.queues.QueueEmpty() - - @property - def queued_event_types(self): - event_types = {} - for q in self.incoming_queues: - for event, _ in q._queue: - event_type = getattr(event, "type", None) - if event_type is not None: - try: - event_types[event_type] += 1 - except KeyError: - event_types[event_type] = 1 - return event_types - - def queue_event(self, event, **kwargs): - if event: - # nerf event's priority if it's likely not to be in scope - if event.scope_distance > 0: - event_in_scope = self.scan.whitelisted(event) and not self.scan.blacklisted(event) - if not event_in_scope: - event.module_priority += event.scope_distance - # Wait for parent event to resolve (in case its scope distance changes) - # await resolved = event.source._resolved.wait() - # update event's scope distance based on its parent - event.scope_distance = event.source.scope_distance + 1 - self.incoming_event_queue.put_nowait((event, kwargs)) - - @property - def running(self): - active_tasks = self._task_counter.value - incoming_events = self.incoming_qsize - return active_tasks > 0 or incoming_events > 0 - - @property - def modules_finished(self): - finished_modules = [m.finished for m in self.scan.modules.values()] - return all(finished_modules) - - @property - def active(self): - return self.running or not self.modules_finished - - def modules_status(self, _log=False): - finished = True - status = {"modules": {}} - - for m in self.scan.modules.values(): - mod_status = m.status - if mod_status["running"]: - finished = False - status["modules"][m.name] = mod_status - - for mod in self.scan.modules.values(): - if mod.errored and mod.incoming_event_queue not in [None, False]: - with suppress(Exception): - mod.set_error_state() - - status["finished"] = finished - - modules_errored = [m for m, s in status["modules"].items() if s["errored"]] - - max_mem_percent = 90 - mem_status = self.scan.helpers.memory_status() - # abort if we don't have the memory - mem_percent = mem_status.percent - if mem_percent > max_mem_percent: - 
free_memory = mem_status.available - free_memory_human = self.scan.helpers.bytes_to_human(free_memory) - self.scan.warning(f"System memory is at {mem_percent:.1f}% ({free_memory_human} remaining)") - - if _log: - modules_status = [] - for m, s in status["modules"].items(): - running = s["running"] - incoming = s["events"]["incoming"] - outgoing = s["events"]["outgoing"] - tasks = s["tasks"] - total = sum([incoming, outgoing, tasks]) - if running or total > 0: - modules_status.append((m, running, incoming, outgoing, tasks, total)) - modules_status.sort(key=lambda x: x[-1], reverse=True) - - if modules_status: - modules_status_str = ", ".join([f"{m}({i:,}:{t:,}:{o:,})" for m, r, i, o, t, _ in modules_status]) - self.scan.info( - f"{self.scan.name}: Modules running (incoming:processing:outgoing) {modules_status_str}" - ) - else: - self.scan.info(f"{self.scan.name}: No modules running") - event_type_summary = sorted( - self.scan.stats.events_emitted_by_type.items(), key=lambda x: x[-1], reverse=True - ) - if event_type_summary: - self.scan.info( - f'{self.scan.name}: Events produced so far: {", ".join([f"{k}: {v}" for k,v in event_type_summary])}' - ) - else: - self.scan.info(f"{self.scan.name}: No events produced yet") - - if modules_errored: - self.scan.verbose( - f'{self.scan.name}: Modules errored: {len(modules_errored):,} ({", ".join([m for m in modules_errored])})' - ) - - queued_events_by_type = [(k, v) for k, v in self.queued_event_types.items() if v > 0] - if queued_events_by_type: - queued_events_by_type.sort(key=lambda x: x[-1], reverse=True) - queued_events_by_type_str = ", ".join(f"{m}: {t:,}" for m, t in queued_events_by_type) - num_queued_events = sum(v for k, v in queued_events_by_type) - self.scan.info( - f"{self.scan.name}: {num_queued_events:,} events in queue ({queued_events_by_type_str})" - ) - else: - self.scan.info(f"{self.scan.name}: No events in queue") - - if self.scan.log_level <= logging.DEBUG: - # status debugging - scan_active_status = [] - scan_active_status.append(f"scan._finished_init: {self.scan._finished_init}") - scan_active_status.append(f"manager.active: {self.active}") - scan_active_status.append(f" manager.running: {self.running}") - scan_active_status.append(f" manager._task_counter.value: {self._task_counter.value}") - scan_active_status.append(f" manager._task_counter.tasks:") - for task in list(self._task_counter.tasks.values()): - scan_active_status.append(f" - {task}:") - scan_active_status.append( - f" manager.incoming_event_queue.qsize: {self.incoming_event_queue.qsize()}" - ) - scan_active_status.append(f" manager.modules_finished: {self.modules_finished}") - for m in sorted(self.scan.modules.values(), key=lambda m: m.name): - running = m.running - scan_active_status.append(f" {m}.finished: {m.finished}") - scan_active_status.append(f" running: {running}") - if running: - scan_active_status.append(f" tasks:") - for task in list(m._task_counter.tasks.values()): - scan_active_status.append(f" - {task}:") - scan_active_status.append(f" incoming_queue_size: {m.num_incoming_events}") - scan_active_status.append(f" outgoing_queue_size: {m.outgoing_event_queue.qsize()}") - for line in scan_active_status: - self.scan.debug(line) - - # log module memory usage - module_memory_usage = [] - for module in self.scan.modules.values(): - memory_usage = module.memory_usage - module_memory_usage.append((module.name, memory_usage)) - module_memory_usage.sort(key=lambda x: x[-1], reverse=True) - self.scan.debug(f"MODULE MEMORY USAGE:") - for module_name, usage in 
module_memory_usage: - self.scan.debug(f" - {module_name}: {self.scan.helpers.bytes_to_human(usage)}") - - # Uncomment these lines to enable debugging of event queues - - # queued_events = self.incoming_event_queue.events - # if queued_events: - # queued_events_str = ", ".join(str(e) for e in queued_events) - # self.scan.verbose(f"Queued events: {queued_events_str}") - # queued_events_by_module = [(k, v) for k, v in self.incoming_event_queue.modules.items() if v > 0] - # queued_events_by_module.sort(key=lambda x: x[-1], reverse=True) - # queued_events_by_module_str = ", ".join(f"{m}: {t:,}" for m, t in queued_events_by_module) - # self.scan.verbose(f"{self.scan.name}: Queued events by module: {queued_events_by_module_str}") - - status.update({"modules_errored": len(modules_errored)}) - - return status diff --git a/bbot/scanner/preset/__init__.py b/bbot/scanner/preset/__init__.py new file mode 100644 index 000000000..a6fbc24bb --- /dev/null +++ b/bbot/scanner/preset/__init__.py @@ -0,0 +1 @@ +from .preset import Preset diff --git a/bbot/scanner/preset/args.py b/bbot/scanner/preset/args.py new file mode 100644 index 000000000..1a993e6ab --- /dev/null +++ b/bbot/scanner/preset/args.py @@ -0,0 +1,319 @@ +import re +import logging +import argparse +from omegaconf import OmegaConf + +from bbot.errors import * +from bbot.core.helpers.misc import chain_lists, get_closest_match + +log = logging.getLogger("bbot.presets.args") + + +class BBOTArgs: + + # module config options to exclude from validation + exclude_from_validation = re.compile(r".*modules\.[a-z0-9_]+\.(?:batch_size|max_event_handlers)$") + + scan_examples = [ + ( + "Subdomains", + "Perform a full subdomain enumeration on evilcorp.com", + "bbot -t evilcorp.com -p subdomain-enum", + ), + ( + "Subdomains (passive only)", + "Perform a passive-only subdomain enumeration on evilcorp.com", + "bbot -t evilcorp.com -p subdomain-enum -rf passive", + ), + ( + "Subdomains + port scan + web screenshots", + "Port-scan every subdomain, screenshot every webpage, output to current directory", + "bbot -t evilcorp.com -p subdomain-enum -m nmap gowitness -n my_scan -o .", + ), + ( + "Subdomains + basic web scan", + "A basic web scan includes wappalyzer, robots.txt, and other non-intrusive web modules", + "bbot -t evilcorp.com -p subdomain-enum web-basic", + ), + ( + "Web spider", + "Crawl www.evilcorp.com up to a max depth of 2, automatically extracting emails, secrets, etc.", + "bbot -t www.evilcorp.com -p spider -c web_spider_distance=2 web_spider_depth=2", + ), + ( + "Everything everywhere all at once", + "Subdomains, emails, cloud buckets, port scan, basic web, web screenshots, nuclei", + "bbot -t evilcorp.com -p kitchen-sink", + ), + ] + + usage_examples = [ + ( + "List modules", + "", + "bbot -l", + ), + ( + "List presets", + "", + "bbot -lp", + ), + ( + "List flags", + "", + "bbot -lf", + ), + ] + + epilog = "EXAMPLES\n" + for example in (scan_examples, usage_examples): + for title, description, command in example: + epilog += f"\n {title}:\n {command}\n" + + def __init__(self, preset): + self.preset = preset + self._config = None + + self.parser = self.create_parser() + self._parsed = None + + @property + def parsed(self): + if self._parsed is None: + self._parsed = self.parser.parse_args() + self.sanitize_args() + return self._parsed + + def preset_from_args(self): + # the order here is important + # first we make the preset + args_preset = self.preset.__class__( + *self.parsed.targets, + whitelist=self.parsed.whitelist, + 
blacklist=self.parsed.blacklist, + strict_scope=self.parsed.strict_scope, + name="args_preset", + ) + + # then we set verbosity levels (so if the user enables -d they can see debug output) + if self.parsed.silent: + args_preset.silent = True + if self.parsed.verbose: + args_preset.verbose = True + if self.parsed.debug: + args_preset.debug = True + + # then we load requested preset + # this is important so we can load custom module directories, pull in custom flags, module config options, etc. + for preset_arg in self.parsed.preset: + try: + args_preset.include_preset(preset_arg) + except BBOTArgumentError: + raise + except Exception as e: + raise BBOTArgumentError(f'Error parsing preset "{preset_arg}": {e}') + + # modules + flags + args_preset.exclude_modules.update(set(self.parsed.exclude_modules)) + args_preset.exclude_flags.update(set(self.parsed.exclude_flags)) + args_preset.require_flags.update(set(self.parsed.require_flags)) + args_preset.explicit_scan_modules.update(set(self.parsed.modules)) + args_preset.explicit_output_modules.update(set(self.parsed.output_modules)) + args_preset.flags.update(set(self.parsed.flags)) + + # dependencies + if self.parsed.retry_deps: + args_preset.core.custom_config["deps_behavior"] = "retry_failed" + elif self.parsed.force_deps: + args_preset.core.custom_config["deps_behavior"] = "force_install" + elif self.parsed.no_deps: + args_preset.core.custom_config["deps_behavior"] = "disable" + elif self.parsed.ignore_failed_deps: + args_preset.core.custom_config["deps_behavior"] = "ignore_failed" + + # other scan options + args_preset.scan_name = self.parsed.name + args_preset.output_dir = self.parsed.output_dir + args_preset.force_start = self.parsed.force + + # CLI config options (dot-syntax) + for config_arg in self.parsed.config: + try: + # if that fails, try to parse as key=value syntax + args_preset.core.merge_custom(OmegaConf.from_cli([config_arg])) + except Exception as e: + raise BBOTArgumentError(f'Error parsing command-line config option: "{config_arg}": {e}') + + return args_preset + + def create_parser(self, *args, **kwargs): + kwargs.update( + dict( + description="Bighuge BLS OSINT Tool", formatter_class=argparse.RawTextHelpFormatter, epilog=self.epilog + ) + ) + p = argparse.ArgumentParser(*args, **kwargs) + target = p.add_argument_group(title="Target") + target.add_argument( + "-t", "--targets", nargs="+", default=[], help="Targets to seed the scan", metavar="TARGET" + ) + target.add_argument( + "-w", + "--whitelist", + nargs="+", + default=[], + help="What's considered in-scope (by default it's the same as --targets)", + ) + target.add_argument("-b", "--blacklist", nargs="+", default=[], help="Don't touch these things") + target.add_argument( + "--strict-scope", + action="store_true", + help="Don't consider subdomains of target/whitelist to be in-scope", + ) + presets = p.add_argument_group(title="Presets") + presets.add_argument( + "-p", + "--preset", + nargs="*", + help="Enable BBOT preset(s)", + metavar="PRESET", + default=[], + ) + presets.add_argument( + "-c", + "--config", + nargs="*", + help="Custom config options in key=value format: e.g. 'modules.shodan.api_key=1234'", + metavar="CONFIG", + default=[], + ) + presets.add_argument("-lp", "--list-presets", action="store_true", help=f"List available presets.") + modules = p.add_argument_group(title="Modules") + modules.add_argument( + "-m", + "--modules", + nargs="+", + default=[], + help=f'Modules to enable. 
Choices: {",".join(self.preset.module_loader.scan_module_choices)}', + metavar="MODULE", + ) + modules.add_argument("-l", "--list-modules", action="store_true", help=f"List available modules.") + modules.add_argument( + "-lmo", "--list-module-options", action="store_true", help="Show all module config options" + ) + modules.add_argument( + "-em", "--exclude-modules", nargs="+", default=[], help=f"Exclude these modules.", metavar="MODULE" + ) + modules.add_argument( + "-om", + "--output-modules", + nargs="+", + default=[], + help=f'Output module(s). Choices: {",".join(self.preset.module_loader.output_module_choices)}', + metavar="MODULE", + ) + modules.add_argument( + "-f", + "--flags", + nargs="+", + default=[], + help=f'Enable modules by flag. Choices: {",".join(self.preset.module_loader.flag_choices)}', + metavar="FLAG", + ) + modules.add_argument("-lf", "--list-flags", action="store_true", help=f"List available flags.") + modules.add_argument( + "-rf", + "--require-flags", + nargs="+", + default=[], + help=f"Only enable modules with these flags (e.g. -rf passive)", + metavar="FLAG", + ) + modules.add_argument( + "-ef", + "--exclude-flags", + nargs="+", + default=[], + help=f"Disable modules with these flags. (e.g. -ef aggressive)", + metavar="FLAG", + ) + modules.add_argument("--allow-deadly", action="store_true", help="Enable the use of highly aggressive modules") + scan = p.add_argument_group(title="Scan") + scan.add_argument("-n", "--name", help="Name of scan (default: random)", metavar="SCAN_NAME") + scan.add_argument( + "-o", + "--output-dir", + metavar="DIR", + ) + scan.add_argument("-v", "--verbose", action="store_true", help="Be more verbose") + scan.add_argument("-d", "--debug", action="store_true", help="Enable debugging") + scan.add_argument("-s", "--silent", action="store_true", help="Be quiet") + scan.add_argument( + "--force", + action="store_true", + help="Run scan even in the case of condition violations or failed module setups", + ) + scan.add_argument("-y", "--yes", action="store_true", help="Skip scan confirmation prompt") + scan.add_argument("--dry-run", action="store_true", help=f"Abort before executing scan") + scan.add_argument( + "--current-preset", + action="store_true", + help="Show the current preset in YAML format", + ) + scan.add_argument( + "--current-preset-full", + action="store_true", + help="Show the current preset in its full form, including defaults", + ) + deps = p.add_argument_group( + title="Module dependencies", description="Control how modules install their dependencies" + ) + g2 = deps.add_mutually_exclusive_group() + g2.add_argument("--no-deps", action="store_true", help="Don't install module dependencies") + g2.add_argument("--force-deps", action="store_true", help="Force install all module dependencies") + g2.add_argument("--retry-deps", action="store_true", help="Try again to install failed module dependencies") + g2.add_argument( + "--ignore-failed-deps", action="store_true", help="Run modules even if they have failed dependencies" + ) + g2.add_argument("--install-all-deps", action="store_true", help="Install dependencies for all modules") + misc = p.add_argument_group(title="Misc") + misc.add_argument("--version", action="store_true", help="show BBOT version and exit") + return p + + def sanitize_args(self): + # silent implies -y + if self.parsed.silent: + self.parsed.yes = True + # chain_lists allows either comma-separated or space-separated lists + self.parsed.modules = chain_lists(self.parsed.modules) + self.parsed.exclude_modules = 
chain_lists(self.parsed.exclude_modules) + self.parsed.output_modules = chain_lists(self.parsed.output_modules) + self.parsed.targets = chain_lists( + self.parsed.targets, try_files=True, msg="Reading targets from file: {filename}" + ) + self.parsed.whitelist = chain_lists( + self.parsed.whitelist, try_files=True, msg="Reading whitelist from file: {filename}" + ) + self.parsed.blacklist = chain_lists( + self.parsed.blacklist, try_files=True, msg="Reading blacklist from file: {filename}" + ) + self.parsed.flags = chain_lists(self.parsed.flags) + self.parsed.exclude_flags = chain_lists(self.parsed.exclude_flags) + self.parsed.require_flags = chain_lists(self.parsed.require_flags) + + def validate(self): + # validate config options + sentinel = object() + all_options = set(self.preset.core.default_config.keys()) - {"modules"} + for module_options in self.preset.module_loader.modules_options().values(): + all_options.update(set(o[0] for o in module_options)) + for c in self.parsed.config: + c = c.split("=")[0].strip() + v = OmegaConf.select(self.preset.core.default_config, c, default=sentinel) + # if option isn't in the default config + if v is sentinel: + # skip if it's excluded from validation + if self.exclude_from_validation.match(c): + continue + # otherwise, ensure it exists as a module option + raise ValidationError(get_closest_match(c, all_options, msg="module option")) diff --git a/bbot/scanner/preset/conditions.py b/bbot/scanner/preset/conditions.py new file mode 100644 index 000000000..261a5c76e --- /dev/null +++ b/bbot/scanner/preset/conditions.py @@ -0,0 +1,54 @@ +import logging + +from bbot.errors import * + +log = logging.getLogger("bbot.preset.conditions") + +JINJA_ENV = None + + +class ConditionEvaluator: + def __init__(self, preset): + self.preset = preset + + @property + def context(self): + return { + "preset": self.preset, + "config": self.preset.config, + "abort": self.abort, + "warn": self.warn, + } + + def abort(self, message): + if not self.preset.force_start: + raise PresetAbortError(message) + + def warn(self, message): + log.warning(message) + + def evaluate(self): + context = self.context + already_evaluated = set() + for preset_name, condition in self.preset.conditions: + condition_str = str(condition) + if condition_str not in already_evaluated: + already_evaluated.add(condition_str) + try: + self.check_condition(condition_str, context) + except PresetAbortError as e: + raise PresetAbortError(f'Preset "{preset_name}" requested abort: {e} (--force to override)') + + @property + def jinja_env(self): + from jinja2.sandbox import SandboxedEnvironment + + global JINJA_ENV + if JINJA_ENV is None: + JINJA_ENV = SandboxedEnvironment() + return JINJA_ENV + + def check_condition(self, condition_str, context): + log.debug(f'Evaluating condition "{repr(condition_str)}"') + template = self.jinja_env.from_string(condition_str) + template.render(context) diff --git a/bbot/scanner/preset/environ.py b/bbot/scanner/preset/environ.py new file mode 100644 index 000000000..9bf2ad49b --- /dev/null +++ b/bbot/scanner/preset/environ.py @@ -0,0 +1,134 @@ +import os +import sys +import omegaconf +from pathlib import Path + +from bbot.core.helpers.misc import cpu_architecture, os_platform, os_platform_friendly + + +def increase_limit(new_limit): + try: + import resource + + # Get current limit + soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE) + + new_limit = min(new_limit, hard_limit) + + # Attempt to set new limit + resource.setrlimit(resource.RLIMIT_NOFILE, 
(new_limit, hard_limit))
+ except Exception as e:
+ sys.stderr.write(f"Failed to set new ulimit: {e}\n")
+
+
+increase_limit(65535)
+
+
+# Custom omegaconf resolver to get environment variables
+def env_resolver(env_name, default=None):
+ return os.getenv(env_name, default)
+
+
+# Register the new resolver
+# this allows you to substitute environment variables in your config like "${env:PATH}"
+omegaconf.OmegaConf.register_new_resolver("env", env_resolver)
+
+
+class BBOTEnviron:
+
+ def __init__(self, preset):
+ self.preset = preset
+
+ def flatten_config(self, config, base="bbot"):
+ """
+ Flatten a JSON-like config into a list of environment variables:
+ {"modules": {"httpx": {"timeout": 5}}} --> "BBOT_MODULES_HTTPX_TIMEOUT=5"
+ """
+ if type(config) == omegaconf.dictconfig.DictConfig:
+ for k, v in config.items():
+ new_base = f"{base}_{k}"
+ if type(v) == omegaconf.dictconfig.DictConfig:
+ yield from self.flatten_config(v, base=new_base)
+ elif type(v) != omegaconf.listconfig.ListConfig:
+ yield (new_base.upper(), str(v))
+
+ def add_to_path(self, v, k="PATH", environ=None):
+ """
+ Add an entry to a colon-separated PATH variable.
+ If it's already contained in the value, shift it to be in first position.
+ """
+ if environ is None:
+ environ = os.environ
+ var_list = environ.get(k, "").split(":")
+ deduped_var_list = []
+ for _ in var_list:
+ if _ != v and _ not in deduped_var_list:
+ deduped_var_list.append(_)
+ deduped_var_list = [v] + deduped_var_list
+ new_var_str = ":".join(deduped_var_list)
+ environ[k] = new_var_str
+
+ def prepare(self):
+ """
+ Sync config to OS environment variables
+ """
+ environ = dict(os.environ)
+
+ # if we're running in a virtual environment, make sure to include its /bin in PATH
+ if sys.prefix != sys.base_prefix:
+ bin_dir = str(Path(sys.prefix) / "bin")
+ self.add_to_path(bin_dir, environ=environ)
+
+ # add ~/.local/bin to PATH
+ local_bin_dir = str(Path.home() / ".local" / "bin")
+ self.add_to_path(local_bin_dir, environ=environ)
+
+ # ensure bbot_tools
+ environ["BBOT_TOOLS"] = str(self.preset.core.tools_dir)
+ self.add_to_path(str(self.preset.core.tools_dir), environ=environ)
+ # ensure bbot_cache
+ environ["BBOT_CACHE"] = str(self.preset.core.cache_dir)
+ # ensure bbot_temp
+ environ["BBOT_TEMP"] = str(self.preset.core.temp_dir)
+ # ensure bbot_lib
+ environ["BBOT_LIB"] = str(self.preset.core.lib_dir)
+ # export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:~/.bbot/lib/
+ self.add_to_path(str(self.preset.core.lib_dir), k="LD_LIBRARY_PATH", environ=environ)
+
+ # platform variables
+ environ["BBOT_OS_PLATFORM"] = os_platform()
+ environ["BBOT_OS"] = os_platform_friendly()
+ environ["BBOT_CPU_ARCH"] = cpu_architecture()
+
+ # copy config to environment
+ bbot_environ = self.flatten_config(self.preset.config)
+ environ.update(bbot_environ)
+
+ # handle HTTP proxy
+ http_proxy = self.preset.config.get("http_proxy", "")
+ if http_proxy:
+ environ["HTTP_PROXY"] = http_proxy
+ environ["HTTPS_PROXY"] = http_proxy
+ else:
+ environ.pop("HTTP_PROXY", None)
+ environ.pop("HTTPS_PROXY", None)
+
+ # ssl verification
+ import urllib3
+
+ urllib3.disable_warnings()
+ ssl_verify = self.preset.config.get("ssl_verify", False)
+ if not ssl_verify:
+ import requests
+ import functools
+
+ requests.adapters.BaseAdapter.send = functools.partialmethod(
+ requests.adapters.BaseAdapter.send, verify=False
+ )
+ requests.adapters.HTTPAdapter.send = functools.partialmethod(
+ requests.adapters.HTTPAdapter.send, verify=False
+ )
+ requests.Session.request = 
functools.partialmethod(requests.Session.request, verify=False) + requests.request = functools.partial(requests.request, verify=False) + + return environ diff --git a/bbot/scanner/preset/path.py b/bbot/scanner/preset/path.py new file mode 100644 index 000000000..ee5235fbf --- /dev/null +++ b/bbot/scanner/preset/path.py @@ -0,0 +1,65 @@ +import logging +from pathlib import Path + +from bbot.errors import * + +log = logging.getLogger("bbot.presets.path") + +DEFAULT_PRESET_PATH = Path(__file__).parent.parent.parent / "presets" + + +class PresetPath: + """ + Keeps track of where to look for preset .yaml files + """ + + def __init__(self): + self.paths = [DEFAULT_PRESET_PATH] + + def find(self, filename): + filename_path = Path(filename).resolve() + extension = filename_path.suffix.lower() + file_candidates = set() + extension_candidates = {".yaml", ".yml"} + if extension: + extension_candidates.add(extension.lower()) + else: + file_candidates.add(filename_path.stem) + for ext in extension_candidates: + file_candidates.add(f"{filename_path.stem}{ext}") + file_candidates = sorted(file_candidates) + file_candidates_str = ",".join([str(s) for s in file_candidates]) + paths_to_search = self.paths + if "/" in str(filename): + if filename_path.parent not in paths_to_search: + paths_to_search.append(filename_path.parent) + log.debug(f"Searching for preset in {paths_to_search}, file candidates: {file_candidates_str}") + for path in paths_to_search: + for candidate in file_candidates: + for file in path.rglob(candidate): + log.verbose(f'Found preset matching "{filename}" at {file}') + self.add_path(file.parent) + return file.resolve() + raise ValidationError( + f'Could not find preset at "{filename}" - file does not exist. Use -lp to list available presets' + ) + + def __str__(self): + return ":".join([str(s) for s in self.paths]) + + def add_path(self, path): + path = Path(path).resolve() + if path in self.paths: + return + if any(path.is_relative_to(p) for p in self.paths): + return + if not path.is_dir(): + log.debug(f'Path "{path.resolve()}" is not a directory') + return + self.paths.append(path) + + def __iter__(self): + yield from self.paths + + +PRESET_PATH = PresetPath() diff --git a/bbot/scanner/preset/preset.py b/bbot/scanner/preset/preset.py new file mode 100644 index 000000000..57eb11a1a --- /dev/null +++ b/bbot/scanner/preset/preset.py @@ -0,0 +1,930 @@ +import os +import yaml +import logging +import omegaconf +import traceback +from copy import copy +from pathlib import Path +from contextlib import suppress + +from .path import PRESET_PATH + +from bbot.errors import * +from bbot.core import CORE +from bbot.core.event.base import make_event +from bbot.core.helpers.misc import make_table, mkdir, get_closest_match + + +log = logging.getLogger("bbot.presets") + + +# cache default presets to prevent having to reload from disk +DEFAULT_PRESETS = None + + +class Preset: + """ + A preset is the central config for a BBOT scan. It contains everything a scan needs to run -- + targets, modules, flags, config options like API keys, etc. + + You can create a preset manually and pass it into `Scanner(preset=preset)`. + Or, you can pass `Preset`'s kwargs into `Scanner()` and it will create the preset for you implicitly. + + Presets can include other presets (which can in turn include other presets, and so on). + This works by merging each preset in turn using `Preset.merge()`. + The order matters. In case of a conflict, the last preset to be merged wins priority. 
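+ For example, if an earlier preset sets `config: {web_spider_distance: 1}` and a later
+ one sets `config: {web_spider_distance: 2}`, the merged preset ends up with a value of 2.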
+
+
+ Presets can be loaded from or saved to YAML. BBOT has a number of ready-made presets for common tasks like
+ subdomain enumeration, web spidering, dirbusting, etc.
+
+ Presets are highly customizable via `conditions`, which use the Jinja2 templating engine.
+ Using `conditions`, you can define custom logic to inspect the final preset before the scan starts, and change it if need be.
+ Based on the state of the preset, you can print a warning message, abort the scan, enable/disable modules, etc.
+
+ Attributes:
+ target (Target): Target(s) of scan.
+ whitelist (Target): Scan whitelist (by default this is the same as `target`).
+ blacklist (Target): Scan blacklist (this takes ultimate precedence).
+ strict_scope (bool): If True, subdomains of targets are not considered to be in-scope.
+ helpers (ConfigAwareHelper): Helper containing various reusable functions, regexes, etc.
+ output_dir (pathlib.Path): Output directory for scan.
+ scan_name (str): Name of scan. Defaults to random value, e.g. "demonic_jimmy".
+ name (str): Human-friendly name of preset. Used mainly for logging purposes.
+ description (str): Description of preset.
+ modules (set): Combined modules to enable for the scan. Includes scan modules, internal modules, and output modules.
+ scan_modules (set): Modules to enable for the scan.
+ output_modules (set): Output modules to enable for the scan. (note: if no output modules are specified, this is not populated until .bake())
+ internal_modules (set): Internal modules for the scan. (note: not populated until .bake())
+ exclude_modules (set): Modules to exclude from the scan. When set, automatically removes excluded modules.
+ flags (set): Flags to enable for the scan. When set, automatically enables modules.
+ require_flags (set): Require modules to have these flags. When set, automatically removes offending modules.
+ exclude_flags (set): Exclude modules that have any of these flags. When set, automatically removes offending modules.
+ module_dirs (set): Custom directories from which to load modules (alias to `self.module_loader.module_dirs`). When set, automatically preloads contained modules.
+ config (omegaconf.dictconfig.DictConfig): BBOT config (alias to `core.config`).
+ core (BBOTCore): Local copy of BBOTCore object.
+ verbose (bool): Whether log level is currently set to verbose. When set, updates log level for all BBOT log handlers.
+ debug (bool): Whether log level is currently set to debug. When set, updates log level for all BBOT log handlers.
+ silent (bool): Whether logging is currently disabled. When set to True, silences all stderr.
+
+ Examples:
+ >>> preset = Preset(
+ "evilcorp.com",
+ "1.2.3.0/24",
+ flags=["subdomain-enum"],
+ modules=["nuclei"],
+ config={"http_proxy": "http://127.0.0.1"}
+ )
+ >>> scan = Scanner(preset=preset)
+
+ >>> preset = Preset.from_yaml_file("my_preset.yml")
+ >>> scan = Scanner(preset=preset)
+ """
+
+ def __init__(
+ self,
+ *targets,
+ whitelist=None,
+ blacklist=None,
+ strict_scope=False,
+ modules=None,
+ output_modules=None,
+ exclude_modules=None,
+ flags=None,
+ require_flags=None,
+ exclude_flags=None,
+ config=None,
+ module_dirs=None,
+ include=None,
+ output_dir=None,
+ scan_name=None,
+ name=None,
+ description=None,
+ conditions=None,
+ force_start=False,
+ verbose=False,
+ debug=False,
+ silent=False,
+ _exclude=None,
+ _log=True,
+ ):
+ """
+ Initializes the Preset class.
+
+ Args:
+ *targets (str): Target(s) to scan. Types supported: hostnames, IPs, CIDRs, emails, open ports. 
whitelist (list, optional): Whitelisted target(s) to scan. Defaults to the same as `targets`.
+ blacklist (list, optional): Blacklisted target(s). Takes ultimate precedence. Defaults to empty.
+ strict_scope (bool, optional): If True, subdomains of targets are not in-scope.
+ modules (list[str], optional): List of scan modules to enable for the scan. Defaults to empty list.
+ output_modules (list[str], optional): List of output modules to use. Defaults to python, csv, human, and json.
+ exclude_modules (list[str], optional): List of modules to exclude from the scan.
+ flags (list[str], optional): List of flags to enable. Enables every module tagged with one of these flags. Defaults to empty list.
+ require_flags (list[str], optional): Only enable modules if they have these flags.
+ exclude_flags (list[str], optional): Don't enable modules if they have any of these flags.
+ module_dirs (list[str], optional): Additional directories to load modules from.
+ config (dict, optional): Additional scan configuration settings.
+ include (list[str], optional): Names or filenames of other presets to include.
+ output_dir (str or Path, optional): Directory to store scan output. Defaults to BBOT home directory (`~/.bbot`).
+ scan_name (str, optional): Human-readable name of the scan. If not specified, it will be random, e.g. "demonic_jimmy".
+ name (str, optional): Human-readable name of the preset. Used mainly for logging.
+ description (str, optional): Description of the preset.
+ conditions (list[str], optional): Custom conditions to be executed before scan start. Written in Jinja2.
+ force_start (bool, optional): If True, ignore conditional aborts and failed module setups. Just run the scan!
+ verbose (bool, optional): Set the BBOT logger to verbose mode.
+ debug (bool, optional): Set the BBOT logger to debug mode.
+ silent (bool, optional): Silence all stderr (effectively disables the BBOT logger).
+ _exclude (list[Path], optional): Preset filenames to exclude from inclusion. Used internally to prevent infinite recursion in circular or self-referencing presets.
+ _log (bool, optional): Whether to enable logging for the preset. This will record which modules/flags are enabled, etc. 
+ """ + # internal variables + self._cli = False + self._log = _log + self.scan = None + self._args = None + self._environ = None + self._helpers = None + self._module_loader = None + self._yaml_str = "" + self._verbose = False + self._debug = False + self._silent = False + + # modules / flags + self.modules = set() + self.exclude_modules = set() + self.flags = set() + self.exclude_flags = set() + self.require_flags = set() + + # modules + flags + if modules is None: + modules = [] + if isinstance(modules, str): + modules = [modules] + if output_modules is None: + output_modules = [] + if isinstance(output_modules, str): + output_modules = [output_modules] + if exclude_modules is None: + exclude_modules = [] + if isinstance(exclude_modules, str): + exclude_modules = [exclude_modules] + if flags is None: + flags = [] + if isinstance(flags, str): + flags = [flags] + if exclude_flags is None: + exclude_flags = [] + if isinstance(exclude_flags, str): + exclude_flags = [exclude_flags] + if require_flags is None: + require_flags = [] + if isinstance(require_flags, str): + require_flags = [require_flags] + + # these are used only for preserving the modules as specified in the original preset + # this is to ensure the preset looks the same when reserialized + self.explicit_scan_modules = set() if modules is None else set(modules) + self.explicit_output_modules = set() if output_modules is None else set(output_modules) + + # whether to force-start the scan (ignoring conditional aborts and failed module setups) + self.force_start = force_start + + # scan output directory + self.output_dir = output_dir + # name of scan + self.scan_name = scan_name + + # name of preset, default blank + self.name = name or "" + # preset description, default blank + self.description = description or "" + + # custom conditions, evaluated during .bake() + self.conditions = [] + if conditions is not None: + for condition in conditions: + self.conditions.append((self.name, condition)) + + # keeps track of loaded preset files to prevent infinite circular inclusions + self._preset_files_loaded = set() + if _exclude is not None: + for _filename in _exclude: + self._preset_files_loaded.add(Path(_filename).resolve()) + + # bbot core config + self.core = CORE.copy() + if config is None: + config = omegaconf.OmegaConf.create({}) + # merge custom configs if specified by the user + self.core.merge_custom(config) + + # log verbosity + # setting these automatically sets the log level for all log handlers. 
+ if verbose: + self.verbose = verbose + if debug: + self.debug = debug + if silent: + self.silent = silent + + # custom module directories + self._module_dirs = set() + self.module_dirs = module_dirs + + self.strict_scope = strict_scope + + # target / whitelist / blacklist + from bbot.scanner.target import Target + + self.target = Target(*targets, strict_scope=self.strict_scope) + if not whitelist: + self.whitelist = self.target.copy() + else: + self.whitelist = Target(*whitelist, strict_scope=self.strict_scope) + if not blacklist: + blacklist = [] + self.blacklist = Target(*blacklist) + + # include other presets + if include and not isinstance(include, (list, tuple, set)): + include = [include] + if include: + for included_preset in include: + self.include_preset(included_preset) + + # we don't fill self.modules yet (that happens in .bake()) + self.explicit_scan_modules.update(set(modules)) + self.explicit_output_modules.update(set(output_modules)) + self.exclude_modules.update(set(exclude_modules)) + self.flags.update(set(flags)) + self.exclude_flags.update(set(exclude_flags)) + self.require_flags.update(set(require_flags)) + + @property + def bbot_home(self): + return Path(self.config.get("home", "~/.bbot")).expanduser().resolve() + + @property + def preset_dir(self): + return self.bbot_home / "presets" + + def merge(self, other): + """ + Merge another preset into this one. + + If there are any config conflicts, `other` will win over `self`. + + Args: + other (Preset): The preset to merge into this one. + + Examples: + >>> preset1 = Preset(modules=["nmap"]) + >>> preset1.scan_modules + ['nmap'] + >>> preset2 = Preset(modules=["sslcert"]) + >>> preset2.scan_modules + ['sslcert'] + >>> preset1.merge(preset2) + >>> preset1.scan_modules + ['nmap', 'sslcert'] + """ + self.log_debug(f'Merging preset "{other.name}" into "{self.name}"') + # config + self.core.merge_custom(other.core.custom_config) + self.module_loader.core = self.core + # module dirs + # modules + flags + # establish requirements / exclusions first + self.exclude_modules.update(other.exclude_modules) + self.require_flags.update(other.require_flags) + self.exclude_flags.update(other.exclude_flags) + # then it's okay to start enabling modules + self.explicit_scan_modules.update(other.explicit_scan_modules) + self.explicit_output_modules.update(other.explicit_output_modules) + self.flags.update(other.flags) + # scope + self.target.add_target(other.target) + self.whitelist.add_target(other.whitelist) + self.blacklist.add_target(other.blacklist) + self.strict_scope = self.strict_scope or other.strict_scope + for t in (self.target, self.whitelist): + t.strict_scope = self.strict_scope + # log verbosity + if other.silent: + self.silent = other.silent + if other.verbose: + self.verbose = other.verbose + if other.debug: + self.debug = other.debug + # scan name + if other.scan_name is not None: + self.scan_name = other.scan_name + if other.output_dir is not None: + self.output_dir = other.output_dir + # conditions + if other.conditions: + self.conditions.extend(other.conditions) + # misc + self.force_start = self.force_start | other.force_start + + def bake(self): + """ + Return a "baked" copy of this preset, ready for use by a BBOT scan. + + Baking a preset finalizes it by populating `preset.modules` based on flags, + performing final validations, and substituting environment variables in preloaded modules. + It also evaluates custom `conditions` as specified in the preset. 
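+
+ Examples:
+ >>> preset = Preset("evilcorp.com", flags=["subdomain-enum"])
+ >>> baked = preset.bake()
+ >>> # baked.modules is now populated, e.g. with every "subdomain-enum" module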
+ + This function is automatically called in Scanner.__init__(). There is no need to call it manually. + """ + self.log_debug("Getting baked") + # create a copy of self + baked_preset = copy(self) + # copy core + baked_preset.core = self.core.copy() + # copy module loader + baked_preset._module_loader = self.module_loader.copy() + # prepare os environment + os_environ = baked_preset.environ.prepare() + # find and replace preloaded modules with os environ + # this is different from the config variable substitution because it modifies + # the preloaded modules, i.e. their ansible playbooks + baked_preset.module_loader.find_and_replace(**os_environ) + # update os environ + os.environ.clear() + os.environ.update(os_environ) + + # validate flags, config options + baked_preset.validate() + + # now that our requirements / exclusions are validated, we can start enabling modules + # enable scan modules + for module in baked_preset.explicit_scan_modules: + baked_preset.add_module(module, module_type="scan") + # enable output modules + for module in baked_preset.explicit_output_modules: + baked_preset.add_module(module, module_type="output", raise_error=False) + + # enable internal modules + for internal_module, preloaded in baked_preset.module_loader.preloaded(type="internal").items(): + is_enabled = baked_preset.config.get(internal_module, True) + is_excluded = internal_module in baked_preset.exclude_modules + if is_enabled and not is_excluded: + baked_preset.add_module(internal_module, module_type="internal", raise_error=False) + + # disable internal modules if requested + for internal_module in baked_preset.internal_modules: + if baked_preset.config.get(internal_module, True) == False: + baked_preset.exclude_modules.add(internal_module) + + # enable modules by flag + for flag in baked_preset.flags: + for module, preloaded in baked_preset.module_loader.preloaded().items(): + module_flags = preloaded.get("flags", []) + module_type = preloaded.get("type", "scan") + if flag in module_flags: + self.log_debug(f'Enabling module "{module}" because it has flag "{flag}"') + baked_preset.add_module(module, module_type, raise_error=False) + + # ensure we have output modules + if not baked_preset.output_modules: + for output_module in ("python", "csv", "human", "json"): + baked_preset.add_module(output_module, module_type="output", raise_error=False) + + # evaluate conditions + if baked_preset.conditions: + from .conditions import ConditionEvaluator + + evaluator = ConditionEvaluator(baked_preset) + evaluator.evaluate() + + return baked_preset + + def parse_args(self): + """ + Parse CLI arguments, and merge them into this preset. + + Used in `cli.py`. 
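+
+ Examples:
+ >>> preset = Preset()
+ >>> preset.parse_args()  # merges CLI options (e.g. -t evilcorp.com -p subdomain-enum) into this preset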
+ """ + self._cli = True + self.merge(self.args.preset_from_args()) + + @property + def module_dirs(self): + return self.module_loader.module_dirs + + @module_dirs.setter + def module_dirs(self, module_dirs): + if module_dirs: + if isinstance(module_dirs, str): + module_dirs = [module_dirs] + for m in module_dirs: + self.module_loader.add_module_dir(m) + self._module_dirs.add(m) + + @property + def scan_modules(self): + return [m for m in self.modules if self.preloaded_module(m).get("type", "scan") == "scan"] + + @property + def output_modules(self): + return [m for m in self.modules if self.preloaded_module(m).get("type", "scan") == "output"] + + @property + def internal_modules(self): + return [m for m in self.modules if self.preloaded_module(m).get("type", "scan") == "internal"] + + def add_module(self, module_name, module_type="scan", raise_error=True): + self.log_debug(f'Adding module "{module_name}" of type "{module_type}"') + is_valid, reason, preloaded = self._is_valid_module(module_name, module_type, raise_error=raise_error) + if not is_valid: + self.log_debug(f'Unable to add {module_type} module "{module_name}": {reason}') + return + self.modules.add(module_name) + for module_dep in preloaded.get("deps", {}).get("modules", []): + if module_dep != module_name and module_dep not in self.modules: + self.log_verbose(f'Adding module "{module_dep}" because {module_name} depends on it') + self.add_module(module_dep, raise_error=False) + + def preloaded_module(self, module): + return self.module_loader.preloaded()[module] + + @property + def config(self): + return self.core.config + + @property + def verbose(self): + return self._verbose + + @verbose.setter + def verbose(self, value): + if value: + self._debug = False + self._silent = False + self.core.logger.log_level = "VERBOSE" + else: + with suppress(omegaconf.errors.ConfigKeyError): + del self.core.custom_config["verbose"] + self.core.logger.log_level = "INFO" + self._verbose = value + + @property + def debug(self): + return self._debug + + @debug.setter + def debug(self, value): + if value: + self._verbose = False + self._silent = False + self.core.logger.log_level = "DEBUG" + else: + with suppress(omegaconf.errors.ConfigKeyError): + del self.core.custom_config["debug"] + self.core.logger.log_level = "INFO" + self._debug = value + + @property + def silent(self): + return self._silent + + @silent.setter + def silent(self, value): + if value: + self._verbose = False + self._debug = False + self.core.logger.log_level = "CRITICAL" + else: + with suppress(omegaconf.errors.ConfigKeyError): + del self.core.custom_config["silent"] + self.core.logger.log_level = "INFO" + self._silent = value + + @property + def helpers(self): + if self._helpers is None: + from bbot.core.helpers.helper import ConfigAwareHelper + + self._helpers = ConfigAwareHelper(preset=self) + return self._helpers + + @property + def module_loader(self): + self.environ + if self._module_loader is None: + from bbot.core.modules import MODULE_LOADER + + self._module_loader = MODULE_LOADER + + return self._module_loader + + @property + def environ(self): + if self._environ is None: + from .environ import BBOTEnviron + + self._environ = BBOTEnviron(self) + return self._environ + + @property + def args(self): + if self._args is None: + from .args import BBOTArgs + + self._args = BBOTArgs(self) + return self._args + + def in_scope(self, host): + """ + Check whether a hostname, url, IP, etc. is in scope. + Accepts either events or string data. + + Checks whitelist and blacklist. 
+ If `host` is an event and its scope distance is zero, it will automatically be considered in-scope. + + Examples: + Check if a URL is in scope: + >>> preset.in_scope("http://www.evilcorp.com") + True + """ + try: + e = make_event(host, dummy=True) + except ValidationError: + return False + in_scope = e.scope_distance == 0 or self.whitelisted(e) + return in_scope and not self.blacklisted(e) + + def blacklisted(self, host): + """ + Check whether a hostname, url, IP, etc. is blacklisted. + + Note that `host` can be a hostname, IP address, CIDR, email address, or any BBOT `Event` with the `host` attribute. + + Args: + host (str or IPAddress or Event): The host to check against the blacklist + + Examples: + Check if a URL's host is blacklisted: + >>> preset.blacklisted("http://www.evilcorp.com") + True + """ + e = make_event(host, dummy=True) + return e in self.blacklist + + def whitelisted(self, host): + """ + Check whether a hostname, url, IP, etc. is whitelisted. + + Note that `host` can be a hostname, IP address, CIDR, email address, or any BBOT `Event` with the `host` attribute. + + Args: + host (str or IPAddress or Event): The host to check against the whitelist + + Examples: + Check if a URL's host is whitelisted: + >>> preset.whitelisted("http://www.evilcorp.com") + True + """ + e = make_event(host, dummy=True) + return e in self.whitelist + + @classmethod + def from_dict(cls, preset_dict, name=None, _exclude=None, _log=False): + """ + Create a preset from a Python dictionary object. + + Args: + preset_dict (dict): Preset in dictionary form + name (str, optional): Name of preset + _exclude (list[Path], optional): Preset filenames to exclude from inclusion. Used internally to prevent infinite recursion in circular or self-referencing presets. + _log (bool, optional): Whether to enable logging for the preset. This will record which modules/flags are enabled, etc. 
+
+ Returns:
+ Preset: The loaded preset
+
+ Examples:
+ >>> preset = Preset.from_dict({"target": ["evilcorp.com"], "modules": ["nmap"]})
+ """
+ new_preset = cls(
+ *preset_dict.get("target", []),
+ whitelist=preset_dict.get("whitelist"),
+ blacklist=preset_dict.get("blacklist"),
+ modules=preset_dict.get("modules"),
+ output_modules=preset_dict.get("output_modules"),
+ exclude_modules=preset_dict.get("exclude_modules"),
+ flags=preset_dict.get("flags"),
+ require_flags=preset_dict.get("require_flags"),
+ exclude_flags=preset_dict.get("exclude_flags"),
+ verbose=preset_dict.get("verbose", False),
+ debug=preset_dict.get("debug", False),
+ silent=preset_dict.get("silent", False),
+ config=preset_dict.get("config"),
+ strict_scope=preset_dict.get("strict_scope", False),
+ module_dirs=preset_dict.get("module_dirs", []),
+ include=list(preset_dict.get("include", [])),
+ scan_name=preset_dict.get("scan_name"),
+ output_dir=preset_dict.get("output_dir"),
+ name=preset_dict.get("name", name),
+ description=preset_dict.get("description"),
+ conditions=preset_dict.get("conditions", []),
+ _exclude=_exclude,
+ _log=_log,
+ )
+ return new_preset
+
+ def include_preset(self, filename):
+ """
+ Load a preset from a YAML file and merge it into this one
+
+ Args:
+ filename (Path): The preset YAML file to merge
+ """
+ self.log_debug(f'Including preset "{filename}"')
+ preset_filename = PRESET_PATH.find(filename)
+ preset_from_yaml = self.from_yaml_file(preset_filename, _exclude=self._preset_files_loaded)
+ if preset_from_yaml is not False:
+ self.merge(preset_from_yaml)
+ self._preset_files_loaded.add(preset_filename)
+
+ @classmethod
+ def from_yaml_file(cls, filename, _exclude=None, _log=False):
+ """
+ Create a preset from a YAML file. If the full path is not specified, BBOT will look in all the usual places for it.
+
+ The file extension is optional.
+ """
+ filename = Path(filename).resolve()
+ if _exclude is None:
+ _exclude = set()
+ if filename in _exclude:
+ log.debug(f"Not loading {filename} because it was already loaded ({_exclude})")
+ return False
+ log.debug(f"Loading {filename} because it's not in the excluded list ({_exclude})")
+ _exclude = set(_exclude)
+ _exclude.add(filename)
+ try:
+ yaml_str = open(filename).read()
+ except FileNotFoundError:
+ raise PresetNotFoundError(f'Could not find preset at "{filename}" - file does not exist')
+ preset = cls.from_dict(omegaconf.OmegaConf.create(yaml_str), name=filename.stem, _exclude=_exclude, _log=_log)
+ preset._yaml_str = yaml_str
+ return preset
+
+ @classmethod
+ def from_yaml_string(cls, yaml_preset):
+ return cls.from_dict(omegaconf.OmegaConf.create(yaml_preset))
+
+ def to_dict(self, include_target=False, full_config=False):
+ """
+ Convert this preset into a Python dictionary.
+
+ Args:
+ include_target (bool, optional): If True, include target, whitelist, and blacklist in the dictionary
+ full_config (bool, optional): If True, include the entire config, not just what's changed from the defaults. 
+
+ Returns:
+ dict: The preset in dictionary form
+
+ Examples:
+ >>> preset = Preset(flags=["subdomain-enum"], modules=["nmap"])
+ >>> preset.to_dict()
+ {"flags": ["subdomain-enum"], "modules": ["nmap"]}
+ """
+ preset_dict = {}
+
+ # config
+ if full_config:
+ config = self.core.config
+ else:
+ config = self.core.custom_config
+ config = omegaconf.OmegaConf.to_container(config)
+ if config:
+ preset_dict["config"] = config
+
+ # scope
+ if include_target:
+ target = sorted(str(t.data) for t in self.target)
+ whitelist = sorted(str(t.data) for t in self.whitelist)
+ blacklist = sorted(str(t.data) for t in self.blacklist)
+ if target:
+ preset_dict["target"] = target
+ if whitelist and whitelist != target:
+ preset_dict["whitelist"] = whitelist
+ if blacklist:
+ preset_dict["blacklist"] = blacklist
+ if self.strict_scope:
+ preset_dict["strict_scope"] = True
+
+ # flags + modules
+ if self.require_flags:
+ preset_dict["require_flags"] = sorted(self.require_flags)
+ if self.exclude_flags:
+ preset_dict["exclude_flags"] = sorted(self.exclude_flags)
+ if self.exclude_modules:
+ preset_dict["exclude_modules"] = sorted(self.exclude_modules)
+ if self.flags:
+ preset_dict["flags"] = sorted(self.flags)
+ if self.explicit_scan_modules:
+ preset_dict["modules"] = sorted(self.explicit_scan_modules)
+ if self.explicit_output_modules:
+ preset_dict["output_modules"] = sorted(self.explicit_output_modules)
+
+ # log verbosity
+ if self.verbose:
+ preset_dict["verbose"] = True
+ if self.debug:
+ preset_dict["debug"] = True
+ if self.silent:
+ preset_dict["silent"] = True
+
+ # misc scan options
+ if self.scan_name:
+ preset_dict["scan_name"] = self.scan_name
+ if self.output_dir:
+ preset_dict["output_dir"] = self.output_dir
+
+ # conditions
+ if self.conditions:
+ preset_dict["conditions"] = [c[-1] for c in self.conditions]
+
+ return preset_dict
+
+ def to_yaml(self, include_target=False, full_config=False, sort_keys=False):
+ """
+ Return the preset in the form of a YAML string.
+
+ Args:
+ include_target (bool, optional): If True, include target, whitelist, and blacklist in the YAML
+ full_config (bool, optional): If True, include the entire config, not just what's changed from the defaults. 
sort_keys (bool, optional): If True, sort YAML keys alphabetically
+
+ Returns:
+ str: The preset in the form of a YAML string
+
+ Examples:
+ >>> preset = Preset(flags=["subdomain-enum"], modules=["nmap"])
+ >>> print(preset.to_yaml())
+ flags:
+ - subdomain-enum
+ modules:
+ - nmap
+ """
+ preset_dict = self.to_dict(include_target=include_target, full_config=full_config)
+ return yaml.dump(preset_dict, sort_keys=sort_keys)
+
+ def _is_valid_module(self, module, module_type, name_only=False, raise_error=True):
+ if module_type == "scan":
+ module_choices = self.module_loader.scan_module_choices
+ elif module_type == "output":
+ module_choices = self.module_loader.output_module_choices
+ elif module_type == "internal":
+ module_choices = self.module_loader.internal_module_choices
+ else:
+ raise ValidationError(f'Unknown module type "{module_type}"')
+
+ if module not in module_choices:
+ raise ValidationError(get_closest_match(module, module_choices, msg=f"{module_type} module"))
+
+ try:
+ preloaded = self.module_loader.preloaded()[module]
+ except KeyError:
+ raise ValidationError(f'Unknown module "{module}"')
+
+ if name_only:
+ return True, "", preloaded
+
+ if module in self.exclude_modules:
+ reason = "the module has been excluded"
+ if raise_error:
+ raise ValidationError(f'Unable to add {module_type} module "{module}" because {reason}')
+ return False, reason, {}
+
+ module_flags = preloaded.get("flags", [])
+ _module_type = preloaded.get("type", "scan")
+ if module_type:
+ if _module_type != module_type:
+ reason = f'its type ({_module_type}) is not "{module_type}"'
+ if raise_error:
+ raise ValidationError(f'Unable to add {module_type} module "{module}" because {reason}')
+ return False, reason, preloaded
+
+ if _module_type == "scan":
+ if self.exclude_flags:
+ for f in module_flags:
+ if f in self.exclude_flags:
+ return False, f'it has excluded flag "{f}"', preloaded
+ if self.require_flags and not all(f in module_flags for f in self.require_flags):
+ return False, f'it doesn\'t have the required flags ({",".join(self.require_flags)})', preloaded
+
+ return True, "", preloaded
+
+ def validate(self):
+ """
+ Validate module/flag exclusions/requirements, and CLI config options if applicable. 
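+
+ Raises:
+ ValidationError: If a referenced module, flag, or config option does not exist (the error message suggests the closest match).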
+ """ + if self._cli: + self.args.validate() + + # validate excluded modules + for excluded_module in self.exclude_modules: + if not excluded_module in self.module_loader.all_module_choices: + raise ValidationError( + get_closest_match(excluded_module, self.module_loader.all_module_choices, msg="module") + ) + # validate excluded flags + for excluded_flag in self.exclude_flags: + if not excluded_flag in self.module_loader.flag_choices: + raise ValidationError(get_closest_match(excluded_flag, self.module_loader.flag_choices, msg="flag")) + # validate required flags + for required_flag in self.require_flags: + if not required_flag in self.module_loader.flag_choices: + raise ValidationError(get_closest_match(required_flag, self.module_loader.flag_choices, msg="flag")) + # validate flags + for flag in self.flags: + if not flag in self.module_loader.flag_choices: + raise ValidationError(get_closest_match(flag, self.module_loader.flag_choices, msg="flag")) + + @property + def all_presets(self): + """ + Recursively find all the presets and return them as a dictionary + """ + preset_dir = self.preset_dir + home_dir = Path.home() + + # first, add local preset dir to PRESET_PATH + PRESET_PATH.add_path(self.preset_dir) + + # ensure local preset directory exists + mkdir(preset_dir) + + global DEFAULT_PRESETS + if DEFAULT_PRESETS is None: + presets = dict() + for ext in ("yml", "yaml"): + for preset_path in PRESET_PATH: + # for every yaml file + for original_filename in preset_path.rglob(f"**/*.{ext}"): + # not including symlinks + if original_filename.is_symlink(): + continue + + # try to load it as a preset + try: + loaded_preset = self.from_yaml_file(original_filename, _log=True) + if loaded_preset is False: + continue + except Exception as e: + log.warning(f'Failed to load preset at "{original_filename}": {e}') + log.trace(traceback.format_exc()) + continue + + # category is the parent folder(s), if any + category = str(original_filename.relative_to(preset_path).parent) + if category == ".": + category = "" + + local_preset = original_filename + # populate symlinks in local preset dir + if not original_filename.is_relative_to(preset_dir): + relative_preset = original_filename.relative_to(preset_path) + local_preset = preset_dir / relative_preset + mkdir(local_preset.parent, check_writable=False) + if not local_preset.exists(): + local_preset.symlink_to(original_filename) + + # collapse home directory into "~" + if local_preset.is_relative_to(home_dir): + local_preset = Path("~") / local_preset.relative_to(home_dir) + + presets[local_preset] = (loaded_preset, category, preset_path, original_filename) + + # sort by name + DEFAULT_PRESETS = dict(sorted(presets.items(), key=lambda x: x[-1][0].name)) + return DEFAULT_PRESETS + + def presets_table(self, include_modules=True): + """ + Return a table of all the presets in the form of a string + """ + table = [] + header = ["Preset", "Category", "Description", "# Modules"] + if include_modules: + header.append("Modules") + for yaml_file, (loaded_preset, category, preset_path, original_file) in self.all_presets.items(): + loaded_preset = loaded_preset.bake() + num_modules = f"{len(loaded_preset.scan_modules):,}" + row = [loaded_preset.name, category, loaded_preset.description, num_modules] + if include_modules: + row.append(", ".join(sorted(loaded_preset.scan_modules))) + table.append(row) + return make_table(table, header) + + def log_verbose(self, msg): + if self._log: + log.verbose(f"Preset {self.name}: {msg}") + + def log_debug(self, msg): + if 
self._log: + log.debug(f"Preset {self.name}: {msg}") diff --git a/bbot/scanner/scanner.py b/bbot/scanner/scanner.py index 6af0f704c..9f444d3cb 100644 --- a/bbot/scanner/scanner.py +++ b/bbot/scanner/scanner.py @@ -1,46 +1,29 @@ -import re import sys import asyncio import logging import traceback import contextlib -from sys import exc_info +import regex as re from pathlib import Path -import multiprocessing as mp +from sys import exc_info from datetime import datetime -from functools import partial -from omegaconf import OmegaConf from collections import OrderedDict -from concurrent.futures import ProcessPoolExecutor from bbot import __version__ -from bbot import config as bbot_config -from .target import Target + +from .preset import Preset from .stats import ScanStats -from .manager import ScanManager from .dispatcher import Dispatcher -from bbot.modules import module_loader from bbot.core.event import make_event +from .manager import ScanIngress, ScanEgress from bbot.core.helpers.misc import sha1, rand_string -from bbot.core.helpers.helper import ConfigAwareHelper from bbot.core.helpers.names_generator import random_name from bbot.core.helpers.async_helpers import async_to_sync_gen -from bbot.core.configurator.environ import prepare_environment -from bbot.core.errors import BBOTError, ScanError, ValidationError -from bbot.core.logger import ( - init_logging, - get_log_level, - set_log_level, - add_log_handler, - get_log_handlers, - remove_log_handler, -) +from bbot.errors import BBOTError, ScanError, ValidationError log = logging.getLogger("bbot.scanner") -init_logging() - class Scanner: """A class representing a single BBOT scan @@ -81,16 +64,17 @@ class Scanner: - "FINISHED" (8): Status when the scan has successfully completed. ``` _status_code (int): The numerical representation of the current scan status, stored for internal use. It is mapped according to the values in `_status_codes`. - target (Target): Target of scan - config (omegaconf.dictconfig.DictConfig): BBOT config - whitelist (Target): Scan whitelist (by default this is the same as `target`) - blacklist (Target): Scan blacklist (this takes ultimate precedence) - helpers (ConfigAwareHelper): Helper containing various reusable functions, regexes, etc. - manager (ScanManager): Coordinates and monitors the flow of events between modules during a scan - dispatcher (Dispatcher): Triggers certain events when the scan `status` changes - modules (dict): Holds all loaded modules in this format: `{"module_name": Module()}` - stats (ScanStats): Holds high-level scan statistics such as how many events have been produced and consumed by each module - home (pathlib.Path): Base output directory of the scan (default: `~/.bbot/scans/`) + target (Target): Target of scan (alias to `self.preset.target`). + config (omegaconf.dictconfig.DictConfig): BBOT config (alias to `self.preset.config`). + whitelist (Target): Scan whitelist (by default this is the same as `target`) (alias to `self.preset.whitelist`). + blacklist (Target): Scan blacklist (this takes ultimate precedence) (alias to `self.preset.blacklist`). + helpers (ConfigAwareHelper): Helper containing various reusable functions, regexes, etc. (alias to `self.preset.helpers`). + output_dir (pathlib.Path): Output directory for scan (alias to `self.preset.output_dir`). + name (str): Name of scan (alias to `self.preset.scan_name`). + dispatcher (Dispatcher): Triggers certain events when the scan `status` changes. 
+        modules (dict): Holds all loaded modules in this format: `{"module_name": Module()}`.
+        stats (ScanStats): Holds high-level scan statistics such as how many events have been produced and consumed by each module.
+        home (pathlib.Path): Base output directory of the scan (default: `~/.bbot/scans/`).
         running (bool): Whether the scan is currently running.
         stopping (bool): Whether the scan is currently stopping.
         stopped (bool): Whether the scan is currently stopped.
@@ -117,104 +101,71 @@ class Scanner:
     def __init__(
         self,
         *targets,
-        whitelist=None,
-        blacklist=None,
         scan_id=None,
-        name=None,
-        modules=None,
-        output_modules=None,
-        output_dir=None,
-        config=None,
         dispatcher=None,
-        strict_scope=False,
-        force_start=False,
+        **kwargs,
     ):
         """
         Initializes the Scanner class.
 
+        If a premade `preset` is specified, it will be used for the scan.
+        Otherwise, `Scanner` accepts the same arguments as `Preset`, which are passed through and used to create a new preset.
+
         Args:
-            *targets (str): Target(s) to scan.
-            whitelist (list, optional): Whitelisted target(s) to scan. Defaults to the same as `targets`.
-            blacklist (list, optional): Blacklisted target(s). Takes ultimate precedence. Defaults to empty.
+            *targets (list[str], optional): Scan targets (passed through to `Preset`).
+            preset (Preset, optional): Preset to use for the scan.
             scan_id (str, optional): Unique identifier for the scan. Auto-generates if None.
-            name (str, optional): Human-readable name of the scan. Auto-generates if None.
-            modules (list[str], optional): List of module names to use during the scan. Defaults to empty list.
-            output_modules (list[str], optional): List of output modules to use. Defaults to ['python'].
-            output_dir (str or Path, optional): Directory to store scan output. Defaults to BBOT home directory (`~/.bbot`).
-            config (dict, optional): Configuration settings. Merged with BBOT config.
             dispatcher (Dispatcher, optional): Dispatcher object to use. Defaults to new Dispatcher.
-            strict_scope (bool, optional): If True, only targets explicitly in whitelist are scanned. Defaults to False.
-            force_start (bool, optional): If True, allows the scan to start even when module setups hard-fail. Defaults to False.
+            **kwargs: Additional keyword arguments (passed through to `Preset`).
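+
+        Examples:
+            >>> # a minimal sketch: run a scan from a custom preset
+            >>> from bbot.scanner import Scanner, Preset
+            >>> my_preset = Preset("evilcorp.com", flags=["subdomain-enum"])
+            >>> scan = Scanner(preset=my_preset)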
""" - if modules is None: - modules = [] - if output_modules is None: - output_modules = ["python"] - - if isinstance(modules, str): - modules = [modules] - if isinstance(output_modules, str): - output_modules = [output_modules] - - if config is None: - config = OmegaConf.create({}) - else: - config = OmegaConf.create(config) - self.config = OmegaConf.merge(bbot_config, config) - prepare_environment(self.config) - if self.config.get("debug", False): - set_log_level(logging.DEBUG) - - self.strict_scope = strict_scope - self.force_start = force_start - if scan_id is not None: - self.id = str(scan_id) + self.id = str(id) else: self.id = f"SCAN:{sha1(rand_string(20)).hexdigest()}" - self._status = "NOT_STARTED" - self._status_code = 0 - self.max_workers = max(1, self.config.get("max_threads", 25)) - self.helpers = ConfigAwareHelper(config=self.config, scan=self) + preset = kwargs.pop("preset", None) + kwargs["_log"] = True + if preset is None: + preset = Preset(*targets, **kwargs) + else: + if not isinstance(preset, Preset): + raise ValidationError(f'Preset must be of type Preset, not "{type(preset).__name__}"') + self.preset = preset.bake() + self.preset.scan = self - if name is None: + # scan name + if preset.scan_name is None: tries = 0 while 1: if tries > 5: - self.name = f"{self.helpers.rand_string(4)}_{self.helpers.rand_string(4)}" + scan_name = f"{rand_string(4)}_{rand_string(4)}" break - self.name = random_name() - if output_dir is not None: - home_path = Path(output_dir).resolve() / self.name + scan_name = random_name() + if self.preset.output_dir is not None: + home_path = Path(self.preset.output_dir).resolve() / scan_name else: - home_path = self.helpers.bbot_home / "scans" / self.name + home_path = self.preset.bbot_home / "scans" / scan_name if not home_path.exists(): break tries += 1 else: - self.name = str(name) + scan_name = str(preset.scan_name) + self.name = scan_name - if output_dir is not None: - self.home = Path(output_dir).resolve() / self.name + # scan output dir + if preset.output_dir is not None: + self.home = Path(preset.output_dir).resolve() / self.name else: - self.home = self.helpers.bbot_home / "scans" / self.name + self.home = self.preset.bbot_home / "scans" / self.name + + self._status = "NOT_STARTED" + self._status_code = 0 - self.target = Target(self, *targets, strict_scope=strict_scope, make_in_scope=True) + self.max_workers = max(1, self.config.get("manager_tasks", 5)) self.modules = OrderedDict({}) - self._scan_modules = modules - self._internal_modules = list(self._internal_modules()) - self._output_modules = output_modules self._modules_loaded = False - - if not whitelist: - self.whitelist = self.target.copy() - else: - self.whitelist = Target(self, *whitelist, strict_scope=strict_scope) - if not blacklist: - blacklist = [] - self.blacklist = Target(self, *blacklist) + self.dummy_modules = {} if dispatcher is None: self.dispatcher = Dispatcher() @@ -222,14 +173,10 @@ def __init__( self.dispatcher = dispatcher self.dispatcher.set_scan(self) - self.manager = ScanManager(self) self.stats = ScanStats(self) # scope distance self.scope_search_distance = max(0, int(self.config.get("scope_search_distance", 0))) - self.scope_dns_search_distance = max( - self.scope_search_distance, int(self.config.get("scope_dns_search_distance", 1)) - ) self.scope_report_distance = int(self.config.get("scope_report_distance", 1)) # url file extensions @@ -248,6 +195,7 @@ def __init__( self._prepped = False self._finished_init = False + self._new_activity = False self._cleanedup = 
False self.__loop = None @@ -256,16 +204,6 @@ def __init__( self.ticker_task = None self.dispatcher_tasks = [] - # multiprocessing thread pool - try: - mp.set_start_method("spawn") - except Exception: - self.warning(f"Failed to set multiprocessing spawn method. This may negatively affect performance.") - # we spawn 1 fewer processes than cores - # this helps to avoid locking up the system or competing with the main python process for cpu time - num_processes = max(1, mp.cpu_count() - 1) - self.process_pool = ProcessPoolExecutor(max_workers=num_processes) - self._stopping = False self._dns_regexes = None @@ -274,12 +212,17 @@ def __init__( async def _prep(self): """ - Calls .load_modules() and .setup_modules() in preparation for a scan + Creates the scan's output folder, loads its modules, and calls their .setup() methods. """ self.helpers.mkdir(self.home) if not self._prepped: - start_msg = f"Scan with {len(self._scan_modules):,} modules seeded with {len(self.target):,} targets" + # save scan preset + with open(self.home / "preset.yml", "w") as f: + f.write(self.preset.to_yaml()) + + # log scan overview + start_msg = f"Scan with {len(self.preset.scan_modules):,} modules seeded with {len(self.target):,} targets" details = [] if self.whitelist != self.target: details.append(f"{len(self.whitelist):,} in whitelist") @@ -289,14 +232,27 @@ async def _prep(self): start_msg += f" ({', '.join(details)})" self.hugeinfo(start_msg) + # load scan modules (this imports and instantiates them) + # up to this point they were only preloaded await self.load_modules() - self.info(f"Setting up modules...") + # run each module's .setup() method succeeded, hard_failed, soft_failed = await self.setup_modules() + # intercept modules get sewn together like human centipede + self.intercept_modules = [m for m in self.modules.values() if m._intercept] + for i, intercept_module in enumerate(self.intercept_modules[:-1]): + next_intercept_module = self.intercept_modules[i + 1] + self.debug( + f"Setting intercept module {intercept_module.name}.outgoing_event_queue to next intercept module {next_intercept_module.name}.incoming_event_queue" + ) + intercept_module._outgoing_event_queue = next_intercept_module.incoming_event_queue + + # abort if there are no output modules num_output_modules = len([m for m in self.modules.values() if m._type == "output"]) if num_output_modules < 1: raise ScanError("Failed to load output modules. Aborting.") + # abort if any of the module .setup()s hard-failed (i.e. 
they errored or returned False)
         total_failed = len(hard_failed + soft_failed)
         if hard_failed:
             msg = f"Setup hard-failed for {len(hard_failed):,} modules ({','.join(hard_failed)})"
@@ -347,18 +303,13 @@ async def async_start(self):
 
             await self.dispatcher.on_start(self)
 
-            # start manager worker loops
-            self._manager_worker_loop_tasks = [
-                asyncio.create_task(self.manager._worker_loop()) for _ in range(self.max_workers)
-            ]
-
-            # distribute seed events
-            self.init_events_task = asyncio.create_task(self.manager.init_events())
-
             self.status = "RUNNING"
             self._start_modules()
             self.verbose(f"{len(self.modules):,} modules started")
 
+            # distribute seed events
+            self.init_events_task = asyncio.create_task(self.ingress_module.init_events(self.target.events))
+
             # main scan loop
             while 1:
                 # abort if we're aborting
@@ -366,13 +317,14 @@ async def async_start(self):
                     self._drain_queues()
                     break
 
+                # yield events as they come (async for event in scan.async_start())
                 if "python" in self.modules:
-                    events, finish = await self.modules["python"]._events_waiting()
+                    events, finish = await self.modules["python"]._events_waiting(batch_size=-1)
                     for e in events:
                         yield e
 
-                # if initialization finished and the scan is no longer active
-                if self._finished_init and not self.manager.active:
+                # break if initialization finished and the scan is no longer active
+                if self._finished_init and self.modules_finished:
                     new_activity = await self.finish()
                     if not new_activity:
                         break
@@ -399,7 +351,13 @@ async def async_start(self):
             self.critical(f"Unexpected error during scan:\n{traceback.format_exc()}")
 
         finally:
-            self._cancel_tasks()
+            tasks = self._cancel_tasks()
+            self.debug(f"Awaiting {len(tasks):,} tasks")
+            for task in tasks:
+                self.debug(f"Awaiting {task}")
+                with contextlib.suppress(BaseException):
+                    await task
+            self.debug(f"Awaited {len(tasks):,} tasks")
 
             await self._report()
             await self._cleanup()
@@ -423,7 +381,7 @@ async def async_start(self):
 
     def _start_modules(self):
         self.verbose(f"Starting module worker loops")
-        for module_name, module in self.modules.items():
+        for module in self.modules.values():
             module.start()
 
     async def setup_modules(self, remove_failed=True):
@@ -452,19 +410,20 @@ async def setup_modules(self, remove_failed=True):
         soft_failed = []
 
         async for task in self.helpers.as_completed([m._setup() for m in self.modules.values()]):
-            module_name, status, msg = await task
+            module, status, msg = await task
             if status == True:
-                self.debug(f"Setup succeeded for {module_name} ({msg})")
-                succeeded.append(module_name)
+                self.debug(f"Setup succeeded for {module.name} ({msg})")
+                succeeded.append(module.name)
             elif status == False:
-                self.warning(f"Setup hard-failed for {module_name}: {msg}")
-                self.modules[module_name].set_error_state()
-                hard_failed.append(module_name)
+                self.warning(f"Setup hard-failed for {module.name}: {msg}")
+                self.modules[module.name].set_error_state()
+                hard_failed.append(module.name)
             else:
-                self.info(f"Setup soft-failed for {module_name}: {msg}")
-                soft_failed.append(module_name)
-            if not status and remove_failed:
-                self.modules.pop(module_name)
+                self.info(f"Setup soft-failed for {module.name}: {msg}")
+                soft_failed.append(module.name)
+            if (not status) and (module._intercept or remove_failed):
+                # if an intercept module fails setup, we always remove it
+                self.modules.pop(module.name)
 
         return succeeded, hard_failed, soft_failed
 
@@ -479,7 +438,7 @@ async def load_modules(self):
         4. Loads output modules and updates the `modules` dictionary.
         5. Sorts modules based on their `_priority` attribute.
- If any modules fail to load or their dependencies fail to install, a ScanError will be raised (unless `self.force_start` is set to True). + If any modules fail to load or their dependencies fail to install, a ScanError will be raised (unless `self.force_start` is True). Attributes: succeeded, failed (tuple): A tuple containing lists of modules that succeeded or failed during the dependency installation. @@ -487,7 +446,7 @@ async def load_modules(self): failed, failed_internal, failed_output (list): Lists of module names that failed to load. Raises: - ScanError: If any module dependencies fail to install or modules fail to load, and if self.force_start is False. + ScanError: If any module dependencies fail to install or modules fail to load, and if `self.force_start` is False. Returns: None @@ -496,24 +455,21 @@ async def load_modules(self): After all modules are loaded, they are sorted by `_priority` and stored in the `modules` dictionary. """ if not self._modules_loaded: - all_modules = list(set(self._scan_modules + self._output_modules + self._internal_modules)) - if not all_modules: + if not self.preset.modules: self.warning(f"No modules to load") return - if not self._scan_modules: + if not self.preset.scan_modules: self.warning(f"No scan modules to load") # install module dependencies - succeeded, failed = await self.helpers.depsinstaller.install( - *self._scan_modules, *self._output_modules, *self._internal_modules - ) + succeeded, failed = await self.helpers.depsinstaller.install(*self.preset.modules) if failed: msg = f"Failed to install dependencies for {len(failed):,} modules: {','.join(failed)}" self._fail_setup(msg) - modules = sorted([m for m in self._scan_modules if m in succeeded]) - output_modules = sorted([m for m in self._output_modules if m in succeeded]) - internal_modules = sorted([m for m in self._internal_modules if m in succeeded]) + modules = sorted([m for m in self.preset.scan_modules if m in succeeded]) + output_modules = sorted([m for m in self.preset.output_modules if m in succeeded]) + internal_modules = sorted([m for m in self.preset.internal_modules if m in succeeded]) # Load scan modules self.verbose(f"Loading {len(modules):,} scan modules: {','.join(modules)}") @@ -524,7 +480,7 @@ async def load_modules(self): self._fail_setup(msg) if loaded_modules: self.info( - f"Loaded {len(loaded_modules):,}/{len(self._scan_modules):,} scan modules ({','.join(loaded_modules)})" + f"Loaded {len(loaded_modules):,}/{len(self.preset.scan_modules):,} scan modules ({','.join(loaded_modules)})" ) # Load internal modules @@ -536,7 +492,7 @@ async def load_modules(self): self._fail_setup(msg) if loaded_internal_modules: self.info( - f"Loaded {len(loaded_internal_modules):,}/{len(self._internal_modules):,} internal modules ({','.join(loaded_internal_modules)})" + f"Loaded {len(loaded_internal_modules):,}/{len(self.preset.internal_modules):,} internal modules ({','.join(loaded_internal_modules)})" ) # Load output modules @@ -548,12 +504,160 @@ async def load_modules(self): self._fail_setup(msg) if loaded_output_modules: self.info( - f"Loaded {len(loaded_output_modules):,}/{len(self._output_modules):,} output modules, ({','.join(loaded_output_modules)})" + f"Loaded {len(loaded_output_modules):,}/{len(self.preset.output_modules):,} output modules, ({','.join(loaded_output_modules)})" ) - self.modules = OrderedDict(sorted(self.modules.items(), key=lambda x: getattr(x[-1], "_priority", 0))) + # builtin intercept modules + self.ingress_module = ScanIngress(self) + self.egress_module 
= ScanEgress(self) + self.modules[self.ingress_module.name] = self.ingress_module + self.modules[self.egress_module.name] = self.egress_module + + # sort modules by priority + self.modules = OrderedDict(sorted(self.modules.items(), key=lambda x: getattr(x[-1], "priority", 3))) + self._modules_loaded = True + @property + def modules_finished(self): + finished_modules = [m.finished for m in self.modules.values()] + return all(finished_modules) + + def kill_module(self, module_name, message=None): + from signal import SIGINT + + module = self.modules[module_name] + module.set_error_state(message=message, clear_outgoing_queue=True) + for proc in module._proc_tracker: + with contextlib.suppress(Exception): + proc.send_signal(SIGINT) + self.helpers.cancel_tasks_sync(module._tasks) + + @property + def queued_event_types(self): + event_types = {} + queues = set() + + for module in self.modules.values(): + queues.add(module.incoming_event_queue) + queues.add(module.outgoing_event_queue) + + for q in queues: + for item in q._queue: + try: + event, _ = item + except ValueError: + event = item + event_type = getattr(event, "type", None) + if event_type is not None: + try: + event_types[event_type] += 1 + except KeyError: + event_types[event_type] = 1 + + return event_types + + def modules_status(self, _log=False): + finished = True + status = {"modules": {}} + + sorted_modules = [] + for module_name, module in self.modules.items(): + # if module_name.startswith("_"): + # continue + sorted_modules.append(module) + mod_status = module.status + if mod_status["running"]: + finished = False + status["modules"][module_name] = mod_status + + # sort modules by name + sorted_modules.sort(key=lambda m: m.name) + + status["finished"] = finished + + modules_errored = [m for m, s in status["modules"].items() if s["errored"]] + + max_mem_percent = 90 + mem_status = self.helpers.memory_status() + # abort if we don't have the memory + mem_percent = mem_status.percent + if mem_percent > max_mem_percent: + free_memory = mem_status.available + free_memory_human = self.helpers.bytes_to_human(free_memory) + self.warning(f"System memory is at {mem_percent:.1f}% ({free_memory_human} remaining)") + + if _log: + modules_status = [] + for m, s in status["modules"].items(): + running = s["running"] + incoming = s["events"]["incoming"] + outgoing = s["events"]["outgoing"] + tasks = s["tasks"] + total = sum([incoming, outgoing, tasks]) + if running or total > 0: + modules_status.append((m, running, incoming, outgoing, tasks, total)) + modules_status.sort(key=lambda x: x[-1], reverse=True) + + if modules_status: + modules_status_str = ", ".join([f"{m}({i:,}:{t:,}:{o:,})" for m, r, i, o, t, _ in modules_status]) + self.info(f"{self.name}: Modules running (incoming:processing:outgoing) {modules_status_str}") + else: + self.info(f"{self.name}: No modules running") + event_type_summary = sorted(self.stats.events_emitted_by_type.items(), key=lambda x: x[-1], reverse=True) + if event_type_summary: + self.info( + f'{self.name}: Events produced so far: {", ".join([f"{k}: {v}" for k,v in event_type_summary])}' + ) + else: + self.info(f"{self.name}: No events produced yet") + + if modules_errored: + self.verbose( + f'{self.name}: Modules errored: {len(modules_errored):,} ({", ".join([m for m in modules_errored])})' + ) + + queued_events_by_type = [(k, v) for k, v in self.queued_event_types.items() if v > 0] + if queued_events_by_type: + queued_events_by_type.sort(key=lambda x: x[-1], reverse=True) + queued_events_by_type_str = ", 
".join(f"{m}: {t:,}" for m, t in queued_events_by_type) + num_queued_events = sum(v for k, v in queued_events_by_type) + self.info(f"{self.name}: {num_queued_events:,} events in queue ({queued_events_by_type_str})") + else: + self.info(f"{self.name}: No events in queue") + + if self.log_level <= logging.DEBUG: + # status debugging + scan_active_status = [] + scan_active_status.append(f"scan._finished_init: {self._finished_init}") + scan_active_status.append(f"scan.modules_finished: {self.modules_finished}") + for m in sorted_modules: + running = m.running + scan_active_status.append(f" {m}.finished: {m.finished}") + scan_active_status.append(f" running: {running}") + if running: + scan_active_status.append(f" tasks:") + for task in list(m._task_counter.tasks.values()): + scan_active_status.append(f" - {task}:") + scan_active_status.append(f" incoming_queue_size: {m.num_incoming_events}") + scan_active_status.append(f" outgoing_queue_size: {m.outgoing_event_queue.qsize()}") + for line in scan_active_status: + self.debug(line) + + # log module memory usage + module_memory_usage = [] + for module in sorted_modules: + memory_usage = module.memory_usage + module_memory_usage.append((module.name, memory_usage)) + module_memory_usage.sort(key=lambda x: x[-1], reverse=True) + self.debug(f"MODULE MEMORY USAGE:") + for module_name, usage in module_memory_usage: + self.debug(f" - {module_name}: {self.helpers.bytes_to_human(usage)}") + + status.update({"modules_errored": len(modules_errored)}) + + return status + def stop(self): """Stops the in-progress scan and performs necessary cleanup. @@ -565,13 +669,14 @@ def stop(self): if not self._stopping: self._stopping = True self.status = "ABORTING" - self.hugewarning(f"Aborting scan") + self.hugewarning("Aborting scan") self.trace() self._cancel_tasks() self._drain_queues() self.helpers.kill_children() self._drain_queues() self.helpers.kill_children() + self.debug("Finished aborting scan") async def finish(self): """Finalizes the scan by invoking the `finished()` method on all active modules if new activity is detected. @@ -586,13 +691,13 @@ async def finish(self): This method alters the scan's status to "FINISHING" if new activity is detected. 
""" # if new events were generated since last time we were here - if self.manager._new_activity: - self.manager._new_activity = False + if self._new_activity: + self._new_activity = False self.status = "FINISHING" # Trigger .finished() on every module and start over log.info("Finishing scan") - finished_event = self.make_event("FINISHED", "FINISHED", dummy=True) for module in self.modules.values(): + finished_event = self.make_event(f"FINISHED", "FINISHED", dummy=True, tags={module.name}) await module.queue_event(finished_event) self.verbose("Completed finish()") return True @@ -618,9 +723,6 @@ def _drain_queues(self): while 1: if module.outgoing_event_queue: module.outgoing_event_queue.get_nowait() - with contextlib.suppress(asyncio.queues.QueueEmpty): - while 1: - self.manager.incoming_event_queue.get_nowait() self.debug("Finished draining queues") def _cancel_tasks(self): @@ -634,6 +736,7 @@ def _cancel_tasks(self): Returns: None """ + self.debug("Cancelling all scan tasks") tasks = [] # module workers for m in self.modules.values(): @@ -650,7 +753,9 @@ def _cancel_tasks(self): tasks += self._manager_worker_loop_tasks self.helpers.cancel_tasks_sync(tasks) # process pool - self.process_pool.shutdown(cancel_futures=True) + self.helpers.process_pool.shutdown(cancel_futures=True) + self.debug("Finished cancelling all scan tasks") + return tasks async def _report(self): """Asynchronously executes the `report()` method for each module in the scan. @@ -683,47 +788,54 @@ async def _cleanup(self): None """ self.status = "CLEANING_UP" + # clean up dns engine + self.helpers.dns.cleanup() + # clean up modules for mod in self.modules.values(): await mod._cleanup() + # clean up self if not self._cleanedup: self._cleanedup = True with contextlib.suppress(Exception): self.home.rmdir() self.helpers.clean_old_scans() - def in_scope(self, e): - """ - Check whether a hostname, url, IP, etc. is in scope. - Accepts either events or string data. + def in_scope(self, *args, **kwargs): + return self.preset.in_scope(*args, **kwargs) - Checks whitelist and blacklist. - If `e` is an event and its scope distance is zero, it will be considered in-scope. + def whitelisted(self, *args, **kwargs): + return self.preset.whitelisted(*args, **kwargs) - Examples: - Check if a URL is in scope: - >>> scan.in_scope("http://www.evilcorp.com") - True - """ - try: - e = make_event(e, dummy=True) - except ValidationError: - return False - in_scope = e.scope_distance == 0 or self.whitelisted(e) - return in_scope and not self.blacklisted(e) + def blacklisted(self, *args, **kwargs): + return self.preset.blacklisted(*args, **kwargs) - def blacklisted(self, e): - """ - Check whether a hostname, url, IP, etc. is blacklisted. - """ - e = make_event(e, dummy=True) - return e in self.blacklist + @property + def core(self): + return self.preset.core - def whitelisted(self, e): - """ - Check whether a hostname, url, IP, etc. is whitelisted. 
- """ - e = make_event(e, dummy=True) - return e in self.whitelist + @property + def config(self): + return self.preset.core.config + + @property + def target(self): + return self.preset.target + + @property + def whitelist(self): + return self.preset.whitelist + + @property + def blacklist(self): + return self.preset.blacklist + + @property + def helpers(self): + return self.preset.helpers + + @property + def force_start(self): + return self.preset.force_start @property def word_cloud(self): @@ -799,34 +911,10 @@ def root_event(self): root_event = self.make_event(data=f"{self.name} ({self.id})", event_type="SCAN", dummy=True) root_event._id = self.id root_event.scope_distance = 0 - root_event._resolved.set() root_event.source = root_event - root_event.module = self.helpers._make_dummy_module(name="TARGET", _type="TARGET") + root_event.module = self._make_dummy_module(name="TARGET", _type="TARGET") return root_event - def run_in_executor(self, callback, *args, **kwargs): - """ - Run a synchronous task in the event loop's default thread pool executor - - Examples: - Execute callback: - >>> result = await self.scan.run_in_executor(callback_fn, arg1, arg2) - """ - callback = partial(callback, **kwargs) - return self._loop.run_in_executor(None, callback, *args) - - def run_in_executor_mp(self, callback, *args, **kwargs): - """ - Same as run_in_executor() except with a process pool executor - Use only in cases where callback is CPU-bound - - Examples: - Execute callback: - >>> result = await self.scan.run_in_executor_mp(callback_fn, arg1, arg2) - """ - callback = partial(callback, **kwargs) - return self._loop.run_in_executor(self.process_pool, callback, *args) - @property def dns_regexes(self): """ @@ -949,7 +1037,7 @@ def log_level(self): """ Return the current log level, e.g. 
logging.INFO """ - return get_log_level() + return self.core.logger.log_level @property def _log_handlers(self): @@ -971,47 +1059,35 @@ def _log_handlers(self): def _start_log_handlers(self): # add log handlers for handler in self._log_handlers: - add_log_handler(handler) + self.core.logger.add_log_handler(handler) # temporarily disable main ones for handler_name in ("file_main", "file_debug"): - handler = get_log_handlers().get(handler_name, None) + handler = self.core.logger.log_handlers.get(handler_name, None) if handler is not None and handler not in self._log_handler_backup: self._log_handler_backup.append(handler) - remove_log_handler(handler) + self.core.logger.remove_log_handler(handler) def _stop_log_handlers(self): # remove log handlers for handler in self._log_handlers: - remove_log_handler(handler) + self.core.logger.remove_log_handler(handler) # restore main ones for handler in self._log_handler_backup: - add_log_handler(handler) - - def _internal_modules(self): - for modname in module_loader.preloaded(type="internal"): - if self.config.get(modname, True): - yield modname + self.core.logger.add_log_handler(handler) def _fail_setup(self, msg): msg = str(msg) - if not self.force_start: - msg += " (--force to run module anyway)" if self.force_start: self.error(msg) else: + msg += " (--force to run module anyway)" raise ScanError(msg) - @property - def _loop(self): - if self.__loop is None: - self.__loop = asyncio.get_event_loop() - return self.__loop - def _load_modules(self, modules): modules = [str(m) for m in modules] loaded_modules = {} failed = set() - for module_name, module_class in module_loader.load_modules(modules).items(): + for module_name, module_class in self.preset.module_loader.load_modules(modules).items(): if module_class: try: loaded_modules[module_name] = module_class(self) @@ -1028,10 +1104,10 @@ async def _status_ticker(self, interval=15): async with self._acatch(): while 1: await asyncio.sleep(interval) - self.manager.modules_status(_log=True) + self.modules_status(_log=True) @contextlib.asynccontextmanager - async def _acatch(self, context="scan", finally_callback=None): + async def _acatch(self, context="scan", finally_callback=None, unhandled_is_critical=False): """ Async version of catch() @@ -1041,9 +1117,9 @@ async def _acatch(self, context="scan", finally_callback=None): try: yield except BaseException as e: - self._handle_exception(e, context=context) + self._handle_exception(e, context=context, unhandled_is_critical=unhandled_is_critical) - def _handle_exception(self, e, context="scan", finally_callback=None): + def _handle_exception(self, e, context="scan", finally_callback=None, unhandled_is_critical=False): if callable(context): context = f"{context.__qualname__}()" filename, lineno, funcname = self.helpers.get_traceback_details(e) @@ -1056,7 +1132,43 @@ def _handle_exception(self, e, context="scan", finally_callback=None): elif isinstance(e, asyncio.CancelledError): raise elif isinstance(e, Exception): - log.error(f"Error in {context}: {filename}:{lineno}:{funcname}(): {e}") - log.trace(traceback.format_exc()) + if unhandled_is_critical: + log.critical(f"Error in {context}: {filename}:{lineno}:{funcname}(): {e}") + log.critical(traceback.format_exc()) + else: + log.error(f"Error in {context}: {filename}:{lineno}:{funcname}(): {e}") + log.trace(traceback.format_exc()) if callable(finally_callback): finally_callback(e) + + def _make_dummy_module(self, name, _type="scan"): + """ + Construct a dummy module, for attachment to events + """ + try: + 
return self.dummy_modules[name]
+        except KeyError:
+            dummy = DummyModule(scan=self, name=name, _type=_type)
+            self.dummy_modules[name] = dummy
+        return dummy
+
+    def _make_dummy_module_dns(self, name):
+        try:
+            dummy_module = self.dummy_modules[name]
+        except KeyError:
+            dummy_module = self._make_dummy_module(name=name, _type="DNS")
+            dummy_module.suppress_dupes = False
+            self.dummy_modules[name] = dummy_module
+        return dummy_module
+
+
+from bbot.modules.base import BaseModule
+
+
+class DummyModule(BaseModule):
+    _priority = 4
+
+    def __init__(self, *args, **kwargs):
+        self._name = kwargs.pop("name")
+        self._type = kwargs.pop("_type")
+        super().__init__(*args, **kwargs)
diff --git a/bbot/scanner/stats.py b/bbot/scanner/stats.py
index 0c0a4d287..617703d8c 100644
--- a/bbot/scanner/stats.py
+++ b/bbot/scanner/stats.py
@@ -23,6 +23,9 @@ def event_produced(self, event):
             module_stat.increment_produced(event)
 
     def event_consumed(self, event, module):
+        # skip ingress/egress modules, etc.
+        if module.name.startswith("_"):
+            return
         module_stat = self.get(module)
         if module_stat is not None:
             module_stat.increment_consumed(event)
diff --git a/bbot/scanner/target.py b/bbot/scanner/target.py
index 1cf3cebdb..b19d1b6a6 100644
--- a/bbot/scanner/target.py
+++ b/bbot/scanner/target.py
@@ -1,8 +1,11 @@
+import re
+import copy
 import logging
 import ipaddress
 from contextlib import suppress
+from radixtarget import RadixTarget
 
-from bbot.core.errors import *
+from bbot.errors import *
 from bbot.modules.base import BaseModule
 from bbot.core.event import make_event, is_event
 
@@ -14,12 +17,12 @@ class Target:
     A class representing a target. Can contain an unlimited number of hosts, IP or IP ranges, URLs, etc.
 
     Attributes:
-        make_in_scope (bool): Specifies whether to mark contained events as in-scope.
-        scan (Scan): Reference to the Scan object that instantiated the Target.
-        _events (dict): Dictionary mapping hosts to events related to the target.
         strict_scope (bool): Flag indicating whether to consider child domains in-scope.
             If set to True, only the exact hosts specified and not their children are considered part of the target.
+        _radix (RadixTarget): Radix tree for quick IP/DNS lookups.
+        _events (set): Flat set of contained events.
+
 
     Examples:
         Basic usage
-        >>> target = Target(scan, "evilcorp.com", "1.2.3.0/24")
+        >>> target = Target("evilcorp.com", "1.2.3.0/24")
@@ -62,17 +65,13 @@ class Target:
         - If you do not want to include child subdomains, use `strict_scope=True`
     """
 
-    def __init__(self, scan, *targets, strict_scope=False, make_in_scope=False):
+    def __init__(self, *targets, strict_scope=False):
         """
         Initialize a Target object.
 
         Args:
-            scan (Scan): Reference to the Scan object that instantiated the Target.
             *targets: One or more targets (e.g., domain names, IP ranges) to be included in this Target.
             strict_scope (bool, optional): Flag to control whether only the exact hosts are considered in-scope.
                 Defaults to False.
-            make_in_scope (bool, optional): Flag to control whether contained events are marked as in-scope.
-                Defaults to False.
 
         Attributes:
-            scan (Scan): Reference to the Scan object.
@@ -83,12 +82,14 @@ def __init__(self, scan, *targets, strict_scope=False, make_in_scope=False):
         - The strict_scope flag can be set to restrict scope calculation to only exactly-matching hosts and not their child subdomains.
-        - Each target is processed and stored as an `Event` in the '_events' dictionary.
+        - Each target is processed and stored as an `Event` in the `_events` set.
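+        - Targets prefixed with "ORG:" or "ASN:" are parsed as ORG_STUB and ASN events, respectively.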
""" - self.scan = scan self.strict_scope = strict_scope - self.make_in_scope = make_in_scope + self.special_event_types = { + "ORG_STUB": re.compile(r"^ORG:(.*)", re.IGNORECASE), + "ASN": re.compile(r"^ASN:(.*)", re.IGNORECASE), + } + self._events = set() + self._radix = RadixTarget() - self._dummy_module = TargetDummyModule(scan) - self._events = dict() if len(targets) > 0: log.verbose(f"Creating events from {len(targets):,} targets") for t in targets: @@ -112,43 +113,40 @@ def add_target(self, t, event_type=None): Notes: - If `t` is of the same class as this Target, all its events are merged. - If `t` is an event, it is directly added to `_events`. - - If `make_in_scope` is True, the scope distance of the event is set to 0. """ - if type(t) == self.__class__: - for k, v in t._events.items(): - try: - self._events[k].update(v) - except KeyError: - self._events[k] = set(t._events[k]) - else: - if is_event(t): - event = t + if not isinstance(t, (list, tuple, set)): + t = [t] + for single_target in t: + if type(single_target) == self.__class__: + for event in single_target.events: + self._add_event(event) else: - try: - event = self.scan.make_event( - t, - event_type=event_type, - source=self.scan.root_event, - module=self._dummy_module, - tags=["target"], - ) - except ValidationError as e: - # allow commented lines - if not str(t).startswith("#"): - raise ValidationError(f'Could not add target "{t}": {e}') - if self.make_in_scope: - event.scope_distance = 0 - try: - self._events[event.host].add(event) - except KeyError: - self._events[event.host] = { - event, - } + if is_event(single_target): + event = single_target + else: + for eventtype, regex in self.special_event_types.items(): + match = regex.match(single_target) + if match: + single_target = match.groups()[0] + event_type = eventtype + break + try: + event = make_event( + single_target, + event_type=event_type, + dummy=True, + tags=["target"], + ) + except ValidationError as e: + # allow commented lines + if not str(t).startswith("#"): + raise ValidationError(f'Could not add target "{t}": {e}') + self._add_event(event) @property def events(self): """ - A generator property that yields all events in the target. + Returns all events in the target. Yields: Event object: One of the Event objects stored in the `_events` dictionary. @@ -160,17 +158,15 @@ def events(self): Notes: - This property is read-only. - - Iterating over this property gives you one event at a time from the `_events` dictionary. """ - for _events in self._events.values(): - yield from _events + return self._events def copy(self): """ - Creates and returns a copy of the Target object, including a shallow copy of the `_events` attribute. + Creates and returns a copy of the Target object, including a shallow copy of the `_events` and `_radix` attributes. Returns: - Target: A new Target object with the same `scan` and `strict_scope` attributes as the original. + Target: A new Target object with the sameattributes as the original. A shallow copy of the `_events` dictionary is made. Examples: @@ -188,13 +184,14 @@ def copy(self): Notes: - The `scan` object reference is kept intact in the copied Target object. """ - self_copy = self.__class__(self.scan, strict_scope=self.strict_scope) - self_copy._events = dict(self._events) + self_copy = self.__class__() + self_copy._events = set(self._events) + self_copy._radix = copy.copy(self._radix) return self_copy def get(self, host): """ - Gets the event associated with the specified host from the target's `_events` dictionary. 
+ Gets the event associated with the specified host from the target's radix tree. Args: host (Event, Target, or str): The hostname, IP, URL, or event to look for. @@ -220,15 +217,24 @@ def get(self, host): return if other.host: with suppress(KeyError, StopIteration): - return next(iter(self._events[other.host])) - if self.scan.helpers.is_ip_type(other.host): - for n in self.scan.helpers.ip_network_parents(other.host, include_self=True): - with suppress(KeyError, StopIteration): - return next(iter(self._events[n])) - elif not self.strict_scope: - for h in self.scan.helpers.domain_parents(other.host): - with suppress(KeyError, StopIteration): - return next(iter(self._events[h])) + result = self._radix.search(other.host) + if result is not None: + for event in result: + # if the result is a dns name and strict scope is enabled + if isinstance(event.host, str) and self.strict_scope: + # if the result doesn't exactly equal the host, abort + if event.host != other.host: + return + return event + + def _add_event(self, event): + radix_data = self._radix.search(event.host) + if radix_data is None: + radix_data = {event} + self._radix.insert(event.host, radix_data) + else: + radix_data.add(event) + self._events.add(event) def _contains(self, other): if self.get(other) is not None: @@ -278,11 +284,11 @@ def __len__(self): - For other types of hosts, each unique event is counted as one. """ num_hosts = 0 - for host, _events in self._events.items(): - if type(host) in (ipaddress.IPv4Network, ipaddress.IPv6Network): - num_hosts += host.num_addresses + for event in self._events: + if isinstance(event.host, (ipaddress.IPv4Network, ipaddress.IPv6Network)): + num_hosts += event.host.num_addresses else: - num_hosts += len(_events) + num_hosts += 1 return num_hosts diff --git a/bbot/scripts/docs.py b/bbot/scripts/docs.py index 8e6d045f3..f8a5050a3 100755 --- a/bbot/scripts/docs.py +++ b/bbot/scripts/docs.py @@ -5,8 +5,9 @@ import yaml from pathlib import Path -from bbot.modules import module_loader -from bbot.core.configurator.args import parser, scan_examples +from bbot.scanner import Preset + +DEFAULT_PRESET = Preset() os.environ["BBOT_TABLE_FORMAT"] = "github" @@ -18,6 +19,14 @@ bbot_code_dir = Path(__file__).parent.parent.parent +def homedir_collapseuser(f): + f = Path(f) + home_dir = Path.home() + if f.is_relative_to(home_dir): + return Path("~") / f.relative_to(home_dir) + return f + + def enclose_tags(text): # Use re.sub() to replace matched words with the same words enclosed in backticks result = blacklist_re.sub(r"|`\1`|", text) @@ -63,12 +72,12 @@ def update_individual_module_options(): content = f.read() for match in regex.finditer(content): module_name = match.groups()[0].lower() - bbot_module_options_table = module_loader.modules_options_table(modules=[module_name]) + bbot_module_options_table = DEFAULT_PRESET.module_loader.modules_options_table(modules=[module_name]) find_replace_file(file, f"BBOT MODULE OPTIONS {module_name.upper()}", bbot_module_options_table) # Example commands bbot_example_commands = [] - for title, description, command in scan_examples: + for title, description, command in DEFAULT_PRESET.args.scan_examples: example = "" example += f"**{title}:**\n\n" # example += f"{description}\n" @@ -79,37 +88,79 @@ def update_individual_module_options(): update_md_files("BBOT EXAMPLE COMMANDS", bbot_example_commands) # Help output - bbot_help_output = parser.format_help().replace("docs.py", "bbot") + bbot_help_output = DEFAULT_PRESET.args.parser.format_help().replace("docs.py", "bbot") 
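+    # the help text is generated from the default preset's argument parser,
+    # so newly-added modules and flags are reflected automatically when the docs are rebuilt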
bbot_help_output = f"```text\n{bbot_help_output}\n```" assert len(bbot_help_output.splitlines()) > 50 update_md_files("BBOT HELP OUTPUT", bbot_help_output) # BBOT events - bbot_event_table = module_loader.events_table() + bbot_event_table = DEFAULT_PRESET.module_loader.events_table() assert len(bbot_event_table.splitlines()) > 10 update_md_files("BBOT EVENTS", bbot_event_table) # BBOT modules - bbot_module_table = module_loader.modules_table() + bbot_module_table = DEFAULT_PRESET.module_loader.modules_table() assert len(bbot_module_table.splitlines()) > 50 update_md_files("BBOT MODULES", bbot_module_table) # BBOT output modules - bbot_output_module_table = module_loader.modules_table(mod_type="output") + bbot_output_module_table = DEFAULT_PRESET.module_loader.modules_table(mod_type="output") assert len(bbot_output_module_table.splitlines()) > 10 update_md_files("BBOT OUTPUT MODULES", bbot_output_module_table) # BBOT module options - bbot_module_options_table = module_loader.modules_options_table() + bbot_module_options_table = DEFAULT_PRESET.module_loader.modules_options_table() assert len(bbot_module_options_table.splitlines()) > 100 update_md_files("BBOT MODULE OPTIONS", bbot_module_options_table) update_individual_module_options() # BBOT module flags - bbot_module_flags_table = module_loader.flags_table() + bbot_module_flags_table = DEFAULT_PRESET.module_loader.flags_table() assert len(bbot_module_flags_table.splitlines()) > 10 update_md_files("BBOT MODULE FLAGS", bbot_module_flags_table) + # BBOT presets + bbot_presets_table = DEFAULT_PRESET.presets_table(include_modules=True) + assert len(bbot_presets_table.splitlines()) > 5 + update_md_files("BBOT PRESETS", bbot_presets_table) + + # BBOT subdomain enum preset + for yaml_file, (loaded_preset, category, preset_path, original_filename) in DEFAULT_PRESET.all_presets.items(): + if loaded_preset.name == "subdomain-enum": + subdomain_enum_preset = f"""```yaml title="{yaml_file.name}" +{loaded_preset._yaml_str} +```""" + update_md_files("BBOT SUBDOMAIN ENUM PRESET", subdomain_enum_preset) + break + + content = [] + for yaml_file, (loaded_preset, category, preset_path, original_filename) in DEFAULT_PRESET.all_presets.items(): + yaml_str = loaded_preset._yaml_str + indent = " " * 4 + yaml_str = f"\n{indent}".join(yaml_str.splitlines()) + filename = homedir_collapseuser(yaml_file) + + num_modules = len(loaded_preset.scan_modules) + modules = ", ".join(sorted([f"`{m}`" for m in loaded_preset.scan_modules])) + category = f"Category: {category}" if category else "" + + content.append( + f"""## **{loaded_preset.name}** + +{loaded_preset.description} + +??? 
note "`{filename.name}`" + ```yaml title="{filename}" + {yaml_str} + ``` + +{category} + +Modules: [{num_modules:,}]("{modules}")""" + ) + assert len(content) > 5 + update_md_files("BBOT PRESET YAML", "\n\n".join(content)) + # Default config default_config_file = bbot_code_dir / "bbot" / "defaults.yml" with open(default_config_file) as f: diff --git a/bbot/test/bbot_fixtures.py b/bbot/test/bbot_fixtures.py index 85af696cd..14495f41a 100644 --- a/bbot/test/bbot_fixtures.py +++ b/bbot/test/bbot_fixtures.py @@ -1,5 +1,4 @@ -import os -import dns +import os # noqa import sys import pytest import asyncio # noqa @@ -8,10 +7,39 @@ import tldextract import pytest_httpserver from pathlib import Path -from omegaconf import OmegaConf +from omegaconf import OmegaConf # noqa from werkzeug.wrappers import Request +from bbot.errors import * # noqa: F401 +from bbot.core import CORE +from bbot.scanner import Preset +from bbot.core.helpers.misc import mkdir + + +log = logging.getLogger(f"bbot.test.fixtures") + + +bbot_test_dir = Path("/tmp/.bbot_test") +mkdir(bbot_test_dir) + + +DEFAULT_PRESET = Preset() + +available_modules = list(DEFAULT_PRESET.module_loader.configs(type="scan")) +available_output_modules = list(DEFAULT_PRESET.module_loader.configs(type="output")) +available_internal_modules = list(DEFAULT_PRESET.module_loader.configs(type="internal")) + + +@pytest.fixture +def clean_default_config(monkeypatch): + clean_config = OmegaConf.merge( + CORE.files_config.get_default_config(), {"modules": DEFAULT_PRESET.module_loader.configs()} + ) + with monkeypatch.context() as m: + m.setattr("bbot.core.core.DEFAULT_CONFIG", clean_config) + yield + class SubstringRequestMatcher(pytest_httpserver.httpserver.RequestMatcher): def match_data(self, request: Request) -> bool: @@ -22,28 +50,12 @@ def match_data(self, request: Request) -> bool: pytest_httpserver.httpserver.RequestMatcher = SubstringRequestMatcher - -test_config = OmegaConf.load(Path(__file__).parent / "test.conf") -if test_config.get("debug", False): - os.environ["BBOT_DEBUG"] = "True" - -from .bbot_fixtures import * # noqa: F401 -import bbot.core.logger # noqa: F401 -from bbot.core.errors import * # noqa: F401 - # silence pytest_httpserver log = logging.getLogger("werkzeug") log.setLevel(logging.CRITICAL) -# silence stdout -root_logger = logging.getLogger() -for h in root_logger.handlers: - h.addFilter(lambda x: x.levelname not in ("STDOUT", "TRACE")) - tldextract.extract("www.evilcorp.com") -log = logging.getLogger(f"bbot.test.fixtures") - @pytest.fixture def bbot_scanner(): @@ -53,16 +65,10 @@ def bbot_scanner(): @pytest.fixture -def scan(monkeypatch, bbot_config): +def scan(monkeypatch): from bbot.scanner import Scanner - bbot_scan = Scanner("127.0.0.1", modules=["ipneighbor"], config=bbot_config) - - fallback_nameservers_file = bbot_scan.helpers.bbot_home / "fallback_nameservers.txt" - with open(fallback_nameservers_file, "w") as f: - f.write("8.8.8.8\n") - monkeypatch.setattr(bbot_scan.helpers.dns, "fallback_nameservers_file", fallback_nameservers_file) - + bbot_scan = Scanner("127.0.0.1", modules=["ipneighbor"]) return bbot_scan @@ -197,104 +203,9 @@ class bbot_events: return bbot_events -@pytest.fixture -def agent(monkeypatch, bbot_config): - from bbot import agent - - test_agent = agent.Agent(bbot_config) - test_agent.setup() - return test_agent - - -# bbot config -from bbot import config as default_config - -test_config = OmegaConf.load(Path(__file__).parent / "test.conf") -test_config = OmegaConf.merge(default_config, test_config) - -if 
test_config.get("debug", False): - logging.getLogger("bbot").setLevel(logging.DEBUG) - - -@pytest.fixture -def bbot_config(): - return test_config - - -from bbot.modules import module_loader - -available_modules = list(module_loader.configs(type="scan")) -available_output_modules = list(module_loader.configs(type="output")) -available_internal_modules = list(module_loader.configs(type="internal")) - - @pytest.fixture(autouse=True) def install_all_python_deps(): deps_pip = set() - for module in module_loader.preloaded().values(): + for module in DEFAULT_PRESET.module_loader.preloaded().values(): deps_pip.update(set(module.get("deps", {}).get("pip", []))) subprocess.run([sys.executable, "-m", "pip", "install"] + list(deps_pip)) - - -class MockResolver: - import dns - - def __init__(self, mock_data=None): - self.mock_data = mock_data if mock_data else {} - self.nameservers = ["127.0.0.1"] - - async def resolve_address(self, ipaddr, *args, **kwargs): - modified_kwargs = {} - modified_kwargs.update(kwargs) - modified_kwargs["rdtype"] = "PTR" - return await self.resolve(str(dns.reversename.from_address(ipaddr)), *args, **modified_kwargs) - - def create_dns_response(self, query_name, rdtype): - query_name = query_name.strip(".") - answers = self.mock_data.get(query_name, {}).get(rdtype, []) - if not answers: - raise self.dns.resolver.NXDOMAIN(f"No answer found for {query_name} {rdtype}") - - message_text = f"""id 1234 -opcode QUERY -rcode NOERROR -flags QR AA RD -;QUESTION -{query_name}. IN {rdtype} -;ANSWER""" - for answer in answers: - message_text += f"\n{query_name}. 1 IN {rdtype} {answer}" - - message_text += "\n;AUTHORITY\n;ADDITIONAL\n" - message = self.dns.message.from_text(message_text) - return message - - async def resolve(self, query_name, rdtype=None): - if rdtype is None: - rdtype = "A" - elif isinstance(rdtype, str): - rdtype = rdtype.upper() - else: - rdtype = str(rdtype.name).upper() - - domain_name = self.dns.name.from_text(query_name) - rdtype_obj = self.dns.rdatatype.from_text(rdtype) - - if "_NXDOMAIN" in self.mock_data and query_name in self.mock_data["_NXDOMAIN"]: - # Simulate the NXDOMAIN exception - raise self.dns.resolver.NXDOMAIN - - try: - response = self.create_dns_response(query_name, rdtype) - answer = self.dns.resolver.Answer(domain_name, rdtype_obj, self.dns.rdataclass.IN, response) - return answer - except self.dns.resolver.NXDOMAIN: - return [] - - -@pytest.fixture() -def mock_dns(): - def _mock_dns(scan, mock_data): - scan.helpers.dns.resolver = MockResolver(mock_data) - - return _mock_dns diff --git a/bbot/test/conftest.py b/bbot/test/conftest.py index b60a0633d..3dd403106 100644 --- a/bbot/test/conftest.py +++ b/bbot/test/conftest.py @@ -1,15 +1,33 @@ +import os import ssl import shutil import pytest import asyncio import logging from pathlib import Path +from omegaconf import OmegaConf from pytest_httpserver import HTTPServer +from bbot.core import CORE from bbot.core.helpers.misc import execute_sync_or_async from bbot.core.helpers.interactsh import server_list as interactsh_servers +test_config = OmegaConf.load(Path(__file__).parent / "test.conf") +if test_config.get("debug", False): + os.environ["BBOT_DEBUG"] = "True" + +if test_config.get("debug", False): + logging.getLogger("bbot").setLevel(logging.DEBUG) +else: + # silence stdout + trace + root_logger = logging.getLogger() + for h in root_logger.handlers: + h.addFilter(lambda x: x.levelname not in ("STDOUT", "TRACE")) + +CORE.merge_default(test_config) + + @pytest.hookimpl(tryfirst=True, 
hookwrapper=True) def pytest_sessionfinish(session, exitstatus): # Remove handlers from all loggers to prevent logging errors at exit diff --git a/bbot/test/test.conf b/bbot/test/test.conf index ba8367461..cadcca687 100644 --- a/bbot/test/test.conf +++ b/bbot/test/test.conf @@ -4,9 +4,6 @@ modules: wordlist: https://raw.githubusercontent.com/danielmiessler/SecLists/master/Discovery/DNS/deepmagic.com-prefixes-top500.txt ffuf: prefix_busting: true - ipneighbor: - test_option: ipneighbor -output_modules: http: url: http://127.0.0.1:11111 username: username @@ -17,18 +14,12 @@ output_modules: token: asdf neo4j: uri: bolt://127.0.0.1:11111 - python: - test_option: asdf -internal_modules: - speculate: - test_option: speculate http_proxy: http_headers: { "test": "header" } ssl_verify: false scope_search_distance: 0 scope_report_distance: 0 scope_dns_search_distance: 1 -plumbus: asdf dns_debug: false user_agent: "BBOT Test User-Agent" http_debug: false diff --git a/bbot/test/test_step_1/test__module__tests.py b/bbot/test/test_step_1/test__module__tests.py index 0d0855557..9d88b1bcc 100644 --- a/bbot/test/test_step_1/test__module__tests.py +++ b/bbot/test/test_step_1/test__module__tests.py @@ -2,8 +2,8 @@ import importlib from pathlib import Path +from bbot.scanner import Preset from ..test_step_2.module_tests.base import ModuleTestBase -from bbot.modules import module_loader log = logging.getLogger("bbot.test.modules") @@ -15,8 +15,11 @@ def test__module__tests(): + + preset = Preset() + # make sure each module has a .py file - for module_name in module_loader.preloaded(): + for module_name in preset.module_loader.preloaded(): module_name = module_name.lower() assert module_name in module_test_files, f'No test file found for module "{module_name}"' diff --git a/bbot/test/test_step_1/test_agent.py b/bbot/test/test_step_1/test_agent.py deleted file mode 100644 index 00d70a751..000000000 --- a/bbot/test/test_step_1/test_agent.py +++ /dev/null @@ -1,158 +0,0 @@ -import json -import websockets -from functools import partial - -from ..bbot_fixtures import * # noqa: F401 - - -_first_run = True -success = False - - -async def websocket_handler(websocket, path, scan_done=None): - # whether this is the first run - global _first_run - first_run = int(_first_run) - # whether the test succeeded - global success - # test phase - phase = "ping" - # control channel or event channel? 
- control = True - - if path == "/control/" and first_run: - # test ping - await websocket.send(json.dumps({"conversation": "90196cc1-299f-4555-82a0-bc22a4247590", "command": "ping"})) - _first_run = False - else: - control = False - - # Bearer token - assert websocket.request_headers["Authorization"] == "Bearer test" - - async for message in websocket: - log.debug(f"PHASE: {phase}, MESSAGE: {message}") - if not control or not first_run: - continue - m = json.loads(message) - # ping - if phase == "ping": - assert json.loads(message)["message_type"] == "pong" - phase = "start_scan_bad" - if phase == "start_scan_bad": - await websocket.send( - json.dumps( - { - "conversation": "90196cc1-299f-4555-82a0-bc22a4247590", - "command": "start_scan", - "arguments": { - "scan_id": "90196cc1-299f-4555-82a0-bc22a4247590", - "targets": ["127.0.0.2"], - "modules": ["asdf"], - "output_modules": ["human"], - "name": "agent_test_scan_bad", - }, - } - ) - ) - phase = "success" - continue - # scan start success - if phase == "success": - assert m["message"]["success"] == "Started scan" - phase = "cleaning_up" - continue - # CLEANING_UP status message - if phase == "cleaning_up": - assert m["message_type"] == "scan_status_change" - assert m["status"] == "CLEANING_UP" - phase = "failed" - continue - # FAILED status message - if phase == "failed": - assert m["message_type"] == "scan_status_change" - assert m["status"] == "FAILED" - phase = "start_scan" - # start good scan - if phase == "start_scan": - await websocket.send( - json.dumps( - { - "conversation": "90196cc1-299f-4555-82a0-bc22a4247590", - "command": "start_scan", - "arguments": { - "scan_id": "90196cc1-299f-4555-82a0-bc22a4247590", - "targets": ["127.0.0.2"], - "modules": ["ipneighbor"], - "output_modules": ["human"], - "name": "agent_test_scan", - }, - } - ) - ) - phase = "success_2" - continue - # scan start success - if phase == "success_2": - assert m["message"]["success"] == "Started scan" - phase = "starting" - continue - # STARTING status message - if phase == "starting": - assert m["message_type"] == "scan_status_change" - assert m["status"] == "STARTING" - phase = "running" - continue - # RUNNING status message - if phase == "running": - assert m["message_type"] == "scan_status_change" - assert m["status"] == "RUNNING" - phase = "finishing" - continue - # FINISHING status message - if phase == "finishing": - assert m["message_type"] == "scan_status_change" - assert m["status"] == "FINISHING" - phase = "cleaning_up_2" - continue - # CLEANING_UP status message - if phase == "cleaning_up_2": - assert m["message_type"] == "scan_status_change" - assert m["status"] == "CLEANING_UP" - phase = "finished_2" - continue - # FINISHED status message - if phase == "finished_2": - assert m["message_type"] == "scan_status_change" - assert m["status"] == "FINISHED" - success = True - scan_done.set() - break - - -@pytest.mark.asyncio -async def test_agent(agent): - scan_done = asyncio.Event() - scan_status = await agent.scan_status() - assert scan_status["error"] == "Scan not in progress" - - _websocket_handler = partial(websocket_handler, scan_done=scan_done) - - global success - async with websockets.serve(_websocket_handler, "127.0.0.1", 8765): - agent_task = asyncio.create_task(agent.start()) - # wait for 90 seconds - await asyncio.wait_for(scan_done.wait(), 60) - assert success - - await agent.start_scan("scan_to_be_cancelled", targets=["127.0.0.1"], modules=["ipneighbor"]) - await agent.start_scan("scan_to_be_rejected", targets=["127.0.0.1"], 
modules=["ipneighbor"]) - await asyncio.sleep(0.1) - await agent.stop_scan() - tasks = [agent.task, agent_task] - for task in tasks: - try: - task.cancel() - await task - except (asyncio.CancelledError, AttributeError): - pass diff --git a/bbot/test/test_step_1/test_cli.py b/bbot/test/test_step_1/test_cli.py index 0ccc94887..0a34b4faa 100644 --- a/bbot/test/test_step_1/test_cli.py +++ b/bbot/test/test_step_1/test_cli.py @@ -2,17 +2,13 @@ @pytest.mark.asyncio -async def test_cli(monkeypatch, bbot_config): +async def test_cli_scan(monkeypatch): from bbot import cli monkeypatch.setattr(sys, "exit", lambda *args, **kwargs: True) monkeypatch.setattr(os, "_exit", lambda *args, **kwargs: True) - monkeypatch.setattr(cli, "config", bbot_config) - old_sys_argv = sys.argv - - home_dir = Path(bbot_config["home"]) - scans_home = home_dir / "scans" + scans_home = bbot_test_dir / "scans" # basic scan monkeypatch.setattr( @@ -20,13 +16,20 @@ async def test_cli(monkeypatch, bbot_config): "argv", ["bbot", "-y", "-t", "127.0.0.1", "www.example.com", "-n", "test_cli_scan", "-c", "dns_resolution=False"], ) - await cli._main() + result = await cli._main() + assert result == True scan_home = scans_home / "test_cli_scan" + assert (scan_home / "preset.yml").is_file(), "preset.yml not found" assert (scan_home / "wordcloud.tsv").is_file(), "wordcloud.tsv not found" assert (scan_home / "output.txt").is_file(), "output.txt not found" assert (scan_home / "output.csv").is_file(), "output.csv not found" - assert (scan_home / "output.ndjson").is_file(), "output.ndjson not found" + assert (scan_home / "output.json").is_file(), "output.json not found" + + with open(scan_home / "preset.yml") as f: + text = f.read() + assert " dns_resolution: false" in text + with open(scan_home / "output.csv") as f: lines = f.readlines() assert lines[0] == "Event type,Event data,IP Address,Source Module,Scope Distance,Event Tags\n" @@ -44,146 +47,434 @@ async def test_cli(monkeypatch, bbot_config): dns_success = True assert ip_success and dns_success, "IP_ADDRESS and/or DNS_NAME are not present in output.txt" + +@pytest.mark.asyncio +async def test_cli_args(monkeypatch, caplog, clean_default_config): + from bbot import cli + + caplog.set_level(logging.INFO) + + monkeypatch.setattr(sys, "exit", lambda *args, **kwargs: True) + monkeypatch.setattr(os, "_exit", lambda *args, **kwargs: True) + # show version + caplog.clear() + assert not caplog.text monkeypatch.setattr("sys.argv", ["bbot", "--version"]) - await cli._main() - - # start agent - monkeypatch.setattr("sys.argv", ["bbot", "--agent-mode"]) - task = asyncio.create_task(cli._main()) - await asyncio.sleep(2) - task.cancel() - try: - await task - except asyncio.CancelledError: - pass + result = await cli._main() + assert result == None + assert len(caplog.text.splitlines()) == 1 + assert caplog.text.count(".") > 1 + + # output dir and scan name + output_dir = bbot_test_dir / "bbot_cli_args_output" + scan_name = "bbot_cli_args_scan_name" + scan_dir = output_dir / scan_name + assert not output_dir.exists() + monkeypatch.setattr("sys.argv", ["bbot", "-o", str(output_dir), "-n", scan_name, "-y"]) + result = await cli._main() + assert result == True + assert output_dir.is_dir() + assert scan_dir.is_dir() + assert "[SCAN]" in open(scan_dir / "output.txt").read() + assert "[INFO]" in open(scan_dir / "scan.log").read() + + # output modules override + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-om", "csv,json", "-y"]) + result = await cli._main() + assert 
result == True + assert "Loaded 2/2 output modules, (csv,json)" in caplog.text + caplog.clear() + monkeypatch.setattr("sys.argv", ["bbot", "-em", "csv,json", "-y"]) + result = await cli._main() + assert result == True + assert "Loaded 2/2 output modules, (human,python)" in caplog.text + + # internal modules override + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-y"]) + result = await cli._main() + assert result == True + assert "Loaded 5/5 internal modules (aggregate,cloud,dns,excavate,speculate)" in caplog.text + caplog.clear() + monkeypatch.setattr("sys.argv", ["bbot", "-em", "excavate", "speculate", "-y"]) + result = await cli._main() + assert result == True + assert "Loaded 3/3 internal modules (aggregate,cloud,dns)" in caplog.text + caplog.clear() + monkeypatch.setattr("sys.argv", ["bbot", "-c", "speculate=false", "-y"]) + result = await cli._main() + assert result == True + assert "Loaded 4/4 internal modules (aggregate,cloud,dns,excavate)" in caplog.text + + # list modules + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "--list-modules"]) + result = await cli._main() + assert result == None + # internal modules + assert "| excavate" in caplog.text + # output modules + assert "| csv" in caplog.text + # scan modules + assert "| wayback" in caplog.text + + # list module options + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "--list-module-options"]) + result = await cli._main() + assert result == None + assert "| modules.wayback.urls" in caplog.text + assert "| bool" in caplog.text + assert "| emit URLs in addition to DNS_NAMEs" in caplog.text + assert "| False" in caplog.text + assert "| modules.massdns.wordlist" in caplog.text + assert "| modules.robots.include_allow" in caplog.text + + # list module options by flag + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-f", "subdomain-enum", "--list-module-options"]) + result = await cli._main() + assert result == None + assert "| modules.wayback.urls" in caplog.text + assert "| bool" in caplog.text + assert "| emit URLs in addition to DNS_NAMEs" in caplog.text + assert "| False" in caplog.text + assert "| modules.massdns.wordlist" in caplog.text + assert not "| modules.robots.include_allow" in caplog.text + + # list module options by module + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-m", "massdns", "-lmo"]) + result = await cli._main() + assert result == None + assert not "| modules.wayback.urls" in caplog.text + assert "| modules.massdns.wordlist" in caplog.text + assert not "| modules.robots.include_allow" in caplog.text + + # list flags + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "--list-flags"]) + result = await cli._main() + assert result == None + assert "| safe" in caplog.text + assert "| Non-intrusive, safe to run" in caplog.text + assert "| active" in caplog.text + assert "| passive" in caplog.text + + # list only a single flag + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-f", "active", "--list-flags"]) + result = await cli._main() + assert result == None + assert not "| safe" in caplog.text + assert "| active" in caplog.text + assert not "| passive" in caplog.text + + # list multiple flags + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-f", "active", "safe", "--list-flags"]) + result = await cli._main() + assert 
result == None + assert "| safe" in caplog.text + assert "| active" in caplog.text + assert not "| passive" in caplog.text + + # custom target type + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-t", "ORG:evilcorp"]) + result = await cli._main() + assert result == True + assert "[ORG_STUB] evilcorp TARGET" in caplog.text + + # activate modules by flag + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-f", "passive"]) + result = await cli._main() + assert result == True # no args + caplog.clear() + assert not caplog.text monkeypatch.setattr("sys.argv", ["bbot"]) - await cli._main() + result = await cli._main() + assert result == None + assert "Target:\n -t TARGET [TARGET ...]" in caplog.text - # enable module by flag - monkeypatch.setattr("sys.argv", ["bbot", "-f", "report"]) - await cli._main() + # list modules + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-l"]) + result = await cli._main() + assert result == None + assert "| massdns" in caplog.text + assert "| httpx" in caplog.text + assert "| robots" in caplog.text + + # list modules by flag + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-f", "subdomain-enum", "-l"]) + result = await cli._main() + assert result == None + assert "| massdns" in caplog.text + assert "| httpx" in caplog.text + assert not "| robots" in caplog.text + + # list modules by flag + required flag + caplog.clear() + monkeypatch.setattr("sys.argv", ["bbot", "-f", "subdomain-enum", "-rf", "passive", "-l"]) + result = await cli._main() + assert result == None + assert "| massdns" in caplog.text + assert not "| httpx" in caplog.text + + # list modules by flag + excluded flag + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-f", "subdomain-enum", "-ef", "active", "-l"]) + result = await cli._main() + assert result == None + assert "| massdns" in caplog.text + assert not "| httpx" in caplog.text + + # list modules by flag + excluded module + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-f", "subdomain-enum", "-em", "massdns", "-l"]) + result = await cli._main() + assert result == None + assert not "| massdns" in caplog.text + assert "| httpx" in caplog.text # unconsoleable output module monkeypatch.setattr("sys.argv", ["bbot", "-om", "web_report"]) - await cli._main() - - # install all deps - monkeypatch.setattr("sys.argv", ["bbot", "--install-all-deps"]) - success = await cli._main() - assert success, "--install-all-deps failed for at least one module" + result = await cli._main() + assert result == True # unresolved dependency monkeypatch.setattr("sys.argv", ["bbot", "-m", "wappalyzer"]) - await cli._main() + result = await cli._main() + assert result == True - # resolved dependency, excluded module + # enable and exclude the same module + caplog.clear() + assert not caplog.text monkeypatch.setattr("sys.argv", ["bbot", "-m", "ffuf_shortnames", "-em", "ffuf_shortnames"]) - await cli._main() + result = await cli._main() + assert result == None + assert 'Unable to add scan module "ffuf_shortnames" because the module has been excluded' in caplog.text # require flags monkeypatch.setattr("sys.argv", ["bbot", "-f", "active", "-rf", "passive"]) - await cli._main() + result = await cli._main() + assert result == True # excluded flags monkeypatch.setattr("sys.argv", ["bbot", "-f", "active", "-ef", "active"]) - await cli._main() + result = await 
cli._main() + assert result == True # slow modules - monkeypatch.setattr("sys.argv", ["bbot", "-m", "massdns"]) - await cli._main() + monkeypatch.setattr("sys.argv", ["bbot", "-m", "bucket_digitalocean"]) + result = await cli._main() + assert result == True # deadly modules + caplog.clear() + assert not caplog.text monkeypatch.setattr("sys.argv", ["bbot", "-m", "nuclei"]) result = await cli._main() assert result == False, "-m nuclei ran without --allow-deadly" + assert "Please specify --allow-deadly to continue" in caplog.text # --allow-deadly monkeypatch.setattr("sys.argv", ["bbot", "-m", "nuclei", "--allow-deadly"]) result = await cli._main() - assert result != False, "-m nuclei failed to run with --allow-deadly" + assert result == True, "-m nuclei failed to run with --allow-deadly" - # show current config - monkeypatch.setattr("sys.argv", ["bbot", "-y", "--current-config"]) - await cli._main() + # install all deps + # monkeypatch.setattr("sys.argv", ["bbot", "--install-all-deps"]) + # success = await cli._main() + # assert success, "--install-all-deps failed for at least one module" - # list modules - monkeypatch.setattr("sys.argv", ["bbot", "-l"]) - await cli._main() - # list module options - monkeypatch.setattr("sys.argv", ["bbot", "--help-all"]) - await cli._main() +def test_cli_config_validation(monkeypatch, caplog): + from bbot import cli + + monkeypatch.setattr(sys, "exit", lambda *args, **kwargs: True) + monkeypatch.setattr(os, "_exit", lambda *args, **kwargs: True) + + # incorrect module option + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-c", "modules.ipnegibhor.num_bits=4"]) + cli.main() + assert 'Could not find module option "modules.ipnegibhor.num_bits"' in caplog.text + assert 'Did you mean "modules.ipneighbor.num_bits"?' in caplog.text - # unpatch sys.argv - monkeypatch.setattr("sys.argv", old_sys_argv) + # incorrect global option + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-c", "web_spier_distance=4"]) + cli.main() + assert 'Could not find module option "web_spier_distance"' in caplog.text + assert 'Did you mean "web_spider_distance"?' in caplog.text -def test_config_validation(monkeypatch, capsys, bbot_config): +def test_cli_module_validation(monkeypatch, caplog): from bbot import cli - from bbot.core.configurator import args monkeypatch.setattr(sys, "exit", lambda *args, **kwargs: True) monkeypatch.setattr(os, "_exit", lambda *args, **kwargs: True) - monkeypatch.setattr(cli, "config", bbot_config) - old_cli_config = args.cli_config + # incorrect module + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-m", "massdnss"]) + cli.main() + assert 'Could not find scan module "massdnss"' in caplog.text + assert 'Did you mean "massdns"?' in caplog.text - # incorrect module option - monkeypatch.setattr(args, "cli_config", ["bbot", "-c", "modules.ipnegibhor.num_bits=4"]) + # incorrect excluded module + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-em", "massdnss"]) cli.main() - captured = capsys.readouterr() - assert 'Could not find module option "modules.ipnegibhor.num_bits"' in captured.err - assert 'Did you mean "modules.ipneighbor.num_bits"?' in captured.err + assert 'Could not find module "massdnss"' in caplog.text + assert 'Did you mean "massdns"?' 
in caplog.text - # incorrect global option - monkeypatch.setattr(args, "cli_config", ["bbot", "-c", "web_spier_distance=4"]) + # incorrect output module + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-om", "neoo4j"]) cli.main() - captured = capsys.readouterr() - assert 'Could not find module option "web_spier_distance"' in captured.err - assert 'Did you mean "web_spider_distance"?' in captured.err + assert 'Could not find output module "neoo4j"' in caplog.text + assert 'Did you mean "neo4j"?' in caplog.text + + # incorrect flag + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-f", "subdomainenum"]) + cli.main() + assert 'Could not find flag "subdomainenum"' in caplog.text + assert 'Did you mean "subdomain-enum"?' in caplog.text + + # incorrect excluded flag + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-ef", "subdomainenum"]) + cli.main() + assert 'Could not find flag "subdomainenum"' in caplog.text + assert 'Did you mean "subdomain-enum"?' in caplog.text - # unpatch cli_options - monkeypatch.setattr(args, "cli_config", old_cli_config) + # incorrect required flag + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-rf", "subdomainenum"]) + cli.main() + assert 'Could not find flag "subdomainenum"' in caplog.text + assert 'Did you mean "subdomain-enum"?' in caplog.text -def test_module_validation(monkeypatch, capsys, bbot_config): - from bbot.core.configurator import args +def test_cli_presets(monkeypatch, capsys, caplog): + import yaml + from bbot import cli monkeypatch.setattr(sys, "exit", lambda *args, **kwargs: True) monkeypatch.setattr(os, "_exit", lambda *args, **kwargs: True) - old_sys_argv = sys.argv + # show current preset + monkeypatch.setattr("sys.argv", ["bbot", "-c", "http_proxy=currentpresettest", "--current-preset"]) + cli.main() + captured = capsys.readouterr() + assert " http_proxy: currentpresettest" in captured.out - # incorrect module - monkeypatch.setattr(sys, "argv", ["bbot", "-m", "massdnss"]) - args.parser.parse_args() + # show current preset (full) + monkeypatch.setattr("sys.argv", ["bbot", "-c" "modules.c99.api_key=asdf", "--current-preset-full"]) + cli.main() captured = capsys.readouterr() - assert 'Could not find module "massdnss"' in captured.err - assert 'Did you mean "massdns"?' 
in captured.err + assert " api_key: asdf" in captured.out + + preset_dir = bbot_test_dir / "test_cli_presets" + preset_dir.mkdir(exist_ok=True) + + preset1_file = preset_dir / "cli_preset1.conf" + with open(preset1_file, "w") as f: + f.write( + """ +config: + http_proxy: http://proxy1 + """ + ) + + preset2_file = preset_dir / "cli_preset2.yml" + with open(preset2_file, "w") as f: + f.write( + """ +config: + http_proxy: http://proxy2 + """ + ) + + # test reading single preset + monkeypatch.setattr("sys.argv", ["bbot", "-p", str(preset1_file.resolve()), "--current-preset"]) + cli.main() + captured = capsys.readouterr() + stdout_preset = yaml.safe_load(captured.out) + assert stdout_preset["config"]["http_proxy"] == "http://proxy1" - # incorrect excluded module - monkeypatch.setattr(sys, "argv", ["bbot", "-em", "massdnss"]) - args.parser.parse_args() + # preset overrides preset + monkeypatch.setattr( + "sys.argv", ["bbot", "-p", str(preset2_file.resolve()), str(preset1_file.resolve()), "--current-preset"] + ) + cli.main() captured = capsys.readouterr() - assert 'Could not find module "massdnss"' in captured.err - assert 'Did you mean "massdns"?' in captured.err + stdout_preset = yaml.safe_load(captured.out) + assert stdout_preset["config"]["http_proxy"] == "http://proxy1" - # incorrect output module - monkeypatch.setattr(sys, "argv", ["bbot", "-om", "neoo4j"]) - args.parser.parse_args() + # override other way + monkeypatch.setattr( + "sys.argv", ["bbot", "-p", str(preset1_file.resolve()), str(preset2_file.resolve()), "--current-preset"] + ) + cli.main() captured = capsys.readouterr() - assert 'Could not find output module "neoo4j"' in captured.err - assert 'Did you mean "neo4j"?' in captured.err + stdout_preset = yaml.safe_load(captured.out) + assert stdout_preset["config"]["http_proxy"] == "http://proxy2" - # incorrect flag - monkeypatch.setattr(sys, "argv", ["bbot", "-f", "subdomainenum"]) - args.parser.parse_args() + # cli config overrides all presets + monkeypatch.setattr( + "sys.argv", + [ + "bbot", + "-p", + str(preset1_file.resolve()), + str(preset2_file.resolve()), + "-c", + "http_proxy=asdf", + "--current-preset", + ], + ) + cli.main() captured = capsys.readouterr() - assert 'Could not find flag "subdomainenum"' in captured.err - assert 'Did you mean "subdomain-enum"?' in captured.err + stdout_preset = yaml.safe_load(captured.out) + assert stdout_preset["config"]["http_proxy"] == "asdf" - # unpatch sys.argv - monkeypatch.setattr("sys.argv", old_sys_argv) + # invalid preset + caplog.clear() + assert not caplog.text + monkeypatch.setattr("sys.argv", ["bbot", "-p", "asdfasdfasdf", "-y"]) + cli.main() + assert "file does not exist. 
Use -lp to list available presets" in caplog.text diff --git a/bbot/test/test_step_1/test_cloud_helpers.py b/bbot/test/test_step_1/test_cloud_helpers.py deleted file mode 100644 index b42da11a7..000000000 --- a/bbot/test/test_step_1/test_cloud_helpers.py +++ /dev/null @@ -1,86 +0,0 @@ -from ..bbot_fixtures import * # noqa: F401 - - -@pytest.mark.asyncio -async def test_cloud_helpers(bbot_scanner, bbot_config): - scan1 = bbot_scanner("127.0.0.1", config=bbot_config) - - provider_names = ("amazon", "google", "azure", "digitalocean", "oracle", "akamai", "cloudflare", "github") - for provider_name in provider_names: - assert provider_name in scan1.helpers.cloud.providers.providers - - for p in scan1.helpers.cloud.providers.providers.values(): - print(f"{p.name}: {p.domains} / {p.ranges}") - amazon_ranges = list(scan1.helpers.cloud["amazon"].ranges) - assert amazon_ranges - amazon_range = next(iter(amazon_ranges)) - amazon_address = amazon_range.broadcast_address - - ip_event = scan1.make_event(amazon_address, source=scan1.root_event) - aws_event1 = scan1.make_event("amazonaws.com", source=scan1.root_event) - aws_event2 = scan1.make_event("asdf.amazonaws.com", source=scan1.root_event) - aws_event3 = scan1.make_event("asdfamazonaws.com", source=scan1.root_event) - aws_event4 = scan1.make_event("test.asdf.aws", source=scan1.root_event) - - other_event1 = scan1.make_event("cname.evilcorp.com", source=scan1.root_event) - other_event2 = scan1.make_event("cname2.evilcorp.com", source=scan1.root_event) - other_event3 = scan1.make_event("cname3.evilcorp.com", source=scan1.root_event) - other_event2._resolved_hosts = {amazon_address} - other_event3._resolved_hosts = {"asdf.amazonaws.com"} - - for event in (ip_event, aws_event1, aws_event2, aws_event4, other_event2, other_event3): - await scan1.helpers.cloud.tag_event(event) - assert "cloud-amazon" in event.tags, f"{event} was not properly cloud-tagged" - - for event in (aws_event3, other_event1): - await scan1.helpers.cloud.tag_event(event) - assert "cloud-amazon" not in event.tags, f"{event} was improperly cloud-tagged" - assert not any( - t for t in event.tags if t.startswith("cloud-") or t.startswith("cdn-") - ), f"{event} was improperly cloud-tagged" - - google_event1 = scan1.make_event("asdf.googleapis.com", source=scan1.root_event) - google_event2 = scan1.make_event("asdf.google", source=scan1.root_event) - google_event3 = scan1.make_event("asdf.evilcorp.com", source=scan1.root_event) - google_event3._resolved_hosts = {"asdf.storage.googleapis.com"} - - for event in (google_event1, google_event2, google_event3): - await scan1.helpers.cloud.tag_event(event) - assert "cloud-google" in event.tags, f"{event} was not properly cloud-tagged" - assert "cloud-storage-bucket" in google_event3.tags - - -@pytest.mark.asyncio -async def test_cloud_helpers_excavate(bbot_scanner, bbot_config, bbot_httpserver): - url = bbot_httpserver.url_for("/test_cloud_helpers_excavate") - bbot_httpserver.expect_request(uri="/test_cloud_helpers_excavate").respond_with_data( - "" - ) - scan1 = bbot_scanner(url, modules=["httpx", "excavate"], config=bbot_config) - events = [e async for e in scan1.async_start()] - assert 1 == len( - [ - e - for e in events - if e.type == "STORAGE_BUCKET" - and e.data["name"] == "asdf" - and "cloud-amazon" in e.tags - and "cloud-storage-bucket" in e.tags - ] - ) - - -@pytest.mark.asyncio -async def test_cloud_helpers_speculate(bbot_scanner, bbot_config): - scan1 = bbot_scanner("asdf.s3.amazonaws.com", modules=["speculate"], config=bbot_config) - 
events = [e async for e in scan1.async_start()] - assert 1 == len( - [ - e - for e in events - if e.type == "STORAGE_BUCKET" - and e.data["name"] == "asdf" - and "cloud-amazon" in e.tags - and "cloud-storage-bucket" in e.tags - ] - ) diff --git a/bbot/test/test_step_1/test_command.py b/bbot/test/test_step_1/test_command.py index 8827bcdad..a3772cefe 100644 --- a/bbot/test/test_step_1/test_command.py +++ b/bbot/test/test_step_1/test_command.py @@ -3,8 +3,8 @@ @pytest.mark.asyncio -async def test_command(bbot_scanner, bbot_config): - scan1 = bbot_scanner(config=bbot_config) +async def test_command(bbot_scanner): + scan1 = bbot_scanner() # run assert "plumbus\n" == (await scan1.helpers.run(["echo", "plumbus"])).stdout @@ -102,25 +102,25 @@ async def test_command(bbot_scanner, bbot_config): path_parts = os.environ.get("PATH", "").split(":") assert "/tmp/.bbot_test/tools" in path_parts run_lines = (await scan1.helpers.run(["env"])).stdout.splitlines() - assert "BBOT_PLUMBUS=asdf" in run_lines + assert "BBOT_USER_AGENT=BBOT Test User-Agent" in run_lines for line in run_lines: if line.startswith("PATH="): path_parts = line.split("=", 1)[-1].split(":") assert "/tmp/.bbot_test/tools" in path_parts run_lines_sudo = (await scan1.helpers.run(["env"], sudo=True)).stdout.splitlines() - assert "BBOT_PLUMBUS=asdf" in run_lines_sudo + assert "BBOT_USER_AGENT=BBOT Test User-Agent" in run_lines_sudo for line in run_lines_sudo: if line.startswith("PATH="): path_parts = line.split("=", 1)[-1].split(":") assert "/tmp/.bbot_test/tools" in path_parts run_live_lines = [l async for l in scan1.helpers.run_live(["env"])] - assert "BBOT_PLUMBUS=asdf" in run_live_lines + assert "BBOT_USER_AGENT=BBOT Test User-Agent" in run_live_lines for line in run_live_lines: if line.startswith("PATH="): path_parts = line.strip().split("=", 1)[-1].split(":") assert "/tmp/.bbot_test/tools" in path_parts run_live_lines_sudo = [l async for l in scan1.helpers.run_live(["env"], sudo=True)] - assert "BBOT_PLUMBUS=asdf" in run_live_lines_sudo + assert "BBOT_USER_AGENT=BBOT Test User-Agent" in run_live_lines_sudo for line in run_live_lines_sudo: if line.startswith("PATH="): path_parts = line.strip().split("=", 1)[-1].split(":") diff --git a/bbot/test/test_step_1/test_config.py b/bbot/test/test_step_1/test_config.py index 2d9980a2c..f62b11912 100644 --- a/bbot/test/test_step_1/test_config.py +++ b/bbot/test/test_step_1/test_config.py @@ -2,9 +2,21 @@ @pytest.mark.asyncio -async def test_config(bbot_config, bbot_scanner): - scan1 = bbot_scanner("127.0.0.1", modules=["ipneighbor", "speculate"], config=bbot_config) +async def test_config(bbot_scanner): + config = OmegaConf.create( + { + "plumbus": "asdf", + "speculate": True, + "modules": { + "ipneighbor": {"test_option": "ipneighbor"}, + "python": {"test_option": "asdf"}, + "speculate": {"test_option": "speculate"}, + }, + } + ) + scan1 = bbot_scanner("127.0.0.1", modules=["ipneighbor"], config=config) await scan1.load_modules() + assert scan1.config.user_agent == "BBOT Test User-Agent" assert scan1.config.plumbus == "asdf" assert scan1.modules["ipneighbor"].config.test_option == "ipneighbor" assert scan1.modules["python"].config.test_option == "asdf" diff --git a/bbot/test/test_step_1/test_depsinstaller.py b/bbot/test/test_step_1/test_depsinstaller.py index 39a56bf41..e3f80d5cf 100644 --- a/bbot/test/test_step_1/test_depsinstaller.py +++ b/bbot/test/test_step_1/test_depsinstaller.py @@ -1,11 +1,9 @@ from ..bbot_fixtures import * -def test_depsinstaller(monkeypatch, bbot_config, bbot_scanner): 
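# (annotation) The signature change above/below is the recurring refactor in this
# diff: the shared `bbot_config` fixture and the global `bbot.modules.module_loader`
# are retired, and configuration travels with each Preset/Scanner instead. A minimal
# sketch, assuming only the API already visible in this diff (the target and config
# values here are illustrative, not taken from the source):
from bbot.scanner import Preset, Scanner

preset = Preset()
# per-preset module metadata, as iterated in test__module__tests above
module_names = [m.lower() for m in preset.module_loader.preloaded()]

# a plain config dict (or an OmegaConf object, as in test_config.py above)
# replaces merging overrides into the old global bbot_config
scan = Scanner("127.0.0.1", config={"dns_resolution": False})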
+def test_depsinstaller(monkeypatch, bbot_scanner): scan = bbot_scanner( "127.0.0.1", - modules=["dnsresolve"], - config=bbot_config, ) # test shell diff --git a/bbot/test/test_step_1/test_dns.py b/bbot/test/test_step_1/test_dns.py index 91465507e..aa2a27907 100644 --- a/bbot/test/test_step_1/test_dns.py +++ b/bbot/test/test_step_1/test_dns.py @@ -2,91 +2,142 @@ @pytest.mark.asyncio -async def test_dns(bbot_scanner, bbot_config, mock_dns): - scan = bbot_scanner("1.1.1.1", config=bbot_config) - helpers = scan.helpers +async def test_dns_engine(bbot_scanner): + scan = bbot_scanner() + result = await scan.helpers.resolve("one.one.one.one") + assert "1.1.1.1" in result + assert not "2606:4700:4700::1111" in result + + results = [_ async for _ in scan.helpers.resolve_batch(("one.one.one.one", "1.1.1.1"))] + pass_1 = False + pass_2 = False + for query, result in results: + if query == "one.one.one.one" and "1.1.1.1" in result: + pass_1 = True + elif query == "1.1.1.1" and "one.one.one.one" in result: + pass_2 = True + assert pass_1 and pass_2 + + results = [_ async for _ in scan.helpers.resolve_raw_batch((("one.one.one.one", "A"), ("1.1.1.1", "PTR")))] + pass_1 = False + pass_2 = False + for (query, rdtype), (result, errors) in results: + _results = [r[0] for r in result] + if query == "one.one.one.one" and "1.1.1.1" in _results: + pass_1 = True + elif query == "1.1.1.1" and "one.one.one.one" in _results: + pass_2 = True + assert pass_1 and pass_2 + + from bbot.core.helpers.dns.engine import DNSEngine + from bbot.core.helpers.dns.mock import MockResolver + + # ensure dns records are being properly cleaned + mockresolver = MockResolver({"evilcorp.com": {"MX": ["0 ."]}}) + mx_records = await mockresolver.resolve("evilcorp.com", rdtype="MX") + results = set() + for r in mx_records: + results.update(DNSEngine.extract_targets(r)) + assert not results + + +@pytest.mark.asyncio +async def test_dns_resolution(bbot_scanner): + scan = bbot_scanner("1.1.1.1") + + from bbot.core.helpers.dns.engine import DNSEngine + + dnsengine = DNSEngine(None) # lowest level functions - a_responses = await helpers._resolve_hostname("one.one.one.one") - aaaa_responses = await helpers._resolve_hostname("one.one.one.one", rdtype="AAAA") - ip_responses = await helpers._resolve_ip("1.1.1.1") + a_responses = await dnsengine._resolve_hostname("one.one.one.one") + aaaa_responses = await dnsengine._resolve_hostname("one.one.one.one", rdtype="AAAA") + ip_responses = await dnsengine._resolve_ip("1.1.1.1") assert a_responses[0].response.answer[0][0].address in ("1.1.1.1", "1.0.0.1") assert aaaa_responses[0].response.answer[0][0].address in ("2606:4700:4700::1111", "2606:4700:4700::1001") assert ip_responses[0].response.answer[0][0].target.to_text() in ("one.one.one.one.",) # mid level functions - _responses, errors = await helpers.resolve_raw("one.one.one.one") + answers, errors = await dnsengine.resolve_raw("one.one.one.one", type="A") responses = [] - for rdtype, response in _responses: - for answers in response: - responses += list(helpers.extract_targets(answers)) + for answer in answers: + responses += list(dnsengine.extract_targets(answer)) assert ("A", "1.1.1.1") in responses - _responses, errors = await helpers.resolve_raw("one.one.one.one", rdtype="AAAA") + assert not ("AAAA", "2606:4700:4700::1111") in responses + answers, errors = await dnsengine.resolve_raw("one.one.one.one", type="AAAA") responses = [] - for rdtype, response in _responses: - for answers in response: - responses += list(helpers.extract_targets(answers)) + 
for answer in answers: + responses += list(dnsengine.extract_targets(answer)) + assert not ("A", "1.1.1.1") in responses assert ("AAAA", "2606:4700:4700::1111") in responses - _responses, errors = await helpers.resolve_raw("1.1.1.1") + answers, errors = await dnsengine.resolve_raw("1.1.1.1") responses = [] - for rdtype, response in _responses: - for answers in response: - responses += list(helpers.extract_targets(answers)) + for answer in answers: + responses += list(dnsengine.extract_targets(answer)) assert ("PTR", "one.one.one.one") in responses # high level functions - assert "1.1.1.1" in await helpers.resolve("one.one.one.one") - assert "2606:4700:4700::1111" in await helpers.resolve("one.one.one.one", type="AAAA") - assert "one.one.one.one" in await helpers.resolve("1.1.1.1") + assert "1.1.1.1" in await dnsengine.resolve("one.one.one.one") + assert "2606:4700:4700::1111" in await dnsengine.resolve("one.one.one.one", type="AAAA") + assert "one.one.one.one" in await dnsengine.resolve("1.1.1.1") for rdtype in ("NS", "SOA", "MX", "TXT"): - assert len(await helpers.resolve("google.com", type=rdtype)) > 0 + assert len(await dnsengine.resolve("google.com", type=rdtype)) > 0 # batch resolution - batch_results = [r async for r in helpers.resolve_batch(["1.1.1.1", "one.one.one.one"])] + batch_results = [r async for r in dnsengine.resolve_batch(["1.1.1.1", "one.one.one.one"])] assert len(batch_results) == 2 batch_results = dict(batch_results) assert any([x in batch_results["one.one.one.one"] for x in ("1.1.1.1", "1.0.0.1")]) assert "one.one.one.one" in batch_results["1.1.1.1"] - # "any" type - resolved = await helpers.resolve("google.com", type="any") - assert any([helpers.is_subdomain(h) for h in resolved]) + # custom batch resolution + batch_results = [r async for r in dnsengine.resolve_raw_batch([("1.1.1.1", "PTR"), ("one.one.one.one", "A")])] + assert len(batch_results) == 2 + batch_results = dict(batch_results) + assert ("1.1.1.1", "A") in batch_results[("one.one.one.one", "A")][0] + assert ("one.one.one.one", "PTR") in batch_results[("1.1.1.1", "PTR")][0] # dns cache - helpers.dns._dns_cache.clear() - assert hash(f"1.1.1.1:PTR") not in helpers.dns._dns_cache - assert hash(f"one.one.one.one:A") not in helpers.dns._dns_cache - assert hash(f"one.one.one.one:AAAA") not in helpers.dns._dns_cache - await helpers.resolve("1.1.1.1", use_cache=False) - await helpers.resolve("one.one.one.one", use_cache=False) - assert hash(f"1.1.1.1:PTR") not in helpers.dns._dns_cache - assert hash(f"one.one.one.one:A") not in helpers.dns._dns_cache - assert hash(f"one.one.one.one:AAAA") not in helpers.dns._dns_cache - - await helpers.resolve("1.1.1.1") - assert hash(f"1.1.1.1:PTR") in helpers.dns._dns_cache - await helpers.resolve("one.one.one.one") - assert hash(f"one.one.one.one:A") in helpers.dns._dns_cache - assert hash(f"one.one.one.one:AAAA") in helpers.dns._dns_cache + dnsengine._dns_cache.clear() + assert hash(f"1.1.1.1:PTR") not in dnsengine._dns_cache + assert hash(f"one.one.one.one:A") not in dnsengine._dns_cache + assert hash(f"one.one.one.one:AAAA") not in dnsengine._dns_cache + await dnsengine.resolve("1.1.1.1", use_cache=False) + await dnsengine.resolve("one.one.one.one", use_cache=False) + assert hash(f"1.1.1.1:PTR") not in dnsengine._dns_cache + assert hash(f"one.one.one.one:A") not in dnsengine._dns_cache + assert hash(f"one.one.one.one:AAAA") not in dnsengine._dns_cache + + await dnsengine.resolve("1.1.1.1") + assert hash(f"1.1.1.1:PTR") in dnsengine._dns_cache + await 
dnsengine.resolve("one.one.one.one", type="A") + assert hash(f"one.one.one.one:A") in dnsengine._dns_cache + assert not hash(f"one.one.one.one:AAAA") in dnsengine._dns_cache + dnsengine._dns_cache.clear() + await dnsengine.resolve("one.one.one.one", type="AAAA") + assert hash(f"one.one.one.one:AAAA") in dnsengine._dns_cache + assert not hash(f"one.one.one.one:A") in dnsengine._dns_cache # Ensure events with hosts have resolved_hosts attribute populated - resolved_hosts_event1 = scan.make_event("one.one.one.one", "DNS_NAME", dummy=True) - resolved_hosts_event2 = scan.make_event("http://one.one.one.one/", "URL_UNVERIFIED", dummy=True) - event_tags1, event_whitelisted1, event_blacklisted1, children1 = await scan.helpers.resolve_event( - resolved_hosts_event1 - ) - event_tags2, event_whitelisted2, event_blacklisted2, children2 = await scan.helpers.resolve_event( - resolved_hosts_event2 - ) - assert "1.1.1.1" in [str(x) for x in children1["A"]] - assert "1.1.1.1" in [str(x) for x in children2["A"]] - assert set(children1.keys()) == set(children2.keys()) - - dns_config = OmegaConf.create({"dns_resolution": True}) - dns_config = OmegaConf.merge(bbot_config, dns_config) - scan2 = bbot_scanner("evilcorp.com", config=dns_config) - mock_dns( - scan2, + await scan._prep() + resolved_hosts_event1 = scan.make_event("one.one.one.one", "DNS_NAME", source=scan.root_event) + resolved_hosts_event2 = scan.make_event("http://one.one.one.one/", "URL_UNVERIFIED", source=scan.root_event) + dnsresolve = scan.modules["dns"] + assert hash(resolved_hosts_event1.host) not in dnsresolve._event_cache + assert hash(resolved_hosts_event2.host) not in dnsresolve._event_cache + await dnsresolve.handle_event(resolved_hosts_event1, {}) + assert hash(resolved_hosts_event1.host) in dnsresolve._event_cache + assert hash(resolved_hosts_event2.host) in dnsresolve._event_cache + await dnsresolve.handle_event(resolved_hosts_event2, {}) + assert "1.1.1.1" in resolved_hosts_event2.resolved_hosts + assert "1.1.1.1" in resolved_hosts_event2.dns_children["A"] + assert resolved_hosts_event1.resolved_hosts == resolved_hosts_event2.resolved_hosts + assert resolved_hosts_event1.dns_children == resolved_hosts_event2.dns_children + assert "a-record" in resolved_hosts_event1.tags + assert not "a-record" in resolved_hosts_event2.tags + + scan2 = bbot_scanner("evilcorp.com", config={"dns_resolution": True}) + await scan2.helpers.dns._mock_dns( { "evilcorp.com": {"TXT": ['"v=spf1 include:cloudprovider.com ~all"']}, "cloudprovider.com": {"A": ["1.2.3.4"]}, @@ -99,48 +150,51 @@ async def test_dns(bbot_scanner, bbot_config, mock_dns): @pytest.mark.asyncio -async def test_wildcards(bbot_scanner, bbot_config): - scan = bbot_scanner("1.1.1.1", config=bbot_config) +async def test_wildcards(bbot_scanner): + scan = bbot_scanner("1.1.1.1") helpers = scan.helpers + from bbot.core.helpers.dns.engine import DNSEngine + + dnsengine = DNSEngine(None) + # wildcards - wildcard_domains = await helpers.is_wildcard_domain("asdf.github.io") - assert hash("github.io") in helpers.dns._wildcard_cache - assert hash("asdf.github.io") in helpers.dns._wildcard_cache + wildcard_domains = await dnsengine.is_wildcard_domain("asdf.github.io") + assert hash("github.io") in dnsengine._wildcard_cache + assert hash("asdf.github.io") in dnsengine._wildcard_cache assert "github.io" in wildcard_domains assert "A" in wildcard_domains["github.io"] assert "SRV" not in wildcard_domains["github.io"] assert wildcard_domains["github.io"]["A"] and all(helpers.is_ip(r) for r in 
wildcard_domains["github.io"]["A"]) - helpers.dns._wildcard_cache.clear() + dnsengine._wildcard_cache.clear() - wildcard_rdtypes = await helpers.is_wildcard("blacklanternsecurity.github.io") + wildcard_rdtypes = await dnsengine.is_wildcard("blacklanternsecurity.github.io") assert "A" in wildcard_rdtypes assert "SRV" not in wildcard_rdtypes assert wildcard_rdtypes["A"] == (True, "github.io") - assert hash("github.io") in helpers.dns._wildcard_cache - assert len(helpers.dns._wildcard_cache[hash("github.io")]) > 0 - helpers.dns._wildcard_cache.clear() + assert hash("github.io") in dnsengine._wildcard_cache + assert len(dnsengine._wildcard_cache[hash("github.io")]) > 0 + dnsengine._wildcard_cache.clear() - wildcard_rdtypes = await helpers.is_wildcard("asdf.asdf.asdf.github.io") + wildcard_rdtypes = await dnsengine.is_wildcard("asdf.asdf.asdf.github.io") assert "A" in wildcard_rdtypes assert "SRV" not in wildcard_rdtypes assert wildcard_rdtypes["A"] == (True, "github.io") - assert hash("github.io") in helpers.dns._wildcard_cache - assert not hash("asdf.github.io") in helpers.dns._wildcard_cache - assert not hash("asdf.asdf.github.io") in helpers.dns._wildcard_cache - assert not hash("asdf.asdf.asdf.github.io") in helpers.dns._wildcard_cache - assert len(helpers.dns._wildcard_cache[hash("github.io")]) > 0 + assert hash("github.io") in dnsengine._wildcard_cache + assert not hash("asdf.github.io") in dnsengine._wildcard_cache + assert not hash("asdf.asdf.github.io") in dnsengine._wildcard_cache + assert not hash("asdf.asdf.asdf.github.io") in dnsengine._wildcard_cache + assert len(dnsengine._wildcard_cache[hash("github.io")]) > 0 wildcard_event1 = scan.make_event("wat.asdf.fdsa.github.io", "DNS_NAME", dummy=True) wildcard_event2 = scan.make_event("wats.asd.fdsa.github.io", "DNS_NAME", dummy=True) wildcard_event3 = scan.make_event("github.io", "DNS_NAME", dummy=True) # event resolution - event_tags1, event_whitelisted1, event_blacklisted1, children1 = await scan.helpers.resolve_event(wildcard_event1) - event_tags2, event_whitelisted2, event_blacklisted2, children2 = await scan.helpers.resolve_event(wildcard_event2) - event_tags3, event_whitelisted3, event_blacklisted3, children3 = await scan.helpers.resolve_event(wildcard_event3) - await helpers.handle_wildcard_event(wildcard_event1, children1) - await helpers.handle_wildcard_event(wildcard_event2, children2) - await helpers.handle_wildcard_event(wildcard_event3, children3) + await scan._prep() + dnsresolve = scan.modules["dns"] + await dnsresolve.handle_event(wildcard_event1, {}) + await dnsresolve.handle_event(wildcard_event2, {}) + await dnsresolve.handle_event(wildcard_event3, {}) assert "wildcard" in wildcard_event1.tags assert "a-wildcard" in wildcard_event1.tags assert "srv-wildcard" not in wildcard_event1.tags @@ -149,6 +203,63 @@ async def test_wildcards(bbot_scanner, bbot_config): assert "srv-wildcard" not in wildcard_event2.tags assert wildcard_event1.data == "_wildcard.github.io" assert wildcard_event2.data == "_wildcard.github.io" - assert "wildcard-domain" in wildcard_event3.tags - assert "a-wildcard-domain" in wildcard_event3.tags - assert "srv-wildcard-domain" not in wildcard_event3.tags + assert wildcard_event3.data == "github.io" + + # dns resolve distance + event_distance_0 = scan.make_event("8.8.8.8", module=scan._make_dummy_module_dns("PTR"), source=scan.root_event) + assert event_distance_0.dns_resolve_distance == 0 + event_distance_1 = scan.make_event( + "evilcorp.com", module=scan._make_dummy_module_dns("A"), 
source=event_distance_0 + ) + assert event_distance_1.dns_resolve_distance == 1 + event_distance_2 = scan.make_event("1.2.3.4", module=scan._make_dummy_module_dns("PTR"), source=event_distance_1) + assert event_distance_2.dns_resolve_distance == 1 + event_distance_3 = scan.make_event( + "evilcorp.org", module=scan._make_dummy_module_dns("A"), source=event_distance_2 + ) + assert event_distance_3.dns_resolve_distance == 2 + + from bbot.scanner import Scanner + + # test with full scan + scan2 = Scanner("asdfl.gashdgkjsadgsdf.github.io", config={"dns_resolution": True}) + events = [e async for e in scan2.async_start()] + assert len(events) == 2 + assert 1 == len([e for e in events if e.type == "SCAN"]) + assert 1 == len( + [ + e + for e in events + if e.type == "DNS_NAME" + and e.data == "_wildcard.github.io" + and all( + t in e.tags + for t in ( + "a-record", + "target", + "aaaa-wildcard", + "in-scope", + "subdomain", + "aaaa-record", + "wildcard", + "a-wildcard", + ) + ) + ] + ) + + # test with full scan (wildcard detection disabled for domain) + scan2 = Scanner("asdfl.gashdgkjsadgsdf.github.io", config={"dns_wildcard_ignore": ["github.io"]}) + events = [e async for e in scan2.async_start()] + assert len(events) == 2 + assert 1 == len([e for e in events if e.type == "SCAN"]) + assert 1 == len( + [ + e + for e in events + if e.type == "DNS_NAME" + and e.data == "asdfl.gashdgkjsadgsdf.github.io" + and all(t in e.tags for t in ("a-record", "target", "in-scope", "subdomain", "aaaa-record")) + and not any(t in e.tags for t in ("wildcard", "a-wildcard", "aaaa-wildcard")) + ] + ) diff --git a/bbot/test/test_step_1/test_docs.py b/bbot/test/test_step_1/test_docs.py index 6b00d2d0d..a86947ff0 100644 --- a/bbot/test/test_step_1/test_docs.py +++ b/bbot/test/test_step_1/test_docs.py @@ -1,5 +1,4 @@ -from bbot.scripts.docs import update_docs - - def test_docs(): + from bbot.scripts.docs import update_docs + update_docs() diff --git a/bbot/test/test_step_1/test_events.py b/bbot/test/test_step_1/test_events.py index 024ee84a3..3b8ec29bf 100644 --- a/bbot/test/test_step_1/test_events.py +++ b/bbot/test/test_step_1/test_events.py @@ -6,7 +6,7 @@ @pytest.mark.asyncio -async def test_events(events, scan, helpers, bbot_config): +async def test_events(events, scan, helpers): assert events.ipv4.type == "IP_ADDRESS" assert events.ipv6.type == "IP_ADDRESS" assert events.netv4.type == "IP_RANGE" @@ -119,6 +119,12 @@ async def test_events(events, scan, helpers, bbot_config): assert http_response.http_title == "HTTP RESPONSE" assert http_response.redirect_location == "http://www.evilcorp.com/asdf" + # http response url validation + http_response_2 = scan.make_event( + {"port": "80", "url": "http://evilcorp.com:80/asdf"}, "HTTP_RESPONSE", dummy=True + ) + assert http_response_2.data["url"] == "http://evilcorp.com/asdf" + # open port tests assert events.open_port in events.domain assert "api.publicapis.org:443" in events.open_port @@ -438,3 +444,19 @@ async def test_events(events, scan, helpers, bbot_config): event_5 = scan.make_event("127.0.0.5", source=event_4) assert event_5.get_sources() == [event_4, event_3, event_2, event_1, scan.root_event] assert event_5.get_sources(omit=True) == [event_4, event_2, event_1, scan.root_event] + + # test host backup + host_event = scan.make_event("asdf.evilcorp.com", "DNS_NAME", source=scan.root_event) + assert host_event.host_original == "asdf.evilcorp.com" + host_event.host = "_wildcard.evilcorp.com" + assert host_event.host == "_wildcard.evilcorp.com" + assert 
host_event.host_original == "asdf.evilcorp.com" + + # test storage bucket validation + bucket_event = scan.make_event( + {"name": "ASDF.s3.amazonaws.com", "url": "https://ASDF.s3.amazonaws.com"}, + "STORAGE_BUCKET", + source=scan.root_event, + ) + assert bucket_event.data["name"] == "asdf.s3.amazonaws.com" + assert bucket_event.data["url"] == "https://asdf.s3.amazonaws.com/" diff --git a/bbot/test/test_step_1/test_files.py b/bbot/test/test_step_1/test_files.py index be52b1cd2..ed9bc0a33 100644 --- a/bbot/test/test_step_1/test_files.py +++ b/bbot/test/test_step_1/test_files.py @@ -4,8 +4,8 @@ @pytest.mark.asyncio -async def test_files(bbot_scanner, bbot_config): - scan1 = bbot_scanner(config=bbot_config) +async def test_files(bbot_scanner): + scan1 = bbot_scanner() # tempfile tempfile = scan1.helpers.tempfile(("line1", "line2"), pipe=False) diff --git a/bbot/test/test_step_1/test_helpers.py b/bbot/test/test_step_1/test_helpers.py index c8045e595..4e3f3993e 100644 --- a/bbot/test/test_step_1/test_helpers.py +++ b/bbot/test/test_step_1/test_helpers.py @@ -6,7 +6,7 @@ @pytest.mark.asyncio -async def test_helpers_misc(helpers, scan, bbot_scanner, bbot_config, bbot_httpserver): +async def test_helpers_misc(helpers, scan, bbot_scanner, bbot_httpserver): ### URL ### bad_urls = ( "http://e.co/index.html", @@ -103,17 +103,7 @@ async def test_helpers_misc(helpers, scan, bbot_scanner, bbot_config, bbot_https assert helpers.domain_stem("evilcorp.co.uk") == "evilcorp" assert helpers.domain_stem("www.evilcorp.co.uk") == "www.evilcorp" - assert helpers.host_in_host("www.evilcorp.com", "evilcorp.com") == True - assert helpers.host_in_host("asdf.www.evilcorp.com", "evilcorp.com") == True - assert helpers.host_in_host("evilcorp.com", "www.evilcorp.com") == False - assert helpers.host_in_host("evilcorp.com", "evilcorp.com") == True - assert helpers.host_in_host("evilcorp.com", "eevilcorp.com") == False - assert helpers.host_in_host("eevilcorp.com", "evilcorp.com") == False - assert helpers.host_in_host("evilcorp.com", "evilcorp") == False - assert helpers.host_in_host("evilcorp", "evilcorp.com") == False - assert helpers.host_in_host("evilcorp.com", "com") == True - - assert tuple(helpers.extract_emails("asdf@asdf.com\nT@t.Com&a=a@a.com__ b@b.com")) == ( + assert tuple(await helpers.re.extract_emails("asdf@asdf.com\nT@t.Com&a=a@a.com__ b@b.com")) == ( "asdf@asdf.com", "t@t.com", "a@a.com", @@ -254,73 +244,6 @@ async def test_helpers_misc(helpers, scan, bbot_scanner, bbot_config, bbot_https "https://www.evilcorp.com/fdsa", } - filtered_dict = helpers.filter_dict( - {"modules": {"c99": {"api_key": "1234", "filterme": "asdf"}, "ipneighbor": {"test": "test"}}}, "api_key" - ) - assert "api_key" in filtered_dict["modules"]["c99"] - assert "filterme" not in filtered_dict["modules"]["c99"] - assert "ipneighbor" not in filtered_dict["modules"] - - filtered_dict2 = helpers.filter_dict( - {"modules": {"c99": {"api_key": "1234", "filterme": "asdf"}, "ipneighbor": {"test": "test"}}}, "c99" - ) - assert "api_key" in filtered_dict2["modules"]["c99"] - assert "filterme" in filtered_dict2["modules"]["c99"] - assert "ipneighbor" not in filtered_dict2["modules"] - - filtered_dict3 = helpers.filter_dict( - {"modules": {"c99": {"api_key": "1234", "filterme": "asdf"}, "ipneighbor": {"test": "test"}}}, - "key", - fuzzy=True, - ) - assert "api_key" in filtered_dict3["modules"]["c99"] - assert "filterme" not in filtered_dict3["modules"]["c99"] - assert "ipneighbor" not in filtered_dict3["modules"] - - filtered_dict4 = 
helpers.filter_dict( - {"modules": {"secrets_db": {"api_key": "1234"}, "ipneighbor": {"secret": "test", "asdf": "1234"}}}, - "secret", - fuzzy=True, - exclude_keys="modules", - ) - assert not "secrets_db" in filtered_dict4["modules"] - assert "ipneighbor" in filtered_dict4["modules"] - assert "secret" in filtered_dict4["modules"]["ipneighbor"] - assert "asdf" not in filtered_dict4["modules"]["ipneighbor"] - - cleaned_dict = helpers.clean_dict( - {"modules": {"c99": {"api_key": "1234", "filterme": "asdf"}, "ipneighbor": {"test": "test"}}}, "api_key" - ) - assert "api_key" not in cleaned_dict["modules"]["c99"] - assert "filterme" in cleaned_dict["modules"]["c99"] - assert "ipneighbor" in cleaned_dict["modules"] - - cleaned_dict2 = helpers.clean_dict( - {"modules": {"c99": {"api_key": "1234", "filterme": "asdf"}, "ipneighbor": {"test": "test"}}}, "c99" - ) - assert "c99" not in cleaned_dict2["modules"] - assert "ipneighbor" in cleaned_dict2["modules"] - - cleaned_dict3 = helpers.clean_dict( - {"modules": {"c99": {"api_key": "1234", "filterme": "asdf"}, "ipneighbor": {"test": "test"}}}, - "key", - fuzzy=True, - ) - assert "api_key" not in cleaned_dict3["modules"]["c99"] - assert "filterme" in cleaned_dict3["modules"]["c99"] - assert "ipneighbor" in cleaned_dict3["modules"] - - cleaned_dict4 = helpers.clean_dict( - {"modules": {"secrets_db": {"api_key": "1234"}, "ipneighbor": {"secret": "test", "asdf": "1234"}}}, - "secret", - fuzzy=True, - exclude_keys="modules", - ) - assert "secrets_db" in cleaned_dict4["modules"] - assert "ipneighbor" in cleaned_dict4["modules"] - assert "secret" not in cleaned_dict4["modules"]["ipneighbor"] - assert "asdf" in cleaned_dict4["modules"]["ipneighbor"] - replaced = helpers.search_format_dict( {"asdf": [{"wat": {"here": "#{replaceme}!"}}, {500: True}]}, replaceme="asdf" ) @@ -427,20 +350,30 @@ async def test_helpers_misc(helpers, scan, bbot_scanner, bbot_config, bbot_https assert helpers.smart_encode_punycode("ドメイン.テスト:80") == "xn--eckwd4c7c.xn--zckzah:80" assert helpers.smart_decode_punycode("xn--eckwd4c7c.xn--zckzah:80") == "ドメイン.テスト:80" - assert helpers.recursive_decode("Hello%20world%21") == "Hello world!" - assert helpers.recursive_decode("Hello%20%5Cu041f%5Cu0440%5Cu0438%5Cu0432%5Cu0435%5Cu0442") == "Hello Привет" - assert helpers.recursive_decode("%5Cu0020%5Cu041f%5Cu0440%5Cu0438%5Cu0432%5Cu0435%5Cu0442%5Cu0021") == " Привет!" - assert helpers.recursive_decode("Hello%2520world%2521") == "Hello world!" + assert await helpers.re.recursive_decode("Hello%20world%21") == "Hello world!" + assert ( + await helpers.re.recursive_decode("Hello%20%5Cu041f%5Cu0440%5Cu0438%5Cu0432%5Cu0435%5Cu0442") == "Hello Привет" + ) + assert ( + await helpers.re.recursive_decode("%5Cu0020%5Cu041f%5Cu0440%5Cu0438%5Cu0432%5Cu0435%5Cu0442%5Cu0021") + == " Привет!" + ) + assert await helpers.re.recursive_decode("Hello%2520world%2521") == "Hello world!" assert ( - helpers.recursive_decode("Hello%255Cu0020%255Cu041f%255Cu0440%255Cu0438%255Cu0432%255Cu0435%255Cu0442") + await helpers.re.recursive_decode( + "Hello%255Cu0020%255Cu041f%255Cu0440%255Cu0438%255Cu0432%255Cu0435%255Cu0442" + ) == "Hello Привет" ) assert ( - helpers.recursive_decode("%255Cu0020%255Cu041f%255Cu0440%255Cu0438%255Cu0432%255Cu0435%255Cu0442%255Cu0021") + await helpers.re.recursive_decode( + "%255Cu0020%255Cu041f%255Cu0440%255Cu0438%255Cu0432%255Cu0435%255Cu0442%255Cu0021" + ) == " Привет!" 
) assert ( - helpers.recursive_decode(r"Hello\\nWorld\\\tGreetings\\\\nMore\nText") == "Hello\nWorld\tGreetings\nMore\nText" + await helpers.re.recursive_decode(r"Hello\\nWorld\\\tGreetings\\\\nMore\nText") + == "Hello\nWorld\tGreetings\nMore\nText" ) ### CACHE ### @@ -519,7 +452,7 @@ async def test_helpers_misc(helpers, scan, bbot_scanner, bbot_config, bbot_https ) -def test_word_cloud(helpers, bbot_config, bbot_scanner): +def test_word_cloud(helpers, bbot_scanner): number_mutations = helpers.word_cloud.get_number_mutations("base2_p013", n=5, padding=2) assert "base0_p013" in number_mutations assert "base7_p013" in number_mutations @@ -535,7 +468,7 @@ def test_word_cloud(helpers, bbot_config, bbot_scanner): assert ("dev", "_base") in permutations # saving and loading - scan1 = bbot_scanner("127.0.0.1", config=bbot_config) + scan1 = bbot_scanner("127.0.0.1") word_cloud = scan1.helpers.word_cloud word_cloud.add_word("lantern") word_cloud.add_word("black") diff --git a/bbot/test/test_step_1/test_manager_deduplication.py b/bbot/test/test_step_1/test_manager_deduplication.py index e8ec69edc..a1d7f8596 100644 --- a/bbot/test/test_step_1/test_manager_deduplication.py +++ b/bbot/test/test_step_1/test_manager_deduplication.py @@ -3,7 +3,7 @@ @pytest.mark.asyncio -async def test_manager_deduplication(bbot_config, bbot_scanner, mock_dns): +async def test_manager_deduplication(bbot_scanner): class DefaultModule(BaseModule): _name = "default_module" @@ -47,8 +47,7 @@ class PerDomainOnly(DefaultModule): async def do_scan(*args, _config={}, _dns_mock={}, scan_callback=None, **kwargs): - merged_config = OmegaConf.merge(bbot_config, OmegaConf.create(_config)) - scan = bbot_scanner(*args, config=merged_config, **kwargs) + scan = bbot_scanner(*args, config=_config, **kwargs) default_module = DefaultModule(scan) everything_module = EverythingModule(scan) no_suppress_dupes = NoSuppressDupes(scan) @@ -62,7 +61,7 @@ async def do_scan(*args, _config={}, _dns_mock={}, scan_callback=None, **kwargs) scan.modules["per_hostport_only"] = per_hostport_only scan.modules["per_domain_only"] = per_domain_only if _dns_mock: - mock_dns(scan, _dns_mock) + await scan.helpers.dns._mock_dns(_dns_mock) if scan_callback is not None: scan_callback(scan) return ( @@ -92,15 +91,15 @@ async def do_scan(*args, _config={}, _dns_mock={}, scan_callback=None, **kwargs) ) assert len(events) == 21 - assert 1 == len([e for e in events if e.type == "DNS_NAME" and e.data == "accept_dupes.test.notreal" and str(e.module) == "accept_dupes" and e.source.data == "test.notreal"]) - assert 1 == len([e for e in events if e.type == "DNS_NAME" and e.data == "default_module.test.notreal" and str(e.module) == "default_module" and e.source.data == "test.notreal"]) + assert 1 == len([e for e in events if e.type == "DNS_NAME" and e.data == "accept_dupes.test.notreal" and str(e.module) == "accept_dupes"]) + assert 1 == len([e for e in events if e.type == "DNS_NAME" and e.data == "default_module.test.notreal" and str(e.module) == "default_module"]) assert 1 == len([e for e in events if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes" and e.source.data == "accept_dupes.test.notreal"]) assert 1 == len([e for e in events if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes" and e.source.data == "default_module.test.notreal"]) assert 1 == len([e for e in events if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) 
== "no_suppress_dupes" and e.source.data == "per_domain_only.test.notreal"]) assert 1 == len([e for e in events if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes" and e.source.data == "per_hostport_only.test.notreal"]) assert 1 == len([e for e in events if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes" and e.source.data == "test.notreal"]) - assert 1 == len([e for e in events if e.type == "DNS_NAME" and e.data == "per_domain_only.test.notreal" and str(e.module) == "per_domain_only" and e.source.data == "test.notreal"]) - assert 1 == len([e for e in events if e.type == "DNS_NAME" and e.data == "per_hostport_only.test.notreal" and str(e.module) == "per_hostport_only" and e.source.data == "test.notreal"]) + assert 1 == len([e for e in events if e.type == "DNS_NAME" and e.data == "per_domain_only.test.notreal" and str(e.module) == "per_domain_only"]) + assert 1 == len([e for e in events if e.type == "DNS_NAME" and e.data == "per_hostport_only.test.notreal" and str(e.module) == "per_hostport_only"]) assert 1 == len([e for e in events if e.type == "DNS_NAME" and e.data == "test.notreal" and str(e.module) == "TARGET" and "SCAN:" in e.source.data]) assert 1 == len([e for e in events if e.type == "OPEN_TCP_PORT" and e.data == "accept_dupes.test.notreal:88" and str(e.module) == "everything_module" and e.source.data == "accept_dupes.test.notreal"]) assert 1 == len([e for e in events if e.type == "OPEN_TCP_PORT" and e.data == "default_module.test.notreal:88" and str(e.module) == "everything_module" and e.source.data == "default_module.test.notreal"]) @@ -110,23 +109,23 @@ async def do_scan(*args, _config={}, _dns_mock={}, scan_callback=None, **kwargs) assert 5 == len([e for e in events if e.type == "OPEN_TCP_PORT" and e.data == "no_suppress_dupes.test.notreal:88" and str(e.module) == "everything_module" and e.source.data == "no_suppress_dupes.test.notreal"]) assert len(default_events) == 6 - assert 1 == len([e for e in default_events if e.type == "DNS_NAME" and e.data == "accept_dupes.test.notreal" and str(e.module) == "accept_dupes" and e.source.data == "test.notreal"]) - assert 1 == len([e for e in default_events if e.type == "DNS_NAME" and e.data == "default_module.test.notreal" and str(e.module) == "default_module" and e.source.data == "test.notreal"]) + assert 1 == len([e for e in default_events if e.type == "DNS_NAME" and e.data == "accept_dupes.test.notreal" and str(e.module) == "accept_dupes"]) + assert 1 == len([e for e in default_events if e.type == "DNS_NAME" and e.data == "default_module.test.notreal" and str(e.module) == "default_module"]) assert 1 == len([e for e in default_events if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes"]) - assert 1 == len([e for e in default_events if e.type == "DNS_NAME" and e.data == "per_domain_only.test.notreal" and str(e.module) == "per_domain_only" and e.source.data == "test.notreal"]) - assert 1 == len([e for e in default_events if e.type == "DNS_NAME" and e.data == "per_hostport_only.test.notreal" and str(e.module) == "per_hostport_only" and e.source.data == "test.notreal"]) + assert 1 == len([e for e in default_events if e.type == "DNS_NAME" and e.data == "per_domain_only.test.notreal" and str(e.module) == "per_domain_only"]) + assert 1 == len([e for e in default_events if e.type == "DNS_NAME" and e.data == "per_hostport_only.test.notreal" and str(e.module) == 
"per_hostport_only"]) assert 1 == len([e for e in default_events if e.type == "DNS_NAME" and e.data == "test.notreal" and str(e.module) == "TARGET" and "SCAN:" in e.source.data]) assert len(all_events) == 26 - assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "accept_dupes.test.notreal" and str(e.module) == "accept_dupes" and e.source.data == "test.notreal"]) - assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "default_module.test.notreal" and str(e.module) == "default_module" and e.source.data == "test.notreal"]) + assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "accept_dupes.test.notreal" and str(e.module) == "accept_dupes"]) + assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "default_module.test.notreal" and str(e.module) == "default_module"]) assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes" and e.source.data == "accept_dupes.test.notreal"]) assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes" and e.source.data == "default_module.test.notreal"]) assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes" and e.source.data == "per_domain_only.test.notreal"]) assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes" and e.source.data == "per_hostport_only.test.notreal"]) assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes" and e.source.data == "test.notreal"]) - assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "per_domain_only.test.notreal" and str(e.module) == "per_domain_only" and e.source.data == "test.notreal"]) - assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "per_hostport_only.test.notreal" and str(e.module) == "per_hostport_only" and e.source.data == "test.notreal"]) + assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "per_domain_only.test.notreal" and str(e.module) == "per_domain_only"]) + assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "per_hostport_only.test.notreal" and str(e.module) == "per_hostport_only"]) assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "test.notreal" and str(e.module) == "TARGET" and "SCAN:" in e.source.data]) assert 1 == len([e for e in all_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.3" and str(e.module) == "A" and e.source.data == "default_module.test.notreal"]) assert 1 == len([e for e in all_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.5" and str(e.module) == "A" and e.source.data == "no_suppress_dupes.test.notreal"]) @@ -141,31 +140,31 @@ async def do_scan(*args, _config={}, _dns_mock={}, scan_callback=None, **kwargs) assert 5 == len([e for e in all_events if e.type == "OPEN_TCP_PORT" and e.data == "no_suppress_dupes.test.notreal:88" and str(e.module) == "everything_module" and e.source.data == "no_suppress_dupes.test.notreal"]) assert len(no_suppress_dupes) == 6 - assert 1 == len([e for e in no_suppress_dupes if e.type == "DNS_NAME" and e.data == "accept_dupes.test.notreal" and str(e.module) == 
"accept_dupes" and e.source.data == "test.notreal"]) - assert 1 == len([e for e in no_suppress_dupes if e.type == "DNS_NAME" and e.data == "default_module.test.notreal" and str(e.module) == "default_module" and e.source.data == "test.notreal"]) + assert 1 == len([e for e in no_suppress_dupes if e.type == "DNS_NAME" and e.data == "accept_dupes.test.notreal" and str(e.module) == "accept_dupes"]) + assert 1 == len([e for e in no_suppress_dupes if e.type == "DNS_NAME" and e.data == "default_module.test.notreal" and str(e.module) == "default_module"]) assert 1 == len([e for e in no_suppress_dupes if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes"]) - assert 1 == len([e for e in no_suppress_dupes if e.type == "DNS_NAME" and e.data == "per_domain_only.test.notreal" and str(e.module) == "per_domain_only" and e.source.data == "test.notreal"]) - assert 1 == len([e for e in no_suppress_dupes if e.type == "DNS_NAME" and e.data == "per_hostport_only.test.notreal" and str(e.module) == "per_hostport_only" and e.source.data == "test.notreal"]) + assert 1 == len([e for e in no_suppress_dupes if e.type == "DNS_NAME" and e.data == "per_domain_only.test.notreal" and str(e.module) == "per_domain_only"]) + assert 1 == len([e for e in no_suppress_dupes if e.type == "DNS_NAME" and e.data == "per_hostport_only.test.notreal" and str(e.module) == "per_hostport_only"]) assert 1 == len([e for e in no_suppress_dupes if e.type == "DNS_NAME" and e.data == "test.notreal" and str(e.module) == "TARGET" and "SCAN:" in e.source.data]) assert len(accept_dupes) == 10 - assert 1 == len([e for e in accept_dupes if e.type == "DNS_NAME" and e.data == "accept_dupes.test.notreal" and str(e.module) == "accept_dupes" and e.source.data == "test.notreal"]) - assert 1 == len([e for e in accept_dupes if e.type == "DNS_NAME" and e.data == "default_module.test.notreal" and str(e.module) == "default_module" and e.source.data == "test.notreal"]) + assert 1 == len([e for e in accept_dupes if e.type == "DNS_NAME" and e.data == "accept_dupes.test.notreal" and str(e.module) == "accept_dupes"]) + assert 1 == len([e for e in accept_dupes if e.type == "DNS_NAME" and e.data == "default_module.test.notreal" and str(e.module) == "default_module"]) assert 1 == len([e for e in accept_dupes if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes" and e.source.data == "accept_dupes.test.notreal"]) assert 1 == len([e for e in accept_dupes if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes" and e.source.data == "default_module.test.notreal"]) assert 1 == len([e for e in accept_dupes if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes" and e.source.data == "per_domain_only.test.notreal"]) assert 1 == len([e for e in accept_dupes if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes" and e.source.data == "per_hostport_only.test.notreal"]) assert 1 == len([e for e in accept_dupes if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes" and e.source.data == "test.notreal"]) - assert 1 == len([e for e in accept_dupes if e.type == "DNS_NAME" and e.data == "per_domain_only.test.notreal" and str(e.module) == "per_domain_only" and e.source.data == "test.notreal"]) - assert 1 == len([e for e in accept_dupes if e.type == 
"DNS_NAME" and e.data == "per_hostport_only.test.notreal" and str(e.module) == "per_hostport_only" and e.source.data == "test.notreal"]) + assert 1 == len([e for e in accept_dupes if e.type == "DNS_NAME" and e.data == "per_domain_only.test.notreal" and str(e.module) == "per_domain_only"]) + assert 1 == len([e for e in accept_dupes if e.type == "DNS_NAME" and e.data == "per_hostport_only.test.notreal" and str(e.module) == "per_hostport_only"]) assert 1 == len([e for e in accept_dupes if e.type == "DNS_NAME" and e.data == "test.notreal" and str(e.module) == "TARGET" and "SCAN:" in e.source.data]) assert len(per_hostport_only) == 6 - assert 1 == len([e for e in per_hostport_only if e.type == "DNS_NAME" and e.data == "accept_dupes.test.notreal" and str(e.module) == "accept_dupes" and e.source.data == "test.notreal"]) - assert 1 == len([e for e in per_hostport_only if e.type == "DNS_NAME" and e.data == "default_module.test.notreal" and str(e.module) == "default_module" and e.source.data == "test.notreal"]) + assert 1 == len([e for e in per_hostport_only if e.type == "DNS_NAME" and e.data == "accept_dupes.test.notreal" and str(e.module) == "accept_dupes"]) + assert 1 == len([e for e in per_hostport_only if e.type == "DNS_NAME" and e.data == "default_module.test.notreal" and str(e.module) == "default_module"]) assert 1 == len([e for e in per_hostport_only if e.type == "DNS_NAME" and e.data == "no_suppress_dupes.test.notreal" and str(e.module) == "no_suppress_dupes"]) - assert 1 == len([e for e in per_hostport_only if e.type == "DNS_NAME" and e.data == "per_domain_only.test.notreal" and str(e.module) == "per_domain_only" and e.source.data == "test.notreal"]) - assert 1 == len([e for e in per_hostport_only if e.type == "DNS_NAME" and e.data == "per_hostport_only.test.notreal" and str(e.module) == "per_hostport_only" and e.source.data == "test.notreal"]) + assert 1 == len([e for e in per_hostport_only if e.type == "DNS_NAME" and e.data == "per_domain_only.test.notreal" and str(e.module) == "per_domain_only"]) + assert 1 == len([e for e in per_hostport_only if e.type == "DNS_NAME" and e.data == "per_hostport_only.test.notreal" and str(e.module) == "per_hostport_only"]) assert 1 == len([e for e in per_hostport_only if e.type == "DNS_NAME" and e.data == "test.notreal" and str(e.module) == "TARGET" and "SCAN:" in e.source.data]) assert len(per_domain_only) == 1 diff --git a/bbot/test/test_step_1/test_manager_scope_accuracy.py b/bbot/test/test_step_1/test_manager_scope_accuracy.py index e2ce82e94..dbca45276 100644 --- a/bbot/test/test_step_1/test_manager_scope_accuracy.py +++ b/bbot/test/test_step_1/test_manager_scope_accuracy.py @@ -1,3 +1,14 @@ +""" +The tests in this file are a bit unique because they're not intended to test any specific functionality + +They are meant to be a thorough baseline of how different modules and BBOT systems interact +Basically, if there is a small change in how scope works, dns resolution, etc., these tests are designed to catch it. +They will show you how your change affects bbot's behavior across a wide range of scans and configurations. + +I know they suck but they exist for a reason. If one of these tests is failing for you, it's important to take the time and +understand exactly what changed and why (and whether it's okay) before changing the test to match your results. 
+""" + from ..bbot_fixtures import * # noqa: F401 from pytest_httpserver import HTTPServer @@ -31,7 +42,7 @@ def bbot_other_httpservers(): @pytest.mark.asyncio -async def test_manager_scope_accuracy(bbot_config, bbot_scanner, bbot_httpserver, bbot_other_httpservers, bbot_httpserver_ssl, mock_dns): +async def test_manager_scope_accuracy(bbot_scanner, bbot_httpserver, bbot_other_httpservers, bbot_httpserver_ssl): """ This test ensures that BBOT correctly handles different scope distance settings. It performs these tests for normal modules, output modules, and their graph variants, @@ -91,8 +102,7 @@ async def handle_batch(self, *events): self.events.append(event) async def do_scan(*args, _config={}, _dns_mock={}, scan_callback=None, **kwargs): - merged_config = OmegaConf.merge(bbot_config, OmegaConf.create(_config)) - scan = bbot_scanner(*args, config=merged_config, **kwargs) + scan = bbot_scanner(*args, config=_config, **kwargs) dummy_module = DummyModule(scan) dummy_module_nodupes = DummyModuleNoDupes(scan) dummy_graph_output_module = DummyGraphOutputModule(scan) @@ -101,8 +111,7 @@ async def do_scan(*args, _config={}, _dns_mock={}, scan_callback=None, **kwargs) scan.modules["dummy_module_nodupes"] = dummy_module_nodupes scan.modules["dummy_graph_output_module"] = dummy_graph_output_module scan.modules["dummy_graph_batch_output_module"] = dummy_graph_batch_output_module - if _dns_mock: - mock_dns(scan, _dns_mock) + await scan.helpers.dns._mock_dns(_dns_mock) if scan_callback is not None: scan_callback(scan) return ( @@ -305,15 +314,17 @@ def custom_setup(scan): # httpx/speculate IP_RANGE --> IP_ADDRESS --> OPEN_TCP_PORT --> URL, search distance = 0 events, all_events, all_events_nodups, graph_output_events, graph_output_batch_events = await do_scan( "127.0.0.1/31", - modules=["httpx", "excavate"], + modules=["httpx"], _config={ "scope_search_distance": 0, "scope_dns_search_distance": 2, "scope_report_distance": 1, "speculate": True, - "internal_modules": {"speculate": {"ports": "8888"}}, + "excavate": True, + "modules": {"speculate": {"ports": "8888"}}, "omit_event_types": ["HTTP_RESPONSE", "URL_UNVERIFIED"], }, + _dns_mock={}, ) assert len(events) == 6 @@ -372,14 +383,14 @@ def custom_setup(scan): # httpx/speculate IP_RANGE --> IP_ADDRESS --> OPEN_TCP_PORT --> URL, search distance = 0, in_scope_only = False events, all_events, all_events_nodups, graph_output_events, graph_output_batch_events = await do_scan( "127.0.0.1/31", - modules=["httpx", "excavate"], + modules=["httpx"], _config={ "scope_search_distance": 0, "scope_dns_search_distance": 2, "scope_report_distance": 1, + "excavate": True, "speculate": True, - "modules": {"httpx": {"in_scope_only": False}}, - "internal_modules": {"speculate": {"ports": "8888"}}, + "modules": {"httpx": {"in_scope_only": False}, "speculate": {"ports": "8888"}}, "omit_event_types": ["HTTP_RESPONSE", "URL_UNVERIFIED"], }, ) @@ -401,7 +412,7 @@ def custom_setup(scan): assert 0 == len([e for e in events if e.type == "IP_ADDRESS" and e.data == "127.0.0.88"]) assert 0 == len([e for e in events if e.type == "URL_UNVERIFIED" and e.data == "http://127.0.0.77:8888/"]) - assert len(all_events) == 20 + assert len(all_events) == 19 assert 1 == len([e for e in all_events if e.type == "IP_RANGE" and e.data == "127.0.0.0/31" and e.internal == False and e.scope_distance == 0]) assert 1 == len([e for e in all_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.0" and e.internal == True and e.scope_distance == 0]) assert 2 == len([e for e in all_events if e.type == 
"IP_ADDRESS" and e.data == "127.0.0.1" and e.internal == False and e.scope_distance == 0]) @@ -410,7 +421,7 @@ def custom_setup(scan): assert 1 == len([e for e in all_events if e.type == "URL" and e.data == "http://127.0.0.1:8888/" and e.internal == False and e.scope_distance == 0]) assert 1 == len([e for e in all_events if e.type == "HTTP_RESPONSE" and e.data["input"] == "127.0.0.1:8888" and e.internal == False and e.scope_distance == 0]) assert 1 == len([e for e in all_events if e.type == "URL_UNVERIFIED" and e.data == "http://127.0.0.1:8888/" and e.internal == False and e.scope_distance == 0 and "spider-danger" in e.tags]) - assert 2 == len([e for e in all_events if e.type == "URL_UNVERIFIED" and e.data == "http://127.0.0.77:8888/" and e.internal == False and e.scope_distance == 1]) + assert 1 == len([e for e in all_events if e.type == "URL_UNVERIFIED" and e.data == "http://127.0.0.77:8888/" and e.internal == False and e.scope_distance == 1]) assert 1 == len([e for e in all_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.77" and e.internal == False and e.scope_distance == 1]) assert 2 == len([e for e in all_events if e.type == "OPEN_TCP_PORT" and e.data == "127.0.0.77:8888" and e.internal == False and e.scope_distance == 1]) assert 1 == len([e for e in all_events if e.type == "URL" and e.data == "http://127.0.0.77:8888/" and e.internal == False and e.scope_distance == 1]) @@ -456,14 +467,14 @@ def custom_setup(scan): # httpx/speculate IP_RANGE --> IP_ADDRESS --> OPEN_TCP_PORT --> URL, search distance = 1 events, all_events, all_events_nodups, graph_output_events, graph_output_batch_events = await do_scan( "127.0.0.1/31", - modules=["httpx", "excavate"], + modules=["httpx"], _config={ "scope_search_distance": 1, "scope_dns_search_distance": 2, "scope_report_distance": 1, + "excavate": True, "speculate": True, - "modules": {"httpx": {"in_scope_only": False}}, - "internal_modules": {"speculate": {"ports": "8888"}}, + "modules": {"httpx": {"in_scope_only": False}, "speculate": {"ports": "8888"}}, "omit_event_types": ["HTTP_RESPONSE", "URL_UNVERIFIED"], }, ) @@ -485,7 +496,7 @@ def custom_setup(scan): assert 0 == len([e for e in events if e.type == "IP_ADDRESS" and e.data == "127.0.0.88"]) assert 0 == len([e for e in events if e.type == "URL_UNVERIFIED" and e.data == "http://127.0.0.77:8888/"]) - assert len(all_events) == 26 + assert len(all_events) == 24 assert 1 == len([e for e in all_events if e.type == "IP_RANGE" and e.data == "127.0.0.0/31" and e.internal == False and e.scope_distance == 0]) assert 1 == len([e for e in all_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.0" and e.internal == True and e.scope_distance == 0]) assert 2 == len([e for e in all_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.1" and e.internal == False and e.scope_distance == 0]) @@ -494,12 +505,12 @@ def custom_setup(scan): assert 1 == len([e for e in all_events if e.type == "URL" and e.data == "http://127.0.0.1:8888/" and e.internal == False and e.scope_distance == 0]) assert 1 == len([e for e in all_events if e.type == "HTTP_RESPONSE" and e.data["input"] == "127.0.0.1:8888" and e.internal == False and e.scope_distance == 0]) assert 1 == len([e for e in all_events if e.type == "URL_UNVERIFIED" and e.data == "http://127.0.0.1:8888/" and e.internal == False and e.scope_distance == 0 and "spider-danger" in e.tags]) - assert 2 == len([e for e in all_events if e.type == "URL_UNVERIFIED" and e.data == "http://127.0.0.77:8888/" and e.internal == False and e.scope_distance == 1]) + assert 1 == 
len([e for e in all_events if e.type == "URL_UNVERIFIED" and e.data == "http://127.0.0.77:8888/" and e.internal == False and e.scope_distance == 1]) assert 1 == len([e for e in all_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.77" and e.internal == False and e.scope_distance == 1]) assert 2 == len([e for e in all_events if e.type == "OPEN_TCP_PORT" and e.data == "127.0.0.77:8888" and e.internal == False and e.scope_distance == 1]) assert 1 == len([e for e in all_events if e.type == "URL" and e.data == "http://127.0.0.77:8888/" and e.internal == False and e.scope_distance == 1]) assert 1 == len([e for e in all_events if e.type == "HTTP_RESPONSE" and e.data["input"] == "127.0.0.77:8888" and e.internal == False and e.scope_distance == 1]) - assert 2 == len([e for e in all_events if e.type == "URL_UNVERIFIED" and e.data == "http://127.0.0.88:8888/" and e.internal == True and e.scope_distance == 2]) + assert 1 == len([e for e in all_events if e.type == "URL_UNVERIFIED" and e.data == "http://127.0.0.88:8888/" and e.internal == True and e.scope_distance == 2]) assert 1 == len([e for e in all_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.88" and e.internal == True and e.scope_distance == 2]) assert 1 == len([e for e in all_events if e.type == "OPEN_TCP_PORT" and e.data == "127.0.0.88:8888" and e.internal == True and e.scope_distance == 2]) assert 1 == len([e for e in all_events if e.type == "URL" and e.data == "http://127.0.0.88:8888/" and e.internal == True and e.scope_distance == 2]) @@ -551,14 +562,16 @@ def custom_setup(scan): events, all_events, all_events_nodups, graph_output_events, graph_output_batch_events = await do_scan( "127.0.0.111/31", whitelist=["127.0.0.111/31", "127.0.0.222", "127.0.0.33"], - modules=["httpx", "excavate"], + modules=["httpx"], output_modules=["python"], _config={ + "dns_resolution": True, "scope_search_distance": 0, "scope_dns_search_distance": 2, "scope_report_distance": 0, + "excavate": True, "speculate": True, - "internal_modules": {"speculate": {"ports": "8888"}}, + "modules": {"speculate": {"ports": "8888"}}, "omit_event_types": ["HTTP_RESPONSE", "URL_UNVERIFIED"], }, ) @@ -591,7 +604,7 @@ def custom_setup(scan): assert 0 == len([e for e in events if e.type == "OPEN_TCP_PORT" and e.data == "127.0.0.44:8888"]) assert 0 == len([e for e in events if e.type == "OPEN_TCP_PORT" and e.data == "127.0.0.55:8888"]) - assert len(all_events) == 33 + assert len(all_events) == 31 assert 1 == len([e for e in all_events if e.type == "IP_RANGE" and e.data == "127.0.0.110/31" and e.internal == False and e.scope_distance == 0]) assert 1 == len([e for e in all_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.110" and e.internal == True and e.scope_distance == 0]) assert 2 == len([e for e in all_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.111" and e.internal == False and e.scope_distance == 0]) @@ -600,9 +613,9 @@ def custom_setup(scan): assert 1 == len([e for e in all_events if e.type == "URL" and e.data == "http://127.0.0.111:8888/" and e.internal == False and e.scope_distance == 0]) assert 1 == len([e for e in all_events if e.type == "HTTP_RESPONSE" and e.data["input"] == "127.0.0.111:8888" and e.internal == False and e.scope_distance == 0]) assert 1 == len([e for e in all_events if e.type == "URL_UNVERIFIED" and e.data == "http://127.0.0.111:8888/" and e.internal == False and e.scope_distance == 0]) - assert 2 == len([e for e in all_events if e.type == "URL_UNVERIFIED" and e.data == "http://127.0.0.222:8889/" and e.internal == False and 
e.scope_distance == 0]) + assert 1 == len([e for e in all_events if e.type == "URL_UNVERIFIED" and e.data == "http://127.0.0.222:8889/" and e.internal == False and e.scope_distance == 0]) assert 1 == len([e for e in all_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.222" and e.internal == False and e.scope_distance == 0]) - assert 2 == len([e for e in all_events if e.type == "URL_UNVERIFIED" and e.data == "http://127.0.0.33:8889/" and e.internal == False and e.scope_distance == 0]) + assert 1 == len([e for e in all_events if e.type == "URL_UNVERIFIED" and e.data == "http://127.0.0.33:8889/" and e.internal == False and e.scope_distance == 0]) assert 1 == len([e for e in all_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.33" and e.internal == False and e.scope_distance == 0]) assert 1 == len([e for e in all_events if e.type == "OPEN_TCP_PORT" and e.data == "127.0.0.222:8888" and e.internal == True and e.scope_distance == 0]) assert 2 == len([e for e in all_events if e.type == "OPEN_TCP_PORT" and e.data == "127.0.0.222:8889" and e.internal == False and e.scope_distance == 0]) @@ -679,8 +692,8 @@ def custom_setup(scan): # sslcert with in-scope chain events, all_events, all_events_nodups, graph_output_events, graph_output_batch_events = await do_scan( "127.0.0.0/31", - modules=["speculate", "sslcert"], - _config={"dns_resolution": False, "scope_report_distance": 0, "internal_modules": {"speculate": {"ports": "9999"}}}, + modules=["sslcert"], + _config={"dns_resolution": False, "scope_report_distance": 0, "speculate": True, "modules": {"speculate": {"ports": "9999"}}}, _dns_mock={"www.bbottest.notreal": {"A": ["127.0.1.0"]}, "test.notreal": {"A": ["127.0.0.1"]}}, ) @@ -735,14 +748,14 @@ def custom_setup(scan): # sslcert with out-of-scope chain events, all_events, all_events_nodups, graph_output_events, graph_output_batch_events = await do_scan( "127.0.0.0/31", - modules=["speculate", "sslcert"], + modules=["sslcert"], whitelist=["127.0.1.0"], - _config={"dns_resolution": False, "scope_report_distance": 0, "internal_modules": {"speculate": {"ports": "9999"}}}, + _config={"dns_resolution": False, "scope_report_distance": 0, "scope_search_distance": 1, "speculate": True, "modules": {"speculate": {"ports": "9999"}}}, _dns_mock={"www.bbottest.notreal": {"A": ["127.0.0.1"]}, "test.notreal": {"A": ["127.0.1.0"]}}, ) assert len(events) == 3 - assert 1 == len([e for e in events if e.type == "IP_RANGE" and e.data == "127.0.0.0/31" and e.internal == False and e.scope_distance == 0]) + assert 1 == len([e for e in events if e.type == "IP_RANGE" and e.data == "127.0.0.0/31" and e.internal == False and e.scope_distance == 1]) assert 0 == len([e for e in events if e.type == "IP_ADDRESS" and e.data == "127.0.0.0"]) assert 0 == len([e for e in events if e.type == "IP_ADDRESS" and e.data == "127.0.0.1"]) assert 0 == len([e for e in events if e.type == "OPEN_TCP_PORT" and e.data == "127.0.0.0:9999"]) @@ -752,30 +765,30 @@ def custom_setup(scan): assert 0 == len([e for e in events if e.type == "OPEN_TCP_PORT" and e.data == "test.notreal:9999"]) assert len(all_events) == 11 - assert 1 == len([e for e in all_events if e.type == "IP_RANGE" and e.data == "127.0.0.0/31" and e.internal == False and e.scope_distance == 0]) - assert 1 == len([e for e in all_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.0" and e.internal == True and e.scope_distance == 1]) - assert 2 == len([e for e in all_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.1" and e.internal == True and e.scope_distance == 1]) 
- assert 1 == len([e for e in all_events if e.type == "OPEN_TCP_PORT" and e.data == "127.0.0.0:9999" and e.internal == True and e.scope_distance == 1]) + assert 1 == len([e for e in all_events if e.type == "IP_RANGE" and e.data == "127.0.0.0/31" and e.internal == False and e.scope_distance == 1]) + assert 1 == len([e for e in all_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.0" and e.internal == True and e.scope_distance == 2]) + assert 2 == len([e for e in all_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.1" and e.internal == True and e.scope_distance == 2]) + assert 1 == len([e for e in all_events if e.type == "OPEN_TCP_PORT" and e.data == "127.0.0.0:9999" and e.internal == True and e.scope_distance == 2]) assert 2 == len([e for e in all_events if e.type == "OPEN_TCP_PORT" and e.data == "127.0.0.1:9999" and e.internal == True and e.scope_distance == 1]) assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "test.notreal" and e.internal == False and e.scope_distance == 0 and str(e.module) == "sslcert"]) - assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "www.bbottest.notreal" and e.internal == True and e.scope_distance == 2 and str(e.module) == "sslcert"]) + assert 1 == len([e for e in all_events if e.type == "DNS_NAME" and e.data == "www.bbottest.notreal" and e.internal == True and e.scope_distance == 3 and str(e.module) == "sslcert"]) assert 1 == len([e for e in all_events if e.type == "OPEN_TCP_PORT" and e.data == "test.notreal:9999" and e.internal == True and e.scope_distance == 0 and str(e.module) == "speculate"]) assert len(all_events_nodups) == 9 - assert 1 == len([e for e in all_events_nodups if e.type == "IP_RANGE" and e.data == "127.0.0.0/31" and e.internal == False and e.scope_distance == 0]) - assert 1 == len([e for e in all_events_nodups if e.type == "IP_ADDRESS" and e.data == "127.0.0.0" and e.internal == True and e.scope_distance == 1]) - assert 1 == len([e for e in all_events_nodups if e.type == "IP_ADDRESS" and e.data == "127.0.0.1" and e.internal == True and e.scope_distance == 1]) - assert 1 == len([e for e in all_events_nodups if e.type == "OPEN_TCP_PORT" and e.data == "127.0.0.0:9999" and e.internal == True and e.scope_distance == 1]) + assert 1 == len([e for e in all_events_nodups if e.type == "IP_RANGE" and e.data == "127.0.0.0/31" and e.internal == False and e.scope_distance == 1]) + assert 1 == len([e for e in all_events_nodups if e.type == "IP_ADDRESS" and e.data == "127.0.0.0" and e.internal == True and e.scope_distance == 2]) + assert 1 == len([e for e in all_events_nodups if e.type == "IP_ADDRESS" and e.data == "127.0.0.1" and e.internal == True and e.scope_distance == 2]) + assert 1 == len([e for e in all_events_nodups if e.type == "OPEN_TCP_PORT" and e.data == "127.0.0.0:9999" and e.internal == True and e.scope_distance == 2]) assert 1 == len([e for e in all_events_nodups if e.type == "OPEN_TCP_PORT" and e.data == "127.0.0.1:9999" and e.internal == True and e.scope_distance == 1]) assert 1 == len([e for e in all_events_nodups if e.type == "DNS_NAME" and e.data == "test.notreal" and e.internal == False and e.scope_distance == 0 and str(e.module) == "sslcert"]) - assert 1 == len([e for e in all_events_nodups if e.type == "DNS_NAME" and e.data == "www.bbottest.notreal" and e.internal == True and e.scope_distance == 2 and str(e.module) == "sslcert"]) + assert 1 == len([e for e in all_events_nodups if e.type == "DNS_NAME" and e.data == "www.bbottest.notreal" and e.internal == True and 
e.scope_distance == 3 and str(e.module) == "sslcert"]) assert 1 == len([e for e in all_events_nodups if e.type == "OPEN_TCP_PORT" and e.data == "test.notreal:9999" and e.internal == True and e.scope_distance == 0 and str(e.module) == "speculate"]) for _graph_output_events in (graph_output_events, graph_output_batch_events): assert len(_graph_output_events) == 5 - assert 1 == len([e for e in graph_output_events if e.type == "IP_RANGE" and e.data == "127.0.0.0/31" and e.internal == False and e.scope_distance == 0]) + assert 1 == len([e for e in graph_output_events if e.type == "IP_RANGE" and e.data == "127.0.0.0/31" and e.internal == False and e.scope_distance == 1]) assert 0 == len([e for e in graph_output_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.0"]) - assert 1 == len([e for e in graph_output_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.1" and e.internal == True and e.scope_distance == 1]) + assert 1 == len([e for e in graph_output_events if e.type == "IP_ADDRESS" and e.data == "127.0.0.1" and e.internal == True and e.scope_distance == 2]) assert 0 == len([e for e in graph_output_events if e.type == "OPEN_TCP_PORT" and e.data == "127.0.0.0:9999"]) assert 1 == len([e for e in graph_output_events if e.type == "OPEN_TCP_PORT" and e.data == "127.0.0.1:9999" and e.internal == True and e.scope_distance == 1]) assert 1 == len([e for e in graph_output_events if e.type == "DNS_NAME" and e.data == "test.notreal" and e.internal == False and e.scope_distance == 0 and str(e.module) == "sslcert"]) @@ -784,21 +797,19 @@ def custom_setup(scan): @pytest.mark.asyncio -async def test_manager_blacklist(bbot_config, bbot_scanner, bbot_httpserver, caplog, mock_dns): +async def test_manager_blacklist(bbot_scanner, bbot_httpserver, caplog): bbot_httpserver.expect_request(uri="/").respond_with_data(response_data="") # dns search distance = 1, report distance = 0 - config = {"dns_resolution": True, "scope_dns_search_distance": 1, "scope_report_distance": 0} - merged_config = OmegaConf.merge(bbot_config, OmegaConf.create(config)) scan = bbot_scanner( "http://127.0.0.1:8888", - modules=["httpx", "excavate"], - config=merged_config, + modules=["httpx"], + config={"excavate": True, "dns_resolution": True, "scope_dns_search_distance": 1, "scope_report_distance": 0}, whitelist=["127.0.0.0/29", "test.notreal"], blacklist=["127.0.0.64/29"], ) - mock_dns(scan, { + await scan.helpers.dns._mock_dns({ "www-prod.test.notreal": {"A": ["127.0.0.66"]}, "www-dev.test.notreal": {"A": ["127.0.0.22"]}, }) @@ -806,14 +817,15 @@ async def test_manager_blacklist(bbot_config, bbot_scanner, bbot_httpserver, cap events = [e async for e in scan.async_start()] assert any([e for e in events if e.type == "URL_UNVERIFIED" and e.data == "http://www-dev.test.notreal:8888/"]) + # the hostname is in-scope, but its IP is blacklisted, therefore we shouldn't see it assert not any([e for e in events if e.type == "URL_UNVERIFIED" and e.data == "http://www-prod.test.notreal:8888/"]) - assert 'Omitting due to blacklisted DNS associations: URL_UNVERIFIED("http://www-prod.test.notreal:8888/"' in caplog.text + assert 'Not forwarding DNS_NAME("www-prod.test.notreal", module=excavate' in caplog.text and 'because it has a blacklisted DNS record' in caplog.text @pytest.mark.asyncio -async def test_manager_scope_tagging(bbot_config, bbot_scanner): - scan = bbot_scanner("test.notreal", config=bbot_config) +async def test_manager_scope_tagging(bbot_scanner): + scan = bbot_scanner("test.notreal") e1 = scan.make_event("www.test.notreal", 
source=scan.root_event, tags=["affiliate"]) assert e1.scope_distance == 1 assert "distance-1" in e1.tags diff --git a/bbot/test/test_step_1/test_modules_basic.py b/bbot/test/test_step_1/test_modules_basic.py index b66f06bbf..03273c0a7 100644 --- a/bbot/test/test_step_1/test_modules_basic.py +++ b/bbot/test/test_step_1/test_modules_basic.py @@ -9,11 +9,7 @@ @pytest.mark.asyncio -async def test_modules_basic(scan, helpers, events, bbot_config, bbot_scanner, httpx_mock): - fallback_nameservers = scan.helpers.temp_dir / "nameservers.txt" - with open(fallback_nameservers, "w") as f: - f.write("8.8.8.8\n") - +async def test_modules_basic(scan, helpers, events, bbot_scanner, httpx_mock): for http_method in ("GET", "CONNECT", "HEAD", "POST", "PUT", "TRACE", "DEBUG", "PATCH", "DELETE", "OPTIONS"): httpx_mock.add_response(method=http_method, url=re.compile(r".*"), json={"test": "test"}) @@ -80,12 +76,11 @@ async def test_modules_basic(scan, helpers, events, bbot_config, bbot_scanner, h base_output_module.watched_events = ["IP_ADDRESS"] scan2 = bbot_scanner( - modules=list(set(available_modules + available_internal_modules)), + modules=list(available_modules), output_modules=list(available_output_modules), - config=bbot_config, + config={i: True for i in available_internal_modules}, force_start=True, ) - scan2.helpers.dns.fallback_nameservers_file = fallback_nameservers await scan2.load_modules() scan2.status = "RUNNING" @@ -104,7 +99,7 @@ async def test_modules_basic(scan, helpers, events, bbot_config, bbot_scanner, h assert not any(not_async) # module preloading - all_preloaded = module_loader.preloaded() + all_preloaded = DEFAULT_PRESET.module_loader.preloaded() assert "massdns" in all_preloaded assert "DNS_NAME" in all_preloaded["massdns"]["watched_events"] assert "DNS_NAME" in all_preloaded["massdns"]["produced_events"] @@ -113,7 +108,8 @@ async def test_modules_basic(scan, helpers, events, bbot_config, bbot_scanner, h assert type(all_preloaded["massdns"]["config"]["max_resolvers"]) == int assert all_preloaded["sslcert"]["deps"]["pip"] assert all_preloaded["sslcert"]["deps"]["apt"] - assert all_preloaded["massdns"]["deps"]["ansible"] + assert all_preloaded["massdns"]["deps"]["common"] + assert all_preloaded["gowitness"]["deps"]["ansible"] all_flags = set() @@ -129,6 +125,9 @@ async def test_modules_basic(scan, helpers, events, bbot_config, bbot_scanner, h assert ("safe" in flags and not "aggressive" in flags) or ( not "safe" in flags and "aggressive" in flags ), f'module "{module_name}" must have either "safe" or "aggressive" flag' + assert not ( + "web-basic" in flags and "web-thorough" in flags + ), f'module "{module_name}" should have either "web-basic" or "web-thorough" flags, not both' assert preloaded.get("meta", {}).get("description", ""), f"{module_name} must have a description" # attribute checks @@ -174,7 +173,7 @@ async def test_modules_basic(scan, helpers, events, bbot_config, bbot_scanner, h @pytest.mark.asyncio -async def test_modules_basic_perhostonly(helpers, events, bbot_config, bbot_scanner, httpx_mock, monkeypatch): +async def test_modules_basic_perhostonly(helpers, events, bbot_scanner, httpx_mock, monkeypatch): from bbot.modules.base import BaseModule class mod_normal(BaseModule): @@ -198,7 +197,6 @@ class mod_domain_only(BaseModule): scan = bbot_scanner( "evilcorp.com", - config=bbot_config, force_start=True, ) @@ -262,11 +260,11 @@ class mod_domain_only(BaseModule): @pytest.mark.asyncio -async def test_modules_basic_perdomainonly(scan, helpers, events, bbot_config, 
bbot_scanner, httpx_mock, monkeypatch): +async def test_modules_basic_perdomainonly(scan, helpers, events, bbot_scanner, httpx_mock, monkeypatch): per_domain_scan = bbot_scanner( "evilcorp.com", - modules=list(set(available_modules + available_internal_modules)), - config=bbot_config, + modules=list(available_modules), + config={i: True for i in available_internal_modules}, force_start=True, ) @@ -303,7 +301,7 @@ async def test_modules_basic_perdomainonly(scan, helpers, events, bbot_config, b @pytest.mark.asyncio -async def test_modules_basic_stats(helpers, events, bbot_config, bbot_scanner, httpx_mock, monkeypatch, mock_dns): +async def test_modules_basic_stats(helpers, events, bbot_scanner, httpx_mock, monkeypatch): from bbot.modules.base import BaseModule class dummy(BaseModule): @@ -320,12 +318,11 @@ async def handle_event(self, event): scan = bbot_scanner( "evilcorp.com", - modules=["speculate"], - config=bbot_config, + config={"speculate": True}, + output_modules=["python"], force_start=True, ) - mock_dns( - scan, + await scan.helpers.dns._mock_dns( { "evilcorp.com": {"A": ["127.0.254.1"]}, "www.evilcorp.com": {"A": ["127.0.254.2"]}, @@ -356,7 +353,7 @@ async def handle_event(self, event): "ORG_STUB": 1, } - assert set(scan.stats.module_stats) == {"host", "speculate", "python", "dummy", "TARGET"} + assert set(scan.stats.module_stats) == {"speculate", "host", "TARGET", "python", "dummy", "cloud", "dns"} target_stats = scan.stats.module_stats["TARGET"] assert target_stats.produced == {"SCAN": 1, "DNS_NAME": 1} @@ -367,8 +364,15 @@ async def handle_event(self, event): dummy_stats = scan.stats.module_stats["dummy"] assert dummy_stats.produced == {"FINDING": 1, "URL": 1} assert dummy_stats.produced_total == 2 - assert dummy_stats.consumed == {"DNS_NAME": 2, "OPEN_TCP_PORT": 1, "SCAN": 1, "URL": 1, "URL_UNVERIFIED": 1} - assert dummy_stats.consumed_total == 6 + assert dummy_stats.consumed == { + "DNS_NAME": 2, + "FINDING": 1, + "OPEN_TCP_PORT": 1, + "SCAN": 1, + "URL": 1, + "URL_UNVERIFIED": 1, + } + assert dummy_stats.consumed_total == 7 python_stats = scan.stats.module_stats["python"] assert python_stats.produced == {} diff --git a/bbot/test/test_step_1/test_presets.py b/bbot/test/test_step_1/test_presets.py new file mode 100644 index 000000000..d84244e4f --- /dev/null +++ b/bbot/test/test_step_1/test_presets.py @@ -0,0 +1,661 @@ +from ..bbot_fixtures import * # noqa F401 + +from bbot.scanner import Scanner, Preset + + +# FUTURE TODO: +# Consider testing possible edge cases: +# make sure custom module load directory works with cli arg module/flag/config syntax validation +# what if you specify -c modules.custommodule.option? +# the validation needs to not happen until after your custom preset has been loaded +# what if you specify flags in one preset, but another preset (loaded later) has more custom modules that match that flag? +# how do we make sure those other modules get loaded too? +# what if you specify a flag that's only on custom modules? Will it be rejected as invalid? + + +def test_preset_descriptions(): + # ensure every preset has a description + preset = Preset() + for yaml_file, (loaded_preset, category, preset_path, original_filename) in preset.all_presets.items(): + assert ( + loaded_preset.description + ), f'Preset "{loaded_preset.name}" at {original_filename} does not have a description.'
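For orientation, the preset lifecycle exercised throughout this file can be summed up in a short sketch. Every call below mirrors one made by the tests themselves (construction, `bake()`, YAML round-tripping, merging, `Scanner(preset=...)`); the specific targets, flags, and modules are arbitrary examples, so treat this as an illustration rather than a reference.

```python
# A minimal sketch of the Preset workflow exercised in this file.
# All calls mirror ones made by the tests; targets/flags are arbitrary examples.
from bbot.scanner import Scanner, Preset

# flags resolve into concrete modules once the preset is baked
preset = Preset(flags=["subdomain-enum"], exclude_modules=["massdns"]).bake()
assert "sslcert" in preset.scan_modules

# presets survive a YAML round-trip...
preset2 = Preset("evilcorp.com", whitelist=["evilcorp.ce"], config={"speculate": False})
yaml1 = preset2.to_yaml(sort_keys=True)
assert Preset.from_yaml_string(yaml1).to_yaml(sort_keys=True) == yaml1

# ...can be merged with other presets...
preset2.merge(Preset(output_modules="neo4j"))

# ...and can seed a scanner directly
scan = Scanner(preset=preset2)
```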
+ + +def test_core(): + from bbot.core import CORE + + import omegaconf + + assert "testasdf" not in CORE.default_config + assert "testasdf" not in CORE.custom_config + assert "testasdf" not in CORE.config + + core_copy = CORE.copy() + # make sure our default config is read-only + with pytest.raises(omegaconf.errors.ReadonlyConfigError): + core_copy.default_config["testasdf"] = "test" + # same for merged config + with pytest.raises(omegaconf.errors.ReadonlyConfigError): + core_copy.config["testasdf"] = "test" + + assert "testasdf" not in core_copy.default_config + assert "testasdf" not in core_copy.custom_config + assert "testasdf" not in core_copy.config + + core_copy.custom_config["testasdf"] = "test" + assert "testasdf" not in core_copy.default_config + assert "testasdf" in core_copy.custom_config + assert "testasdf" in core_copy.config + + # test config merging + config_to_merge = omegaconf.OmegaConf.create({"test123": {"test321": [3, 2, 1], "test456": [4, 5, 6]}}) + core_copy.merge_custom(config_to_merge) + assert "test123" not in core_copy.default_config + assert "test123" in core_copy.custom_config + assert "test123" in core_copy.config + assert "test321" in core_copy.custom_config["test123"] + assert "test321" in core_copy.config["test123"] + + # test deletion + del core_copy.custom_config.test123.test321 + assert "test123" in core_copy.custom_config + assert "test123" in core_copy.config + assert "test321" not in core_copy.custom_config["test123"] + assert "test321" not in core_copy.config["test123"] + assert "test456" in core_copy.custom_config["test123"] + assert "test456" in core_copy.config["test123"] + + +def test_preset_yaml(clean_default_config): + + import yaml + + preset1 = Preset( + "evilcorp.com", + "www.evilcorp.ce", + whitelist=["evilcorp.ce"], + blacklist=["test.www.evilcorp.ce"], + modules=["sslcert"], + output_modules=["json"], + exclude_modules=["ipneighbor"], + flags=["subdomain-enum"], + require_flags=["safe"], + exclude_flags=["slow"], + verbose=False, + debug=False, + silent=True, + config={"preset_test_asdf": 1}, + strict_scope=False, + ) + preset1.bake() + assert "evilcorp.com" in preset1.target + assert "evilcorp.ce" in preset1.whitelist + assert "test.www.evilcorp.ce" in preset1.blacklist + assert "sslcert" in preset1.scan_modules + assert preset1.whitelisted("evilcorp.ce") + assert preset1.whitelisted("www.evilcorp.ce") + assert not preset1.whitelisted("evilcorp.com") + assert preset1.blacklisted("test.www.evilcorp.ce") + assert preset1.blacklisted("asdf.test.www.evilcorp.ce") + assert not preset1.blacklisted("www.evilcorp.ce") + + # test yaml save/load + yaml1 = preset1.to_yaml(sort_keys=True) + preset2 = Preset.from_yaml_string(yaml1) + yaml2 = preset2.to_yaml(sort_keys=True) + assert yaml1 == yaml2 + + yaml_string_1 = """ +flags: + - subdomain-enum + +exclude_flags: + - aggressive + - slow + +require_flags: + - passive + - safe + +exclude_modules: + - certspotter + - rapiddns + +modules: + - robots + - wappalyzer + +output_modules: + - csv + - json + +config: + speculate: False + excavate: True +""" + yaml_string_1 = yaml.dump(yaml.safe_load(yaml_string_1), sort_keys=True) + # preset from yaml + preset3 = Preset.from_yaml_string(yaml_string_1) + # yaml to preset + yaml_string_2 = preset3.to_yaml(sort_keys=True) + # make sure they're the same + assert yaml_string_2 == yaml_string_1 + + +def test_preset_scope(): + + blank_preset = Preset() + assert not blank_preset.target + assert blank_preset.strict_scope == False + + preset1 = Preset( + "evilcorp.com", 
+ "www.evilcorp.ce", + whitelist=["evilcorp.ce"], + blacklist=["test.www.evilcorp.ce"], + ) + + # make sure target logic works as expected + assert "evilcorp.com" in preset1.target + assert "asdf.evilcorp.com" in preset1.target + assert "asdf.www.evilcorp.ce" in preset1.target + assert not "evilcorp.ce" in preset1.target + assert "evilcorp.ce" in preset1.whitelist + assert "test.www.evilcorp.ce" in preset1.blacklist + assert not "evilcorp.ce" in preset1.blacklist + assert preset1.in_scope("www.evilcorp.ce") + assert not preset1.in_scope("evilcorp.com") + assert not preset1.in_scope("asdf.test.www.evilcorp.ce") + + # test yaml save/load + yaml1 = preset1.to_yaml(sort_keys=True) + preset2 = Preset.from_yaml_string(yaml1) + yaml2 = preset2.to_yaml(sort_keys=True) + assert yaml1 == yaml2 + + # test preset merging + preset3 = Preset( + "evilcorp.org", + whitelist=["evilcorp.de"], + blacklist=["test.www.evilcorp.de"], + strict_scope=True, + ) + + preset1.merge(preset3) + + # targets should be merged + assert "evilcorp.com" in preset1.target + assert "www.evilcorp.ce" in preset1.target + assert "evilcorp.org" in preset1.target + # strict scope is enabled + assert not "asdf.evilcorp.com" in preset1.target + assert not "asdf.www.evilcorp.ce" in preset1.target + assert "evilcorp.ce" in preset1.whitelist + assert "evilcorp.de" in preset1.whitelist + assert not "asdf.evilcorp.de" in preset1.whitelist + assert not "asdf.evilcorp.ce" in preset1.whitelist + # blacklist should be merged, strict scope does not apply + assert "asdf.test.www.evilcorp.ce" in preset1.blacklist + assert "asdf.test.www.evilcorp.de" in preset1.blacklist + assert not "asdf.test.www.evilcorp.org" in preset1.blacklist + # only the base domain of evilcorp.de should be in scope + assert not preset1.in_scope("evilcorp.com") + assert not preset1.in_scope("evilcorp.org") + assert preset1.in_scope("evilcorp.de") + assert not preset1.in_scope("asdf.evilcorp.de") + assert not preset1.in_scope("evilcorp.com") + assert not preset1.in_scope("asdf.test.www.evilcorp.ce") + + preset4 = Preset(output_modules="neo4j") + set(preset1.output_modules) == {"python", "csv", "human", "json"} + preset1.merge(preset4) + set(preset1.output_modules) == {"python", "csv", "human", "json", "neo4j"} + + +def test_preset_logging(): + # test verbosity levels (conflicting verbose/debug/silent) + preset = Preset(verbose=True) + original_log_level = preset.core.logger.log_level + try: + assert preset.verbose == True + assert preset.debug == False + assert preset.silent == False + assert preset.core.logger.log_level == logging.VERBOSE + preset.debug = True + assert preset.verbose == False + assert preset.debug == True + assert preset.silent == False + assert preset.core.logger.log_level == logging.DEBUG + preset.silent = True + assert preset.verbose == False + assert preset.debug == False + assert preset.silent == True + assert preset.core.logger.log_level == logging.CRITICAL + finally: + preset.core.logger.log_level = original_log_level + + +def test_preset_module_resolution(clean_default_config): + preset = Preset().bake() + sslcert_preloaded = preset.preloaded_module("sslcert") + wayback_preloaded = preset.preloaded_module("wayback") + wappalyzer_preloaded = preset.preloaded_module("wappalyzer") + sslcert_flags = sslcert_preloaded.get("flags", []) + wayback_flags = wayback_preloaded.get("flags", []) + wappalyzer_flags = wappalyzer_preloaded.get("flags", []) + assert "active" in sslcert_flags + assert "passive" in wayback_flags + assert "active" in wappalyzer_flags + 
assert "subdomain-enum" in sslcert_flags + assert "subdomain-enum" in wayback_flags + assert "httpx" in wappalyzer_preloaded["deps"]["modules"] + + # make sure we have the expected defaults + assert not preset.scan_modules + assert set(preset.output_modules) == {"python", "csv", "human", "json"} + assert set(preset.internal_modules) == {"aggregate", "excavate", "speculate", "cloud", "dns"} + assert preset.modules == set(preset.output_modules).union(set(preset.internal_modules)) + + # make sure dependency resolution works as expected + preset = Preset(modules=["wappalyzer"]).bake() + assert set(preset.scan_modules) == {"wappalyzer", "httpx"} + + # make sure flags work as expected + preset = Preset(flags=["subdomain-enum"]).bake() + assert preset.flags == {"subdomain-enum"} + assert "sslcert" in preset.modules + assert "wayback" in preset.modules + assert "sslcert" in preset.scan_modules + assert "wayback" in preset.scan_modules + + # flag + module exclusions + preset = Preset(flags=["subdomain-enum"], exclude_modules=["sslcert"]).bake() + assert "sslcert" not in preset.modules + assert "wayback" in preset.modules + assert "sslcert" not in preset.scan_modules + assert "wayback" in preset.scan_modules + + # flag + flag exclusions + preset = Preset(flags=["subdomain-enum"], exclude_flags=["active"]).bake() + assert "sslcert" not in preset.modules + assert "wayback" in preset.modules + assert "sslcert" not in preset.scan_modules + assert "wayback" in preset.scan_modules + + # flag + flag requirements + preset = Preset(flags=["subdomain-enum"], require_flags=["passive"]).bake() + assert "sslcert" not in preset.modules + assert "wayback" in preset.modules + assert "sslcert" not in preset.scan_modules + assert "wayback" in preset.scan_modules + + # normal module enableement + preset = Preset(modules=["sslcert", "wappalyzer", "wayback"]).bake() + assert set(preset.scan_modules) == {"sslcert", "wappalyzer", "wayback", "httpx"} + + # modules + flag exclusions + preset = Preset(exclude_flags=["active"], modules=["sslcert", "wappalyzer", "wayback"]).bake() + assert set(preset.scan_modules) == {"wayback"} + + # modules + flag requirements + preset = Preset(require_flags=["passive"], modules=["sslcert", "wappalyzer", "wayback"]).bake() + assert set(preset.scan_modules) == {"wayback"} + + # modules + module exclusions + with pytest.raises(ValidationError) as error: + preset = Preset(exclude_modules=["sslcert"], modules=["sslcert", "wappalyzer", "wayback"]).bake() + assert str(error.value) == 'Unable to add scan module "sslcert" because the module has been excluded' + + +def test_preset_module_loader(): + custom_module_dir = bbot_test_dir / "custom_module_dir" + custom_module_dir_2 = custom_module_dir / "asdf" + custom_output_module_dir = custom_module_dir / "output" + custom_internal_module_dir = custom_module_dir / "internal" + for d in [custom_module_dir, custom_module_dir_2, custom_output_module_dir, custom_internal_module_dir]: + d.mkdir(parents=True, exist_ok=True) + assert d.is_dir() + custom_module_1 = custom_module_dir / "testmodule1.py" + with open(custom_module_1, "w") as f: + f.write( + """ +from bbot.modules.base import BaseModule + +class TestModule1(BaseModule): + watched_events = ["URL", "HTTP_RESPONSE"] + produced_events = ["VULNERABILITY"] +""" + ) + + custom_module_2 = custom_output_module_dir / "testmodule2.py" + with open(custom_module_2, "w") as f: + f.write( + """ +from bbot.modules.output.base import BaseOutputModule + +class TestModule2(BaseOutputModule): + pass +""" + ) + + 
custom_module_3 = custom_internal_module_dir / "testmodule3.py" + with open(custom_module_3, "w") as f: + f.write( + """ +from bbot.modules.internal.base import BaseInternalModule + +class TestModule3(BaseInternalModule): + pass +""" + ) + + custom_module_4 = custom_module_dir_2 / "testmodule4.py" + with open(custom_module_4, "w") as f: + f.write( + """ +from bbot.modules.base import BaseModule + +class TestModule4(BaseModule): + watched_events = ["TECHNOLOGY"] + produced_events = ["FINDING"] +""" + ) + + assert custom_module_1.is_file() + assert custom_module_2.is_file() + assert custom_module_3.is_file() + assert custom_module_4.is_file() + + preset = Preset() + preset.module_loader.save_preload_cache() + assert preset.module_loader.preload_cache_file.is_file() + + # at this point, core modules should be loaded, but not custom ones + assert "wappalyzer" in preset.module_loader.preloaded() + assert "testmodule1" not in preset.module_loader.preloaded() + + import pickle + + with open(preset.module_loader.preload_cache_file, "rb") as f: + preloaded = pickle.load(f) + assert "wappalyzer" in preloaded + assert "testmodule1" not in preloaded + + # add custom module dir + preset.module_dirs = [str(custom_module_dir)] + assert custom_module_dir in preset.module_dirs + assert custom_module_dir_2 in preset.module_dirs + assert custom_output_module_dir in preset.module_dirs + assert custom_internal_module_dir in preset.module_dirs + + # now our custom modules should be loaded + assert "wappalyzer" in preset.module_loader.preloaded() + assert "testmodule1" in preset.module_loader.preloaded() + assert "testmodule2" in preset.module_loader.preloaded() + assert "testmodule3" in preset.module_loader.preloaded() + assert "testmodule4" in preset.module_loader.preloaded() + + preset.module_loader.save_preload_cache() + with open(preset.module_loader.preload_cache_file, "rb") as f: + preloaded = pickle.load(f) + assert "wappalyzer" in preloaded + assert "testmodule1" in preloaded + assert "testmodule2" in preloaded + assert "testmodule3" in preloaded + assert "testmodule4" in preloaded + + # since module loader is shared across all presets, a new preset should now also have our custom modules + preset2 = Preset() + assert "wappalyzer" in preset2.module_loader.preloaded() + assert "testmodule1" in preset2.module_loader.preloaded() + assert "testmodule2" in preset2.module_loader.preloaded() + assert "testmodule3" in preset2.module_loader.preloaded() + assert "testmodule4" in preset2.module_loader.preloaded() + + # reset module_loader + preset2.module_loader.__init__() + + +def test_preset_include(): + + # test recursive preset inclusion + + custom_preset_dir_1 = bbot_test_dir / "custom_preset_dir" + custom_preset_dir_2 = custom_preset_dir_1 / "preset_subdir" + custom_preset_dir_3 = custom_preset_dir_2 / "subsubdir" + custom_preset_dir_4 = Path("/tmp/.bbot_preset_test") + custom_preset_dir_5 = custom_preset_dir_4 / "subdir" + mkdir(custom_preset_dir_1) + mkdir(custom_preset_dir_2) + mkdir(custom_preset_dir_3) + mkdir(custom_preset_dir_4) + mkdir(custom_preset_dir_5) + + preset_file = custom_preset_dir_1 / "preset1.yml" + with open(preset_file, "w") as f: + f.write( + """ +include: + - preset2 + +config: + modules: + testpreset1: + test: asdf +""" + ) + + preset_file = custom_preset_dir_2 / "preset2.yml" + with open(preset_file, "w") as f: + f.write( + """ +include: + - preset3 + +config: + modules: + testpreset2: + test: fdsa +""" + ) + + preset_file = custom_preset_dir_3 / "preset3.yml" + with 
open(preset_file, "w") as f: + f.write( + f""" +include: + # uh oh + - preset1 + - {custom_preset_dir_4}/preset4 + +config: + modules: + testpreset3: + test: qwerty +""" + ) + + preset_file = custom_preset_dir_4 / "preset4.yml" + with open(preset_file, "w") as f: + f.write( + """ +include: + - preset5 + +config: + modules: + testpreset4: + test: zxcv +""" + ) + + preset_file = custom_preset_dir_5 / "preset5.yml" + with open(preset_file, "w") as f: + f.write( + """ +config: + modules: + testpreset5: + test: hjkl +""" + ) + + preset = Preset(include=[str(custom_preset_dir_1 / "preset1")]) + assert preset.config.modules.testpreset1.test == "asdf" + assert preset.config.modules.testpreset2.test == "fdsa" + assert preset.config.modules.testpreset3.test == "qwerty" + assert preset.config.modules.testpreset4.test == "zxcv" + assert preset.config.modules.testpreset5.test == "hjkl" + + +def test_preset_conditions(): + custom_preset_dir_1 = bbot_test_dir / "custom_preset_dir" + custom_preset_dir_2 = custom_preset_dir_1 / "preset_subdir" + mkdir(custom_preset_dir_1) + mkdir(custom_preset_dir_2) + + preset_file_1 = custom_preset_dir_1 / "preset1.yml" + with open(preset_file_1, "w") as f: + f.write( + """ +include: + - preset2 +""" + ) + + preset_file_2 = custom_preset_dir_2 / "preset2.yml" + with open(preset_file_2, "w") as f: + f.write( + """ +conditions: + - | + {% if config.web_spider_distance == 3 and config.web_spider_depth == 4 %} + {{ abort("web spider is too aggressive") }} + {% endif %} +""" + ) + + preset = Preset(include=[preset_file_1]) + assert preset.conditions + + scan = Scanner(preset=preset) + assert scan.preset.conditions + + preset2 = Preset(config={"web_spider_distance": 3, "web_spider_depth": 4}) + preset.merge(preset2) + + with pytest.raises(PresetAbortError): + Scanner(preset=preset) + + +def test_preset_module_disablement(clean_default_config): + # internal module disablement + preset = Preset().bake() + assert "speculate" in preset.internal_modules + assert "excavate" in preset.internal_modules + assert "aggregate" in preset.internal_modules + preset = Preset(config={"speculate": False}).bake() + assert "speculate" not in preset.internal_modules + assert "excavate" in preset.internal_modules + assert "aggregate" in preset.internal_modules + preset = Preset(exclude_modules=["speculate", "excavate"]).bake() + assert "speculate" not in preset.internal_modules + assert "excavate" not in preset.internal_modules + assert "aggregate" in preset.internal_modules + + # output module disablement + preset = Preset().bake() + assert set(preset.output_modules) == {"python", "human", "csv", "json"} + preset = Preset(exclude_modules=["human", "csv"]).bake() + assert set(preset.output_modules) == {"python", "json"} + preset = Preset(output_modules=["json"]).bake() + assert set(preset.output_modules) == {"json"} + + +def test_preset_require_exclude(): + + def get_module_flags(p): + for m in p.scan_modules: + preloaded = p.preloaded_module(m) + yield m, preloaded.get("flags", []) + + # enable by flag, no exclusions/requirements + preset = Preset(flags=["subdomain-enum"]).bake() + assert len(preset.modules) > 25 + module_flags = list(get_module_flags(preset)) + massdns_flags = preset.preloaded_module("massdns").get("flags", []) + assert "subdomain-enum" in massdns_flags + assert "passive" in massdns_flags + assert not "active" in massdns_flags + assert "aggressive" in massdns_flags + assert not "safe" in massdns_flags + assert "massdns" in [x[0] for x in module_flags] + assert "certspotter" in
[x[0] for x in module_flags] + assert "c99" in [x[0] for x in module_flags] + assert any("passive" in flags for module, flags in module_flags) + assert any("active" in flags for module, flags in module_flags) + assert any("safe" in flags for module, flags in module_flags) + assert any("aggressive" in flags for module, flags in module_flags) + + # enable by flag, one required flag + preset = Preset(flags=["subdomain-enum"], require_flags=["passive"]).bake() + assert len(preset.modules) > 25 + module_flags = list(get_module_flags(preset)) + assert "massdns" in [x[0] for x in module_flags] + assert all("passive" in flags for module, flags in module_flags) + assert not any("active" in flags for module, flags in module_flags) + assert any("safe" in flags for module, flags in module_flags) + assert any("aggressive" in flags for module, flags in module_flags) + + # enable by flag, one excluded flag + preset = Preset(flags=["subdomain-enum"], exclude_flags=["active"]).bake() + assert len(preset.modules) > 25 + module_flags = list(get_module_flags(preset)) + assert "massdns" in [x[0] for x in module_flags] + assert all("passive" in flags for module, flags in module_flags) + assert not any("active" in flags for module, flags in module_flags) + assert any("safe" in flags for module, flags in module_flags) + assert any("aggressive" in flags for module, flags in module_flags) + + # enable by flag, one excluded module + preset = Preset(flags=["subdomain-enum"], exclude_modules=["massdns"]).bake() + assert len(preset.modules) > 25 + module_flags = list(get_module_flags(preset)) + assert not "massdns" in [x[0] for x in module_flags] + assert any("passive" in flags for module, flags in module_flags) + assert any("active" in flags for module, flags in module_flags) + assert any("safe" in flags for module, flags in module_flags) + assert any("aggressive" in flags for module, flags in module_flags) + + # enable by flag, multiple required flags + preset = Preset(flags=["subdomain-enum"], require_flags=["safe", "passive"]).bake() + assert len(preset.modules) > 25 + module_flags = list(get_module_flags(preset)) + assert not "massdns" in [x[0] for x in module_flags] + assert all("passive" in flags and "safe" in flags for module, flags in module_flags) + assert all("active" not in flags and "aggressive" not in flags for module, flags in module_flags) + assert not any("active" in flags for module, flags in module_flags) + assert not any("aggressive" in flags for module, flags in module_flags) + + # enable by flag, multiple excluded flags + preset = Preset(flags=["subdomain-enum"], exclude_flags=["aggressive", "active"]).bake() + assert len(preset.modules) > 25 + module_flags = list(get_module_flags(preset)) + assert not "massdns" in [x[0] for x in module_flags] + assert all("passive" in flags and "safe" in flags for module, flags in module_flags) + assert all("active" not in flags and "aggressive" not in flags for module, flags in module_flags) + assert not any("active" in flags for module, flags in module_flags) + assert not any("aggressive" in flags for module, flags in module_flags) + + # enable by flag, multiple excluded modules + preset = Preset(flags=["subdomain-enum"], exclude_modules=["massdns", "c99"]).bake() + assert len(preset.modules) > 25 + module_flags = list(get_module_flags(preset)) + assert not "massdns" in [x[0] for x in module_flags] + assert "certspotter" in [x[0] for x in module_flags] + assert not "c99" in [x[0] for x in module_flags] + assert any("passive" in flags for module, flags in 
module_flags) + assert any("active" in flags for module, flags in module_flags) + assert any("safe" in flags for module, flags in module_flags) + assert any("aggressive" in flags for module, flags in module_flags) diff --git a/bbot/test/test_step_1/test_python_api.py b/bbot/test/test_step_1/test_python_api.py index 00ad2d972..0155dcfb3 100644 --- a/bbot/test/test_step_1/test_python_api.py +++ b/bbot/test/test_step_1/test_python_api.py @@ -2,20 +2,20 @@ @pytest.mark.asyncio -async def test_python_api(bbot_config): +async def test_python_api(): from bbot.scanner import Scanner # make sure events are properly yielded - scan1 = Scanner("127.0.0.1", config=bbot_config) + scan1 = Scanner("127.0.0.1") events1 = [] async for event in scan1.async_start(): events1.append(event) assert any("127.0.0.1" == e for e in events1) # make sure output files work - scan2 = Scanner("127.0.0.1", config=bbot_config, output_modules=["json"], name="python_api_test") + scan2 = Scanner("127.0.0.1", output_modules=["json"], scan_name="python_api_test") await scan2.async_start_without_generator() scan_home = scan2.helpers.scans_dir / "python_api_test" - out_file = scan_home / "output.ndjson" + out_file = scan_home / "output.json" assert list(scan2.helpers.read_file(out_file)) scan_log = scan_home / "scan.log" debug_log = scan_home / "debug.log" @@ -24,14 +24,14 @@ async def test_python_api(bbot_config): assert debug_log.is_file() assert "python_api_test" in open(debug_log).read() - scan3 = Scanner("127.0.0.1", config=bbot_config, output_modules=["json"], name="scan_logging_test") + scan3 = Scanner("127.0.0.1", output_modules=["json"], scan_name="scan_logging_test") await scan3.async_start_without_generator() assert "scan_logging_test" not in open(scan_log).read() assert "scan_logging_test" not in open(debug_log).read() scan_home = scan3.helpers.scans_dir / "scan_logging_test" - out_file = scan_home / "output.ndjson" + out_file = scan_home / "output.json" assert list(scan3.helpers.read_file(out_file)) scan_log = scan_home / "scan.log" debug_log = scan_home / "debug.log" @@ -45,22 +45,74 @@ async def test_python_api(bbot_config): Scanner("127.0.0.1", config={"home": bbot_home}) assert os.environ["BBOT_TOOLS"] == str(Path(bbot_home) / "tools") + # custom target types + custom_target_scan = Scanner("ORG:evilcorp") + events = [e async for e in custom_target_scan.async_start()] + assert 1 == len([e for e in events if e.type == "ORG_STUB" and e.data == "evilcorp" and "target" in e.tags]) -def test_python_api_sync(bbot_config): + +def test_python_api_sync(): from bbot.scanner import Scanner # make sure events are properly yielded - scan1 = Scanner("127.0.0.1", config=bbot_config) + scan1 = Scanner("127.0.0.1") events1 = [] for event in scan1.start(): events1.append(event) assert any("127.0.0.1" == e for e in events1) # make sure output files work - scan2 = Scanner("127.0.0.1", config=bbot_config, output_modules=["json"], name="python_api_test") + scan2 = Scanner("127.0.0.1", output_modules=["json"], scan_name="python_api_test") scan2.start_without_generator() - out_file = scan2.helpers.scans_dir / "python_api_test" / "output.ndjson" + out_file = scan2.helpers.scans_dir / "python_api_test" / "output.json" assert list(scan2.helpers.read_file(out_file)) # make sure config loads properly bbot_home = "/tmp/.bbot_python_api_test" Scanner("127.0.0.1", config={"home": bbot_home}) assert os.environ["BBOT_TOOLS"] == str(Path(bbot_home) / "tools") + + +def test_python_api_validation(): + from bbot.scanner import Scanner, Preset + + # 
invalid module + with pytest.raises(ValidationError) as error: + Scanner(modules=["asdf"]) + assert str(error.value) == 'Could not find scan module "asdf". Did you mean "asn"?' + # invalid output module + with pytest.raises(ValidationError) as error: + Scanner(output_modules=["asdf"]) + assert str(error.value) == 'Could not find output module "asdf". Did you mean "teams"?' + # invalid excluded module + with pytest.raises(ValidationError) as error: + Scanner(exclude_modules=["asdf"]) + assert str(error.value) == 'Could not find module "asdf". Did you mean "asn"?' + # invalid flag + with pytest.raises(ValidationError) as error: + Scanner(flags=["asdf"]) + assert str(error.value) == 'Could not find flag "asdf". Did you mean "safe"?' + # invalid required flag + with pytest.raises(ValidationError) as error: + Scanner(require_flags=["asdf"]) + assert str(error.value) == 'Could not find flag "asdf". Did you mean "safe"?' + # invalid excluded flag + with pytest.raises(ValidationError) as error: + Scanner(exclude_flags=["asdf"]) + assert str(error.value) == 'Could not find flag "asdf". Did you mean "safe"?' + # output module as normal module + with pytest.raises(ValidationError) as error: + Scanner(modules=["json"]) + assert str(error.value) == 'Could not find scan module "json". Did you mean "asn"?' + # normal module as output module + with pytest.raises(ValidationError) as error: + Scanner(output_modules=["robots"]) + assert str(error.value) == 'Could not find output module "robots". Did you mean "web_report"?' + # invalid preset type + with pytest.raises(ValidationError) as error: + Scanner(preset="asdf") + assert str(error.value) == 'Preset must be of type Preset, not "str"' + # include nonexistent preset + with pytest.raises(ValidationError) as error: + Preset(include=["asdf"]) + assert ( + str(error.value) == 'Could not find preset at "asdf" - file does not exist. 
Use -lp to list available presets' + ) diff --git a/bbot/test/test_step_1/test_regexes.py b/bbot/test/test_step_1/test_regexes.py index 7807e6c79..709674c9e 100644 --- a/bbot/test/test_step_1/test_regexes.py +++ b/bbot/test/test_step_1/test_regexes.py @@ -1,9 +1,9 @@ import pytest import traceback -from bbot.core.event.helpers import get_event_type from bbot.core.helpers import regexes -from bbot.core.errors import ValidationError +from bbot.errors import ValidationError +from bbot.core.event.helpers import get_event_type def test_dns_name_regexes(): diff --git a/bbot/test/test_step_1/test_scan.py b/bbot/test/test_step_1/test_scan.py index 464f2038b..e5648c4ee 100644 --- a/bbot/test/test_step_1/test_scan.py +++ b/bbot/test/test_step_1/test_scan.py @@ -4,18 +4,15 @@ @pytest.mark.asyncio async def test_scan( events, - bbot_config, helpers, monkeypatch, bbot_scanner, - mock_dns, ): scan0 = bbot_scanner( "1.1.1.1/31", "evilcorp.com", blacklist=["1.1.1.1/28", "www.evilcorp.com"], modules=["ipneighbor"], - config=bbot_config, ) await scan0.load_modules() assert scan0.whitelisted("1.1.1.1") @@ -38,7 +35,7 @@ async def test_scan( assert "1.1.1.0/28" in j["blacklist"] assert "ipneighbor" in j["modules"] - scan1 = bbot_scanner("1.1.1.1", whitelist=["1.0.0.1"], config=bbot_config) + scan1 = bbot_scanner("1.1.1.1", whitelist=["1.0.0.1"]) assert not scan1.blacklisted("1.1.1.1") assert not scan1.blacklisted("1.0.0.1") assert not scan1.whitelisted("1.1.1.1") @@ -46,7 +43,7 @@ async def test_scan( assert scan1.in_scope("1.0.0.1") assert not scan1.in_scope("1.1.1.1") - scan2 = bbot_scanner("1.1.1.1", config=bbot_config) + scan2 = bbot_scanner("1.1.1.1") assert not scan2.blacklisted("1.1.1.1") assert not scan2.blacklisted("1.0.0.1") assert scan2.whitelisted("1.1.1.1") @@ -60,10 +57,8 @@ async def test_scan( } # make sure DNS resolution works - dns_config = OmegaConf.create({"dns_resolution": True}) - dns_config = OmegaConf.merge(bbot_config, dns_config) - scan4 = bbot_scanner("1.1.1.1", config=dns_config) - mock_dns(scan4, dns_table) + scan4 = bbot_scanner("1.1.1.1", config={"dns_resolution": True}) + await scan4.helpers.dns._mock_dns(dns_table) events = [] async for event in scan4.async_start(): events.append(event) @@ -71,10 +66,8 @@ async def test_scan( assert "one.one.one.one" in event_data # make sure it doesn't work when you turn it off - no_dns_config = OmegaConf.create({"dns_resolution": False}) - no_dns_config = OmegaConf.merge(bbot_config, no_dns_config) - scan5 = bbot_scanner("1.1.1.1", config=no_dns_config) - mock_dns(scan5, dns_table) + scan5 = bbot_scanner("1.1.1.1", config={"dns_resolution": False}) + await scan5.helpers.dns._mock_dns(dns_table) events = [] async for event in scan5.async_start(): events.append(event) diff --git a/bbot/test/test_step_1/test_scope.py b/bbot/test/test_step_1/test_scope.py index e51fec973..7435b82af 100644 --- a/bbot/test/test_step_1/test_scope.py +++ b/bbot/test/test_step_1/test_scope.py @@ -2,10 +2,58 @@ from ..test_step_2.module_tests.base import ModuleTestBase -class Scope_test_blacklist(ModuleTestBase): +class TestScopeBaseline(ModuleTestBase): targets = ["http://127.0.0.1:8888"] modules_overrides = ["httpx"] + async def setup_after_prep(self, module_test): + expect_args = {"method": "GET", "uri": "/"} + respond_args = {"response_data": "alive"} + module_test.set_expect_requests(expect_args=expect_args, respond_args=respond_args) + + def check(self, module_test, events): + assert len(events) == 6 + assert 1 == len( + [ + e + for e in events + if e.type == 
"URL_UNVERIFIED" + and str(e.host) == "127.0.0.1" + and e.scope_distance == 0 + and "target" in e.tags + ] + ) + # we have two of these because the host module considers "always_emit" in its outgoing deduplication + assert 2 == len( + [ + e + for e in events + if e.type == "IP_ADDRESS" + and e.data == "127.0.0.1" + and e.scope_distance == 0 + and str(e.module) == "host" + ] + ) + assert 1 == len( + [ + e + for e in events + if e.type == "HTTP_RESPONSE" + and str(e.host) == "127.0.0.1" + and e.port == 8888 + and e.scope_distance == 0 + ] + ) + assert 1 == len( + [ + e + for e in events + if e.type == "URL" and str(e.host) == "127.0.0.1" and e.port == 8888 and e.scope_distance == 0 + ] + ) + + +class TestScopeBlacklist(TestScopeBaseline): blacklist = ["127.0.0.1"] async def setup_after_prep(self, module_test): @@ -14,9 +62,32 @@ async def setup_after_prep(self, module_test): module_test.set_expect_requests(expect_args=expect_args, respond_args=respond_args) def check(self, module_test, events): + assert len(events) == 1 assert not any(e.type == "URL" for e in events) + assert not any(str(e.host) == "127.0.0.1" for e in events) -class Scope_test_whitelist(Scope_test_blacklist): +class TestScopeWhitelist(TestScopeBlacklist): blacklist = [] whitelist = ["255.255.255.255"] + + def check(self, module_test, events): + assert len(events) == 3 + assert not any(e.type == "URL" for e in events) + assert 1 == len( + [ + e + for e in events + if e.type == "IP_ADDRESS" and e.data == "127.0.0.1" and e.scope_distance == 1 and "target" in e.tags + ] + ) + assert 1 == len( + [ + e + for e in events + if e.type == "URL_UNVERIFIED" + and str(e.host) == "127.0.0.1" + and e.scope_distance == 1 + and "target" in e.tags + ] + ) diff --git a/bbot/test/test_step_1/test_target.py b/bbot/test/test_step_1/test_target.py index 90db526c7..ed5c1b7ef 100644 --- a/bbot/test/test_step_1/test_target.py +++ b/bbot/test/test_step_1/test_target.py @@ -1,12 +1,12 @@ from ..bbot_fixtures import * # noqa: F401 -def test_target(bbot_config, bbot_scanner): - scan1 = bbot_scanner("api.publicapis.org", "8.8.8.8/30", "2001:4860:4860::8888/126", config=bbot_config) - scan2 = bbot_scanner("8.8.8.8/29", "publicapis.org", "2001:4860:4860::8888/125", config=bbot_config) - scan3 = bbot_scanner("8.8.8.8/29", "publicapis.org", "2001:4860:4860::8888/125", config=bbot_config) - scan4 = bbot_scanner("8.8.8.8/29", config=bbot_config) - scan5 = bbot_scanner(config=bbot_config) +def test_target(bbot_scanner): + scan1 = bbot_scanner("api.publicapis.org", "8.8.8.8/30", "2001:4860:4860::8888/126") + scan2 = bbot_scanner("8.8.8.8/29", "publicapis.org", "2001:4860:4860::8888/125") + scan3 = bbot_scanner("8.8.8.8/29", "publicapis.org", "2001:4860:4860::8888/125") + scan4 = bbot_scanner("8.8.8.8/29") + scan5 = bbot_scanner() assert not scan5.target assert len(scan1.target) == 9 assert len(scan4.target) == 8 @@ -38,3 +38,25 @@ def test_target(bbot_config, bbot_scanner): assert scan1.target.get("2001:4860:4860::888c") is None assert str(scan1.target.get("www.api.publicapis.org").host) == "api.publicapis.org" assert scan1.target.get("publicapis.org") is None + + from bbot.scanner.target import Target + + target = Target("evilcorp.com") + assert not "com" in target + assert "evilcorp.com" in target + assert "www.evilcorp.com" in target + strict_target = Target("evilcorp.com", strict_scope=True) + assert not "com" in strict_target + assert "evilcorp.com" in strict_target + assert not "www.evilcorp.com" in strict_target + + target = Target() + 
target.add_target("evilcorp.com") + assert not "com" in target + assert "evilcorp.com" in target + assert "www.evilcorp.com" in target + strict_target = Target(strict_scope=True) + strict_target.add_target("evilcorp.com") + assert not "com" in strict_target + assert "evilcorp.com" in strict_target + assert not "www.evilcorp.com" in strict_target diff --git a/bbot/test/test_step_1/test_web.py b/bbot/test/test_step_1/test_web.py index 675197265..14da286d0 100644 --- a/bbot/test/test_step_1/test_web.py +++ b/bbot/test/test_step_1/test_web.py @@ -1,17 +1,16 @@ import re -from omegaconf import OmegaConf from ..bbot_fixtures import * @pytest.mark.asyncio -async def test_web_helpers(bbot_scanner, bbot_config, bbot_httpserver): - scan1 = bbot_scanner("8.8.8.8", config=bbot_config) - scan2 = bbot_scanner("127.0.0.1", config=bbot_config) +async def test_web_helpers(bbot_scanner, bbot_httpserver): + scan1 = bbot_scanner("8.8.8.8") + scan2 = bbot_scanner("127.0.0.1") - user_agent = bbot_config.get("user_agent", "") + user_agent = CORE.config.get("user_agent", "") headers = {"User-Agent": user_agent} - custom_headers = bbot_config.get("http_headers", {}) + custom_headers = CORE.config.get("http_headers", {}) headers.update(custom_headers) assert headers["test"] == "header" @@ -125,7 +124,7 @@ async def test_web_helpers(bbot_scanner, bbot_config, bbot_httpserver): @pytest.mark.asyncio -async def test_web_interactsh(bbot_scanner, bbot_config, bbot_httpserver): +async def test_web_interactsh(bbot_scanner, bbot_httpserver): from bbot.core.helpers.interactsh import server_list sync_called = False @@ -134,7 +133,7 @@ async def test_web_interactsh(bbot_scanner, bbot_config, bbot_httpserver): sync_correct_url = False async_correct_url = False - scan1 = bbot_scanner("8.8.8.8", config=bbot_config) + scan1 = bbot_scanner("8.8.8.8") scan1.status = "RUNNING" interactsh_client = scan1.helpers.interactsh(poll_interval=3) @@ -186,8 +185,8 @@ def sync_callback(data): @pytest.mark.asyncio -async def test_web_curl(bbot_scanner, bbot_config, bbot_httpserver): - scan = bbot_scanner("127.0.0.1", config=bbot_config) +async def test_web_curl(bbot_scanner, bbot_httpserver): + scan = bbot_scanner("127.0.0.1") helpers = scan.helpers url = bbot_httpserver.url_for("/curl") bbot_httpserver.expect_request(uri="/curl").respond_with_data("curl_yep") @@ -231,7 +230,7 @@ async def test_web_http_compare(httpx_mock, helpers): @pytest.mark.asyncio -async def test_http_proxy(bbot_scanner, bbot_config, bbot_httpserver, proxy_server): +async def test_http_proxy(bbot_scanner, bbot_httpserver, proxy_server): endpoint = "/test_http_proxy" url = bbot_httpserver.url_for(endpoint) # test user agent + custom headers @@ -239,9 +238,7 @@ async def test_http_proxy(bbot_scanner, bbot_config, bbot_httpserver, proxy_serv proxy_address = f"http://127.0.0.1:{proxy_server.server_address[1]}" - test_config = OmegaConf.merge(bbot_config, OmegaConf.create({"http_proxy": proxy_address})) - - scan = bbot_scanner("127.0.0.1", config=test_config) + scan = bbot_scanner("127.0.0.1", config={"http_proxy": proxy_address}) assert len(proxy_server.RequestHandlerClass.urls) == 0 @@ -256,17 +253,15 @@ async def test_http_proxy(bbot_scanner, bbot_config, bbot_httpserver, proxy_serv @pytest.mark.asyncio -async def test_http_ssl(bbot_scanner, bbot_config, bbot_httpserver_ssl): +async def test_http_ssl(bbot_scanner, bbot_httpserver_ssl): endpoint = "/test_http_ssl" url = bbot_httpserver_ssl.url_for(endpoint) # test user agent + custom headers 
bbot_httpserver_ssl.expect_request(uri=endpoint).respond_with_data("test_http_ssl_yep") - verify_config = OmegaConf.merge(bbot_config, OmegaConf.create({"ssl_verify": True, "http_debug": True})) - scan1 = bbot_scanner("127.0.0.1", config=verify_config) + scan1 = bbot_scanner("127.0.0.1", config={"ssl_verify": True, "http_debug": True}) - not_verify_config = OmegaConf.merge(bbot_config, OmegaConf.create({"ssl_verify": False, "http_debug": True})) - scan2 = bbot_scanner("127.0.0.1", config=not_verify_config) + scan2 = bbot_scanner("127.0.0.1", config={"ssl_verify": False, "http_debug": True}) r1 = await scan1.helpers.request(url) assert r1 is None, "Request to self-signed SSL server went through even with ssl_verify=True" @@ -276,12 +271,12 @@ async def test_http_ssl(bbot_scanner, bbot_config, bbot_httpserver_ssl): @pytest.mark.asyncio -async def test_web_cookies(bbot_scanner, bbot_config, httpx_mock): +async def test_web_cookies(bbot_scanner, httpx_mock): import httpx # make sure cookies work when enabled httpx_mock.add_response(url="http://www.evilcorp.com/cookies", headers=[("set-cookie", "wat=asdf; path=/")]) - scan = bbot_scanner(config=bbot_config) + scan = bbot_scanner() client = scan.helpers.AsyncClient(persist_cookies=True) r = await client.get(url="http://www.evilcorp.com/cookies") assert r.cookies["wat"] == "asdf" @@ -294,7 +289,7 @@ async def test_web_cookies(bbot_scanner, bbot_config, httpx_mock): # make sure they don't when they're not httpx_mock.add_response(url="http://www2.evilcorp.com/cookies", headers=[("set-cookie", "wats=fdsa; path=/")]) - scan = bbot_scanner(config=bbot_config) + scan = bbot_scanner() client2 = scan.helpers.AsyncClient(persist_cookies=False) r = await client2.get(url="http://www2.evilcorp.com/cookies") # make sure we can access the cookies diff --git a/bbot/test/test_step_2/module_tests/base.py b/bbot/test/test_step_2/module_tests/base.py index e92f3a63c..7793530df 100644 --- a/bbot/test/test_step_2/module_tests/base.py +++ b/bbot/test/test_step_2/module_tests/base.py @@ -5,20 +5,17 @@ from omegaconf import OmegaConf from types import SimpleNamespace +from ...bbot_fixtures import * from bbot.scanner import Scanner -from bbot.modules import module_loader from bbot.core.helpers.misc import rand_string -from ...bbot_fixtures import test_config, MockResolver log = logging.getLogger("bbot.test.modules") def tempwordlist(content): - tmp_path = "/tmp/.bbot_test/" - from bbot.core.helpers.misc import rand_string, mkdir + from bbot.core.helpers.misc import rand_string - mkdir(tmp_path) - filename = f"{tmp_path}{rand_string(8)}" + filename = bbot_test_dir / f"{rand_string(8)}" with open(filename, "w", errors="ignore") as f: for c in content: line = f"{c}\n" @@ -52,14 +49,14 @@ class ModuleTestBase: class ModuleTest: def __init__(self, module_test_base, httpx_mock, httpserver, httpserver_ssl, monkeypatch, request): self.name = module_test_base.name - self.config = OmegaConf.merge(test_config, OmegaConf.create(module_test_base.config_overrides)) + self.config = OmegaConf.merge(CORE.config, OmegaConf.create(module_test_base.config_overrides)) self.httpx_mock = httpx_mock self.httpserver = httpserver self.httpserver_ssl = httpserver_ssl self.monkeypatch = monkeypatch self.request_fixture = request - self.preloaded = module_loader.preloaded() + self.preloaded = DEFAULT_PRESET.module_loader.preloaded() # handle output, internal module types output_modules = None @@ -78,7 +75,7 @@ def __init__(self, module_test_base, httpx_mock, httpserver, httpserver_ssl, mon 
*module_test_base.targets, modules=modules, output_modules=output_modules, - name=module_test_base._scan_name, + scan_name=module_test_base._scan_name, config=self.config, whitelist=module_test_base.whitelist, blacklist=module_test_base.blacklist, @@ -94,10 +91,10 @@ def set_expect_requests(self, expect_args={}, respond_args={}): def set_expect_requests_handler(self, expect_args=None, request_handler=None): self.httpserver.expect_request(expect_args).respond_with_handler(request_handler) - def mock_dns(self, mock_data, scan=None): + async def mock_dns(self, mock_data, scan=None): if scan is None: scan = self.scan - scan.helpers.dns.resolver = MockResolver(mock_data) + await scan.helpers.dns._mock_dns(mock_data) @property def module(self): diff --git a/bbot/test/test_step_2/module_tests/test_module_affiliates.py b/bbot/test/test_step_2/module_tests/test_module_affiliates.py index 4afd4cd29..b138dce65 100644 --- a/bbot/test/test_step_2/module_tests/test_module_affiliates.py +++ b/bbot/test/test_step_2/module_tests/test_module_affiliates.py @@ -6,7 +6,7 @@ class TestAffiliates(ModuleTestBase): config_overrides = {"dns_resolution": True} async def setup_before_prep(self, module_test): - module_test.mock_dns( + await module_test.mock_dns( { "8.8.8.8.in-addr.arpa": {"PTR": ["dns.google"]}, "dns.google": {"A": ["8.8.8.8"], "NS": ["ns1.zdns.google"]}, diff --git a/bbot/test/test_step_2/module_tests/test_module_aggregate.py b/bbot/test/test_step_2/module_tests/test_module_aggregate.py index 7a41fe022..a41c60701 100644 --- a/bbot/test/test_step_2/module_tests/test_module_aggregate.py +++ b/bbot/test/test_step_2/module_tests/test_module_aggregate.py @@ -5,7 +5,7 @@ class TestAggregate(ModuleTestBase): config_overrides = {"dns_resolution": True, "scope_report_distance": 1} async def setup_before_prep(self, module_test): - module_test.mock_dns({"blacklanternsecurity.com": {"A": ["1.2.3.4"]}}) + await module_test.mock_dns({"blacklanternsecurity.com": {"A": ["1.2.3.4"]}}) def check(self, module_test, events): filename = next(module_test.scan.home.glob("scan-stats-table*.txt")) diff --git a/bbot/test/test_step_2/module_tests/test_module_asset_inventory.py b/bbot/test/test_step_2/module_tests/test_module_asset_inventory.py index 5e8c9b3a1..6b6c78dbf 100644 --- a/bbot/test/test_step_2/module_tests/test_module_asset_inventory.py +++ b/bbot/test/test_step_2/module_tests/test_module_asset_inventory.py @@ -4,11 +4,11 @@ class TestAsset_Inventory(ModuleTestBase): targets = ["127.0.0.1", "bbottest.notreal"] scan_name = "asset_inventory_test" - config_overrides = {"dns_resolution": True, "internal_modules": {"nmap": {"ports": "9999"}}} + config_overrides = {"dns_resolution": True, "modules": {"nmap": {"ports": "9999"}}} modules_overrides = ["asset_inventory", "nmap", "sslcert"] async def setup_before_prep(self, module_test): - module_test.mock_dns( + await module_test.mock_dns( { "1.0.0.127.in-addr.arpa": {"PTR": ["www.bbottest.notreal"]}, "www.bbottest.notreal": {"A": ["127.0.0.1"]}, @@ -32,7 +32,7 @@ def check(self, module_test, events): class TestAsset_InventoryEmitPrevious(TestAsset_Inventory): - config_overrides = {"dns_resolution": True, "output_modules": {"asset_inventory": {"use_previous": True}}} + config_overrides = {"dns_resolution": True, "modules": {"asset_inventory": {"use_previous": True}}} modules_overrides = ["asset_inventory"] def check(self, module_test, events): @@ -54,7 +54,7 @@ def check(self, module_test, events): class TestAsset_InventoryRecheck(TestAsset_Inventory): config_overrides = { 
"dns_resolution": True, - "output_modules": {"asset_inventory": {"use_previous": True, "recheck": True}}, + "modules": {"asset_inventory": {"use_previous": True, "recheck": True}}, } modules_overrides = ["asset_inventory"] diff --git a/bbot/test/test_step_2/module_tests/test_module_baddns.py b/bbot/test/test_step_2/module_tests/test_module_baddns.py index 57cca7d5f..dd533669f 100644 --- a/bbot/test/test_step_2/module_tests/test_module_baddns.py +++ b/bbot/test/test_step_2/module_tests/test_module_baddns.py @@ -24,7 +24,7 @@ async def setup_after_prep(self, module_test): from bbot.modules import baddns as baddns_module from baddns.lib.whoismanager import WhoisManager - module_test.mock_dns( + await module_test.mock_dns( {"bad.dns": {"CNAME": ["baddns.azurewebsites.net."]}, "_NXDOMAIN": ["baddns.azurewebsites.net"]} ) module_test.monkeypatch.setattr(baddns_module.baddns, "select_modules", self.select_modules) @@ -52,7 +52,7 @@ def set_target(self, target): respond_args = {"response_data": "
Oops! We couldn’t find that page.
", "status": 200} module_test.set_expect_requests(expect_args=expect_args, respond_args=respond_args) - module_test.mock_dns( + await module_test.mock_dns( {"bad.dns": {"CNAME": ["baddns.bigcartel.com."]}, "baddns.bigcartel.com": {"A": ["127.0.0.1"]}} ) module_test.monkeypatch.setattr(baddns_module.baddns, "select_modules", self.select_modules) diff --git a/bbot/test/test_step_2/module_tests/test_module_baddns_zone.py b/bbot/test/test_step_2/module_tests/test_module_baddns_zone.py index 349db8ed8..b3810e75a 100644 --- a/bbot/test/test_step_2/module_tests/test_module_baddns_zone.py +++ b/bbot/test/test_step_2/module_tests/test_module_baddns_zone.py @@ -31,7 +31,7 @@ def from_xfr(*args, **kwargs): zone = dns.zone.from_text(zone_text, origin="bad.dns.") return zone - module_test.mock_dns({"bad.dns": {"NS": ["ns1.bad.dns."]}, "ns1.bad.dns": {"A": ["127.0.0.1"]}}) + await module_test.mock_dns({"bad.dns": {"NS": ["ns1.bad.dns."]}, "ns1.bad.dns": {"A": ["127.0.0.1"]}}) module_test.monkeypatch.setattr("dns.zone.from_xfr", from_xfr) module_test.monkeypatch.setattr(WhoisManager, "dispatchWHOIS", self.dispatchWHOIS) @@ -46,7 +46,7 @@ class TestBaddns_zone_nsec(BaseTestBaddns_zone): async def setup_after_prep(self, module_test): from baddns.lib.whoismanager import WhoisManager - module_test.mock_dns( + await module_test.mock_dns( { "bad.dns": {"NSEC": ["asdf.bad.dns"]}, "asdf.bad.dns": {"NSEC": ["zzzz.bad.dns"]}, diff --git a/bbot/test/test_step_2/module_tests/test_module_bucket_amazon.py b/bbot/test/test_step_2/module_tests/test_module_bucket_amazon.py index 6d58dd36f..37ce77c5a 100644 --- a/bbot/test/test_step_2/module_tests/test_module_bucket_amazon.py +++ b/bbot/test/test_step_2/module_tests/test_module_bucket_amazon.py @@ -34,7 +34,7 @@ def module_name(self): @property def modules_overrides(self): - return ["excavate", "speculate", "httpx", self.module_name] + return ["excavate", "speculate", "httpx", self.module_name, "cloud"] def url_setup(self): self.url_1 = f"https://{self.random_bucket_1}/" @@ -71,7 +71,7 @@ async def setup_after_prep(self, module_test): def check(self, module_test, events): # make sure buckets were excavated assert any( - e.type == "STORAGE_BUCKET" and str(e.module) == f"{self.provider}_cloud" for e in events + e.type == "STORAGE_BUCKET" and str(e.module) == f"cloud_{self.provider}" for e in events ), f'bucket not found for module "{self.module_name}"' # make sure open buckets were found if module_test.module.supports_open_check: diff --git a/bbot/test/test_step_2/module_tests/test_module_cloud.py b/bbot/test/test_step_2/module_tests/test_module_cloud.py new file mode 100644 index 000000000..1ee8df5e7 --- /dev/null +++ b/bbot/test/test_step_2/module_tests/test_module_cloud.py @@ -0,0 +1,95 @@ +from .base import ModuleTestBase + +from bbot.scanner import Scanner + + +class TestCloud(ModuleTestBase): + targets = ["http://127.0.0.1:8888", "asdf2.storage.googleapis.com"] + modules_overrides = ["httpx", "excavate", "cloud"] + + async def setup_after_prep(self, module_test): + + module_test.set_expect_requests({"uri": "/"}, {"response_data": "
"}) + + scan = Scanner(config={"cloud": True}) + await scan._prep() + module = scan.modules["cloud"] + providers = scan.helpers.cloud.providers + # make sure we have all the providers + provider_names = ( + "amazon", + "google", + "azure", + "digitalocean", + "oracle", + "akamai", + "cloudflare", + "github", + "zoho", + "fastly", + ) + for provider_name in provider_names: + assert provider_name in providers + + amazon_ranges = list(providers["amazon"].ranges) + assert amazon_ranges + amazon_range = next(iter(amazon_ranges)) + amazon_address = amazon_range.broadcast_address + + ip_event = scan.make_event(amazon_address, source=scan.root_event) + aws_event1 = scan.make_event("amazonaws.com", source=scan.root_event) + aws_event2 = scan.make_event("asdf.amazonaws.com", source=scan.root_event) + aws_event3 = scan.make_event("asdfamazonaws.com", source=scan.root_event) + aws_event4 = scan.make_event("test.asdf.aws", source=scan.root_event) + + other_event1 = scan.make_event("cname.evilcorp.com", source=scan.root_event) + other_event2 = scan.make_event("cname2.evilcorp.com", source=scan.root_event) + other_event3 = scan.make_event("cname3.evilcorp.com", source=scan.root_event) + other_event2._resolved_hosts = {amazon_address} + other_event3._resolved_hosts = {"asdf.amazonaws.com"} + + for event in (ip_event, aws_event1, aws_event2, aws_event4, other_event2, other_event3): + await module.handle_event(event, {}) + assert "cloud-amazon" in event.tags, f"{event} was not properly cloud-tagged" + + for event in (aws_event3, other_event1): + await module.handle_event(event, {}) + assert "cloud-amazon" not in event.tags, f"{event} was improperly cloud-tagged" + assert not any( + t for t in event.tags if t.startswith("cloud-") or t.startswith("cdn-") + ), f"{event} was improperly cloud-tagged" + + google_event1 = scan.make_event("asdf.googleapis.com", source=scan.root_event) + google_event2 = scan.make_event("asdf.google", source=scan.root_event) + google_event3 = scan.make_event("asdf.evilcorp.com", source=scan.root_event) + google_event3._resolved_hosts = {"asdf.storage.googleapis.com"} + + for event in (google_event1, google_event2, google_event3): + await module.handle_event(event, {}) + assert "cloud-google" in event.tags, f"{event} was not properly cloud-tagged" + assert "cloud-storage-bucket" in google_event3.tags + + def check(self, module_test, events): + for e in events: + self.log.debug(e) + assert 2 == len([e for e in events if e.type == "STORAGE_BUCKET"]) + assert 1 == len( + [ + e + for e in events + if e.type == "STORAGE_BUCKET" + and e.data["name"] == "asdf" + and "cloud-amazon" in e.tags + and "cloud-storage-bucket" in e.tags + ] + ) + assert 1 == len( + [ + e + for e in events + if e.type == "STORAGE_BUCKET" + and e.data["name"] == "asdf2" + and "cloud-google" in e.tags + and "cloud-storage-bucket" in e.tags + ] + ) diff --git a/bbot/test/test_step_2/module_tests/test_module_csv.py b/bbot/test/test_step_2/module_tests/test_module_csv.py index fc180d481..0d6e326a9 100644 --- a/bbot/test/test_step_2/module_tests/test_module_csv.py +++ b/bbot/test/test_step_2/module_tests/test_module_csv.py @@ -2,6 +2,9 @@ class TestCSV(ModuleTestBase): + async def setup_after_prep(self, module_test): + await module_test.mock_dns({}) + def check(self, module_test, events): csv_file = module_test.scan.home / "output.csv" with open(csv_file) as f: diff --git a/bbot/test/test_step_2/module_tests/test_module_dehashed.py b/bbot/test/test_step_2/module_tests/test_module_dehashed.py index 34c73de82..73260f327 
100644 --- a/bbot/test/test_step_2/module_tests/test_module_dehashed.py +++ b/bbot/test/test_step_2/module_tests/test_module_dehashed.py @@ -48,7 +48,7 @@ async def setup_before_prep(self, module_test): url=f"https://api.dehashed.com/search?query=domain:blacklanternsecurity.com&size=10000&page=1", json=dehashed_domain_response, ) - module_test.mock_dns( + await module_test.mock_dns( { "bob.com": {"A": ["127.0.0.1"]}, "blacklanternsecurity.com": {"A": ["127.0.0.1"]}, diff --git a/bbot/test/test_step_2/module_tests/test_module_discord.py b/bbot/test/test_step_2/module_tests/test_module_discord.py index 2a4143852..c0a210720 100644 --- a/bbot/test/test_step_2/module_tests/test_module_discord.py +++ b/bbot/test/test_step_2/module_tests/test_module_discord.py @@ -8,7 +8,7 @@ class TestDiscord(ModuleTestBase): modules_overrides = ["discord", "excavate", "badsecrets", "httpx"] webhook_url = "https://discord.com/api/webhooks/1234/deadbeef-P-uF-asdf" - config_overrides = {"output_modules": {"discord": {"webhook_url": webhook_url}}} + config_overrides = {"modules": {"discord": {"webhook_url": webhook_url}}} def custom_setup(self, module_test): respond_args = { diff --git a/bbot/test/test_step_2/module_tests/test_module_dns.py b/bbot/test/test_step_2/module_tests/test_module_dns.py new file mode 100644 index 000000000..d74b62351 --- /dev/null +++ b/bbot/test/test_step_2/module_tests/test_module_dns.py @@ -0,0 +1,61 @@ +from .base import ModuleTestBase + + +class TestDNS(ModuleTestBase): + modules_overrides = ["dns"] + config_overrides = {"dns_resolution": True, "scope_report_distance": 1} + + async def setup_after_prep(self, module_test): + await module_test.mock_dns( + { + "blacklanternsecurity.com": { + "A": ["192.168.0.7"], + "AAAA": ["::1"], + "CNAME": ["www.blacklanternsecurity.com"], + }, + "www.blacklanternsecurity.com": {"A": ["192.168.0.8"]}, + } + ) + + def check(self, module_test, events): + assert 1 == len( + [ + e + for e in events + if e.type == "DNS_NAME" + and e.data == "blacklanternsecurity.com" + and "a-record" in e.tags + and "aaaa-record" in e.tags + and "cname-record" in e.tags + and "private-ip" in e.tags + and e.scope_distance == 0 + and "192.168.0.7" in e.resolved_hosts + and "::1" in e.resolved_hosts + and "www.blacklanternsecurity.com" in e.resolved_hosts + and e.dns_children + == {"A": {"192.168.0.7"}, "AAAA": {"::1"}, "CNAME": {"www.blacklanternsecurity.com"}} + ] + ) + assert 1 == len( + [ + e + for e in events + if e.type == "DNS_NAME" + and e.data == "www.blacklanternsecurity.com" + and "a-record" in e.tags + and "private-ip" in e.tags + and e.scope_distance == 0 + and "192.168.0.8" in e.resolved_hosts + and e.dns_children == {"A": {"192.168.0.8"}} + ] + ) + assert 1 == len( + [ + e + for e in events + if e.type == "IP_ADDRESS" + and e.data == "192.168.0.7" + and "private-ip" in e.tags + and e.scope_distance == 1 + ] + ) diff --git a/bbot/test/test_step_2/module_tests/test_module_dnscommonsrv.py b/bbot/test/test_step_2/module_tests/test_module_dnscommonsrv.py index aaf26664c..5850fbd49 100644 --- a/bbot/test/test_step_2/module_tests/test_module_dnscommonsrv.py +++ b/bbot/test/test_step_2/module_tests/test_module_dnscommonsrv.py @@ -6,7 +6,7 @@ class TestDNSCommonSRV(ModuleTestBase): config_overrides = {"dns_resolution": True} async def setup_after_prep(self, module_test): - module_test.mock_dns( + await module_test.mock_dns( { "_ldap._tcp.gc._msdcs.blacklanternsecurity.notreal": { "SRV": ["0 100 3268 asdf.blacklanternsecurity.notreal"] diff --git 
a/bbot/test/test_step_2/module_tests/test_module_ffuf_shortnames.py b/bbot/test/test_step_2/module_tests/test_module_ffuf_shortnames.py index cbbec11ea..1f624a410 100644 --- a/bbot/test/test_step_2/module_tests/test_module_ffuf_shortnames.py +++ b/bbot/test/test_step_2/module_tests/test_module_ffuf_shortnames.py @@ -143,7 +143,7 @@ async def setup_after_prep(self, module_test): tags=["shortname-file"], ) ) - module_test.scan.target._events["http://127.0.0.1:8888"] = seed_events + module_test.scan.target._events = set(seed_events) expect_args = {"method": "GET", "uri": "/administrator.aspx"} respond_args = {"response_data": "alive"} diff --git a/bbot/test/test_step_2/module_tests/test_module_github_org.py b/bbot/test/test_step_2/module_tests/test_module_github_org.py index 4a01544f6..fd0e3ea5b 100644 --- a/bbot/test/test_step_2/module_tests/test_module_github_org.py +++ b/bbot/test/test_step_2/module_tests/test_module_github_org.py @@ -280,7 +280,7 @@ async def setup_before_prep(self, module_test): ) def check(self, module_test, events): - assert len(events) == 6 + assert len(events) == 7 assert 1 == len( [ e @@ -298,10 +298,22 @@ def check(self, module_test, events): if e.type == "SOCIAL" and e.data["platform"] == "github" and e.data["profile_name"] == "blacklanternsecurity" + and str(e.module) == "github_org" and "github-org" in e.tags and e.scope_distance == 1 ] ), "Failed to find blacklanternsecurity github" + assert 1 == len( + [ + e + for e in events + if e.type == "SOCIAL" + and e.data["platform"] == "github" + and e.data["profile_name"] == "blacklanternsecurity" + and str(e.module) == "social" + and e.scope_distance == 1 + ] + ), "Failed to find blacklanternsecurity github (social module)" assert 1 == len( [ e @@ -309,6 +321,7 @@ def check(self, module_test, events): if e.type == "SOCIAL" and e.data["platform"] == "github" and e.data["profile_name"] == "TheTechromancer" + and str(e.module) == "github_org" and "github-org-member" in e.tags and e.scope_distance == 2 ] @@ -329,7 +342,7 @@ class TestGithub_Org_No_Members(TestGithub_Org): config_overrides = {"modules": {"github_org": {"include_members": False}}} def check(self, module_test, events): - assert len(events) == 5 + assert len(events) == 6 assert 1 == len( [ e @@ -337,10 +350,22 @@ def check(self, module_test, events): if e.type == "SOCIAL" and e.data["platform"] == "github" and e.data["profile_name"] == "blacklanternsecurity" + and str(e.module) == "github_org" and "github-org" in e.tags and e.scope_distance == 1 ] ), "Failed to find blacklanternsecurity github" + assert 1 == len( + [ + e + for e in events + if e.type == "SOCIAL" + and e.data["platform"] == "github" + and e.data["profile_name"] == "blacklanternsecurity" + and str(e.module) == "social" + and e.scope_distance == 1 + ] + ), "Failed to find blacklanternsecurity github (social module)" assert 0 == len( [ e @@ -356,7 +381,7 @@ class TestGithub_Org_MemberRepos(TestGithub_Org): config_overrides = {"modules": {"github_org": {"include_member_repos": True}}} def check(self, module_test, events): - assert len(events) == 7 + assert len(events) == 8 assert 1 == len( [ e @@ -366,4 +391,4 @@ def check(self, module_test, events): and e.data["url"] == "https://github.com/TheTechromancer/websitedemo" and e.scope_distance == 2 ] - ), "Found to find TheTechromancer github repo" + ), "Failed to find TheTechromancer github repo" diff --git a/bbot/test/test_step_2/module_tests/test_module_http.py b/bbot/test/test_step_2/module_tests/test_module_http.py index d0afcefb2..43b7189ad 
100644 --- a/bbot/test/test_step_2/module_tests/test_module_http.py +++ b/bbot/test/test_step_2/module_tests/test_module_http.py @@ -7,7 +7,7 @@ class TestHTTP(ModuleTestBase): downstream_url = "https://blacklanternsecurity.fakedomain:1234/events" config_overrides = { - "output_modules": { + "modules": { "http": { "url": downstream_url, "method": "PUT", @@ -56,8 +56,8 @@ def check(self, module_test, events): class TestHTTPSIEMFriendly(TestHTTP): modules_overrides = ["http"] - config_overrides = {"output_modules": {"http": dict(TestHTTP.config_overrides["output_modules"]["http"])}} - config_overrides["output_modules"]["http"]["siem_friendly"] = True + config_overrides = {"modules": {"http": dict(TestHTTP.config_overrides["modules"]["http"])}} + config_overrides["modules"]["http"]["siem_friendly"] = True def verify_data(self, j): return j["data"] == {"DNS_NAME": "blacklanternsecurity.com"} and j["type"] == "DNS_NAME" diff --git a/bbot/test/test_step_2/module_tests/test_module_httpx.py b/bbot/test/test_step_2/module_tests/test_module_httpx.py index ebd9bbdb1..f67525aeb 100644 --- a/bbot/test/test_step_2/module_tests/test_module_httpx.py +++ b/bbot/test/test_step_2/module_tests/test_module_httpx.py @@ -56,7 +56,7 @@ def check(self, module_test, events): class TestHTTPX_404(ModuleTestBase): targets = ["https://127.0.0.1:9999"] modules_overrides = ["httpx", "speculate", "excavate"] - config_overrides = {"internal_modules": {"speculate": {"ports": "8888,9999"}}} + config_overrides = {"modules": {"speculate": {"ports": "8888,9999"}}} async def setup_after_prep(self, module_test): module_test.httpserver.expect_request("/").respond_with_data( diff --git a/bbot/test/test_step_2/module_tests/test_module_internetdb.py b/bbot/test/test_step_2/module_tests/test_module_internetdb.py index d24cdebc0..54ec6a163 100644 --- a/bbot/test/test_step_2/module_tests/test_module_internetdb.py +++ b/bbot/test/test_step_2/module_tests/test_module_internetdb.py @@ -5,7 +5,7 @@ class TestInternetDB(ModuleTestBase): config_overrides = {"dns_resolution": True} async def setup_before_prep(self, module_test): - module_test.mock_dns( + await module_test.mock_dns( { "blacklanternsecurity.com": {"A": ["1.2.3.4"]}, "autodiscover.blacklanternsecurity.com": {"A": ["2.3.4.5"]}, diff --git a/bbot/test/test_step_2/module_tests/test_module_ipneighbor.py b/bbot/test/test_step_2/module_tests/test_module_ipneighbor.py index b8ba8331a..edb7dbff6 100644 --- a/bbot/test/test_step_2/module_tests/test_module_ipneighbor.py +++ b/bbot/test/test_step_2/module_tests/test_module_ipneighbor.py @@ -6,7 +6,7 @@ class TestIPNeighbor(ModuleTestBase): config_overrides = {"scope_report_distance": 1, "dns_resolution": True, "scope_dns_search_distance": 2} async def setup_after_prep(self, module_test): - module_test.mock_dns( + await module_test.mock_dns( {"3.0.0.127.in-addr.arpa": {"PTR": ["asdf.www.bls.notreal"]}, "asdf.www.bls.notreal": {"A": ["127.0.0.3"]}} ) diff --git a/bbot/test/test_step_2/module_tests/test_module_json.py b/bbot/test/test_step_2/module_tests/test_module_json.py index 1e67db085..53affcd30 100644 --- a/bbot/test/test_step_2/module_tests/test_module_json.py +++ b/bbot/test/test_step_2/module_tests/test_module_json.py @@ -6,7 +6,7 @@ class TestJSON(ModuleTestBase): def check(self, module_test, events): - txt_file = module_test.scan.home / "output.ndjson" + txt_file = module_test.scan.home / "output.json" lines = list(module_test.scan.helpers.read_file(txt_file)) assert lines e = event_from_json(json.loads(lines[0])) @@ -16,10 +16,10 
@@ def check(self, module_test, events): class TestJSONSIEMFriendly(ModuleTestBase): modules_overrides = ["json"] - config_overrides = {"output_modules": {"json": {"siem_friendly": True}}} + config_overrides = {"modules": {"json": {"siem_friendly": True}}} def check(self, module_test, events): - txt_file = module_test.scan.home / "output.ndjson" + txt_file = module_test.scan.home / "output.json" lines = list(module_test.scan.helpers.read_file(txt_file)) passed = False for line in lines: diff --git a/bbot/test/test_step_2/module_tests/test_module_nmap.py b/bbot/test/test_step_2/module_tests/test_module_nmap.py index 092f84a47..32b5495e5 100644 --- a/bbot/test/test_step_2/module_tests/test_module_nmap.py +++ b/bbot/test/test_step_2/module_tests/test_module_nmap.py @@ -29,8 +29,7 @@ def check(self, module_test, events): class TestNmapAssetInventory(ModuleTestBase): targets = ["127.0.0.1/31"] config_overrides = { - "modules": {"nmap": {"ports": "8888,8889"}}, - "output_modules": {"asset_inventory": {"use_previous": True}}, + "modules": {"nmap": {"ports": "8888,8889"}, "asset_inventory": {"use_previous": True}}, } modules_overrides = ["nmap", "asset_inventory"] module_name = "nmap" @@ -39,9 +38,14 @@ class TestNmapAssetInventory(ModuleTestBase): async def setup_after_prep(self, module_test): from bbot.scanner import Scanner - first_scan_config = module_test.scan.config.copy() - first_scan_config["output_modules"]["asset_inventory"]["use_previous"] = False - first_scan = Scanner("127.0.0.1", name=self.scan_name, modules=["asset_inventory"], config=first_scan_config) + first_scan = Scanner( + "127.0.0.1", + scan_name=self.scan_name, + output_modules=["asset_inventory"], + config={ + "modules": {"nmap": {"ports": "8888,8889"}, "asset_inventory": {"use_previous": False}}, + }, + ) await first_scan.async_start_without_generator() asset_inventory_output_file = first_scan.home / "asset-inventory.csv" diff --git a/bbot/test/test_step_2/module_tests/test_module_postman.py b/bbot/test/test_step_2/module_tests/test_module_postman.py index 21f464054..8e9c0f3bf 100644 --- a/bbot/test/test_step_2/module_tests/test_module_postman.py +++ b/bbot/test/test_step_2/module_tests/test_module_postman.py @@ -235,7 +235,7 @@ async def new_emit_event(event_data, event_type, **kwargs): await old_emit_event(event_data, event_type, **kwargs) module_test.monkeypatch.setattr(module_test.module, "emit_event", new_emit_event) - module_test.mock_dns({"asdf.blacklanternsecurity.com": {"A": ["127.0.0.1"]}}) + await module_test.mock_dns({"asdf.blacklanternsecurity.com": {"A": ["127.0.0.1"]}}) request_args = dict(uri="/_api/request/28129865-987c8ac8-bfa9-4bab-ade9-88ccf0597862") respond_args = dict(response_data="https://asdf.blacklanternsecurity.com") diff --git a/bbot/test/test_step_2/module_tests/test_module_slack.py b/bbot/test/test_step_2/module_tests/test_module_slack.py index b486d7df2..2efea1478 100644 --- a/bbot/test/test_step_2/module_tests/test_module_slack.py +++ b/bbot/test/test_step_2/module_tests/test_module_slack.py @@ -6,4 +6,4 @@ class TestSlack(DiscordBase): modules_overrides = ["slack", "excavate", "badsecrets", "httpx"] webhook_url = "https://hooks.slack.com/services/deadbeef/deadbeef/deadbeef" - config_overrides = {"output_modules": {"slack": {"webhook_url": webhook_url}}} + config_overrides = {"modules": {"slack": {"webhook_url": webhook_url}}} diff --git a/bbot/test/test_step_2/module_tests/test_module_speculate.py b/bbot/test/test_step_2/module_tests/test_module_speculate.py index 2dcafaddc..2f7d6b7f3 
100644 --- a/bbot/test/test_step_2/module_tests/test_module_speculate.py +++ b/bbot/test/test_step_2/module_tests/test_module_speculate.py @@ -28,7 +28,7 @@ class TestSpeculate_OpenPorts(ModuleTestBase): config_overrides = {"speculate": True} async def setup_before_prep(self, module_test): - module_test.mock_dns( + await module_test.mock_dns( { "evilcorp.com": {"A": ["127.0.254.1"]}, "asdf.evilcorp.com": {"A": ["127.0.254.2"]}, diff --git a/bbot/test/test_step_2/module_tests/test_module_splunk.py b/bbot/test/test_step_2/module_tests/test_module_splunk.py index 67d67a4ef..d55ed17c2 100644 --- a/bbot/test/test_step_2/module_tests/test_module_splunk.py +++ b/bbot/test/test_step_2/module_tests/test_module_splunk.py @@ -7,7 +7,7 @@ class TestSplunk(ModuleTestBase): downstream_url = "https://splunk.blacklanternsecurity.fakedomain:1234/services/collector" config_overrides = { - "output_modules": { + "modules": { "splunk": { "url": downstream_url, "hectoken": "HECTOKEN", diff --git a/bbot/test/test_step_2/module_tests/test_module_subdomains.py b/bbot/test/test_step_2/module_tests/test_module_subdomains.py index 9aa9f7b5e..65b9a8a03 100644 --- a/bbot/test/test_step_2/module_tests/test_module_subdomains.py +++ b/bbot/test/test_step_2/module_tests/test_module_subdomains.py @@ -17,7 +17,7 @@ def check(self, module_test, events): class TestSubdomainsUnresolved(TestSubdomains): - config_overrides = {"output_modules": {"subdomains": {"include_unresolved": True}}} + config_overrides = {"modules": {"subdomains": {"include_unresolved": True}}} def check(self, module_test, events): sub_file = module_test.scan.home / "subdomains.txt" diff --git a/bbot/test/test_step_2/module_tests/test_module_teams.py b/bbot/test/test_step_2/module_tests/test_module_teams.py index f544f5cb9..89344b680 100644 --- a/bbot/test/test_step_2/module_tests/test_module_teams.py +++ b/bbot/test/test_step_2/module_tests/test_module_teams.py @@ -8,7 +8,7 @@ class TestTeams(DiscordBase): modules_overrides = ["teams", "excavate", "badsecrets", "httpx"] webhook_url = "https://evilcorp.webhook.office.com/webhookb2/deadbeef@deadbeef/IncomingWebhook/deadbeef/deadbeef" - config_overrides = {"output_modules": {"teams": {"webhook_url": webhook_url}}} + config_overrides = {"modules": {"teams": {"webhook_url": webhook_url}}} async def setup_after_prep(self, module_test): self.custom_setup(module_test) diff --git a/bbot/test/test_step_2/module_tests/test_module_web_report.py b/bbot/test/test_step_2/module_tests/test_module_web_report.py index a37c178e2..c34eef00f 100644 --- a/bbot/test/test_step_2/module_tests/test_module_web_report.py +++ b/bbot/test/test_step_2/module_tests/test_module_web_report.py @@ -13,8 +13,6 @@ async def setup_before_prep(self, module_test): module_test.set_expect_requests(respond_args=respond_args) def check(self, module_test, events): - for e in events: - module_test.log.critical(e) report_file = module_test.scan.home / "web_report.html" with open(report_file) as f: report_content = f.read() diff --git a/bbot/test/test_step_2/module_tests/test_module_websocket.py b/bbot/test/test_step_2/module_tests/test_module_websocket.py index d1620702c..fcf5c2eee 100644 --- a/bbot/test/test_step_2/module_tests/test_module_websocket.py +++ b/bbot/test/test_step_2/module_tests/test_module_websocket.py @@ -23,7 +23,7 @@ async def server_coroutine(): class TestWebsocket(ModuleTestBase): - config_overrides = {"output_modules": {"websocket": {"url": "ws://127.0.0.1:8765/testing"}}} + config_overrides = {"modules": {"websocket": {"url": 
"ws://127.0.0.1:8765/testing"}}} async def setup_before_prep(self, module_test): self.server_task = asyncio.create_task(server_coroutine()) diff --git a/docs/contribution.md b/docs/contribution.md index 58b1b45e8..b291cea68 100644 --- a/docs/contribution.md +++ b/docs/contribution.md @@ -2,214 +2,8 @@ We welcome contributions! If you have an idea for a new module, or are a Python developer who wants to get involved, please fork us or come talk to us on [Discord](https://discord.com/invite/PZqkgxu5SA). -## Setting Up a Dev Environment +To get started devving, see the following links: -### Installation (Poetry) - -[Poetry](https://python-poetry.org/) is the recommended method of installation if you want to dev on BBOT. To set up a dev environment with Poetry, you can follow these steps: - -- Fork [BBOT](https://github.com/blacklanternsecurity/bbot) on GitHub -- Clone your fork and set up a development environment with Poetry: - -```bash -# clone your forked repo and cd into it -git clone git@github.com//bbot.git -cd bbot - -# install poetry -curl -sSL https://install.python-poetry.org | python3 - - -# install pip dependencies -poetry install -# install pre-commit hooks, etc. -poetry run pre-commit install - -# enter virtual environment -poetry shell - -bbot --help -``` - -- Now, any changes you make in the code will be reflected in the `bbot` command. -- After making your changes, run the tests locally to ensure they pass. - -```bash -# auto-format code indentation, etc. -black . - -# run tests -./bbot/test/run_tests.sh -``` - -- Finally, commit and push your changes, and create a pull request to the `dev` branch of the main BBOT repo. - - -## Creating a Module - -Writing a module is easy and requires only a basic understanding of Python. It consists of a few steps: - -1. Create a new `.py` file in `bbot/modules` -1. At the top of the file, import `BaseModule` -1. Declare a class that inherits from `BaseModule` - - the class must have the same name as your file (case-insensitive) -1. Define in `watched_events` what type of data your module will consume -1. Define in `produced_events` what type of data your module will produce -1. Define (via `flags`) whether your module is `active` or `passive`, and whether it's `safe` or `aggressive` -1. 
**Put your main logic in `.handle_event()`** - -Here is an example of a simple module that performs whois lookups: - -```python title="bbot/modules/whois.py" -from bbot.modules.base import BaseModule - -class whois(BaseModule): - watched_events = ["DNS_NAME"] # watch for DNS_NAME events - produced_events = ["WHOIS"] # we produce WHOIS events - flags = ["passive", "safe"] - meta = {"description": "Query WhoisXMLAPI for WHOIS data"} - options = {"api_key": ""} # module config options - options_desc = {"api_key": "WhoisXMLAPI Key"} - per_domain_only = True # only run once per domain - - base_url = "https://www.whoisxmlapi.com/whoisserver/WhoisService" - - # one-time setup - runs at the beginning of the scan - async def setup(self): - self.api_key = self.config.get("api_key") - if not self.api_key: - # soft-fail if no API key is set - return None, "Must set API key" - - async def handle_event(self, event): - self.hugesuccess(f"Got {event} (event.data: {event.data})") - _, domain = self.helpers.split_domain(event.data) - url = f"{self.base_url}?apiKey={self.api_key}&domainName={domain}&outputFormat=JSON" - self.hugeinfo(f"Visiting {url}") - response = await self.helpers.request(url) - if response is not None: - await self.emit_event(response.json(), "WHOIS", source=event) -``` - -After saving the module, you can run it with `-m`: - -```bash -# run a scan enabling the module in bbot/modules/mymodule.py -bbot -t evilcorp.com -m whois -``` - -### `handle_event()` and `emit_event()` - -The `handle_event()` method is the most important part of the module. By overriding this method, you control what the module does. During a scan, when an [event](./scanning/events.md) from your `watched_events` is encountered (a `DNS_NAME` in this example), `handle_event()` is automatically called with that event as its argument. - -The `emit_event()` method is how modules return data. When you call `emit_event()`, it creates an [event](./scanning/events.md) and outputs it, sending it any modules that are interested in that data type. - -### `setup()` - -A module's `setup()` method is used for performing one-time setup at the start of the scan, like downloading a wordlist or checking to make sure an API key is valid. It needs to return either: - -1. `True` - module setup succeeded -2. `None` - module setup soft-failed (scan will continue but module will be disabled) -3. `False` - module setup hard-failed (scan will abort) - -Optionally, it can also return a reason. Here are some examples: - -```python -async def setup(self): - if not self.config.get("api_key"): - # soft-fail - return None, "No API key specified" - -async def setup(self): - try: - wordlist = self.helpers.wordlist("https://raw.githubusercontent.com/user/wordlist.txt") - except WordlistError as e: - # hard-fail - return False, f"Error downloading wordlist: {e}" - -async def setup(self): - self.timeout = self.config.get("timeout", 5) - # success - return True -``` - -### Module Config Options - -Each module can have its own set of config options. These live in the `options` and `options_desc` attributes on your class. Both are dictionaries; `options` is for defaults and `options_desc` is for descriptions. Here is a typical example: - -```python title="bbot/modules/nmap.py" -class nmap(BaseModule): - # ... 
- options = { - "top_ports": 100, - "ports": "", - "timing": "T4", - "skip_host_discovery": True, - } - options_desc = { - "top_ports": "Top ports to scan (default 100) (to override, specify 'ports')", - "ports": "Ports to scan", - "timing": "-T<0-5>: Set timing template (higher is faster)", - "skip_host_discovery": "skip host discovery (-Pn)", - } - - async def setup(self): - self.ports = self.config.get("ports", "") - self.timing = self.config.get("timing", "T4") - self.top_ports = self.config.get("top_ports", 100) - self.skip_host_discovery = self.config.get("skip_host_discovery", True) -``` - -Once you've defined these variables, you can pass the options via `-c`: - -```bash -bbot -m nmap -c modules.nmap.top_ports=250 -``` - -... or via the config: - -```yaml title="~/.config/bbot/bbot.yml" -modules: - nmap: - top_ports: 250 -``` - -Inside the module, you access them via `self.config`, e.g.: - -```python -self.config.get("top_ports") -``` - -### Module Dependencies - -BBOT automates module dependencies with **Ansible**. If your module relies on a third-party binary, OS package, or python library, you can specify them in the `deps_*` attributes of your module. - -```python -class MyModule(BaseModule): - ... - deps_apt = ["chromium-browser"] - deps_ansible = [ - { - "name": "install dev tools", - "package": {"name": ["gcc", "git", "make"], "state": "present"}, - "become": True, - "ignore_errors": True, - }, - { - "name": "Download massdns source code", - "git": { - "repo": "https://github.com/blechschmidt/massdns.git", - "dest": "#{BBOT_TEMP}/massdns", - "single_branch": True, - "version": "master", - }, - }, - { - "name": "Build massdns", - "command": {"chdir": "#{BBOT_TEMP}/massdns", "cmd": "make", "creates": "#{BBOT_TEMP}/massdns/bin/massdns"}, - }, - { - "name": "Install massdns", - "copy": {"src": "#{BBOT_TEMP}/massdns/bin/massdns", "dest": "#{BBOT_TOOLS}/", "mode": "u+x,g+x,o+x"}, - }, - ] -``` +- [Setting up a Dev Environment](./dev/dev_environment.md) +- [How to Write a BBOT Module](./dev/module_howto.md) +- [Discord Bot Example](./dev/discord_bot.md) diff --git a/docs/dev/core.md b/docs/dev/core.md new file mode 100644 index 000000000..d138681f9 --- /dev/null +++ b/docs/dev/core.md @@ -0,0 +1 @@ +::: bbot.core.core.BBOTCore diff --git a/docs/dev/dev_environment.md b/docs/dev/dev_environment.md new file mode 100644 index 000000000..054656150 --- /dev/null +++ b/docs/dev/dev_environment.md @@ -0,0 +1,40 @@ +## Setting Up a Dev Environment + +### Installation (Poetry) + +[Poetry](https://python-poetry.org/) is the recommended method of installation if you want to dev on BBOT. To set up a dev environment with Poetry, you can follow these steps: + +- Fork [BBOT](https://github.com/blacklanternsecurity/bbot) on GitHub +- Clone your fork and set up a development environment with Poetry: + +```bash +# clone your forked repo and cd into it +git clone git@github.com//bbot.git +cd bbot + +# install poetry +curl -sSL https://install.python-poetry.org | python3 - + +# install pip dependencies +poetry install +# install pre-commit hooks, etc. +poetry run pre-commit install + +# enter virtual environment +poetry shell + +bbot --help +``` + +- Now, any changes you make in the code will be reflected in the `bbot` command. +- After making your changes, run the tests locally to ensure they pass. + +```bash +# auto-format code indentation, etc. +black . 
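+
+# optionally, preview what black would change without modifying any files
+black --check --diff .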
+ +# run tests +./bbot/test/run_tests.sh +``` + +- Finally, commit and push your changes, and create a pull request to the `dev` branch of the main BBOT repo. diff --git a/docs/dev/discord_bot.md b/docs/dev/discord_bot.md new file mode 100644 index 000000000..ff2aa860a --- /dev/null +++ b/docs/dev/discord_bot.md @@ -0,0 +1,8 @@ + +![bbot-discord](https://github.com/blacklanternsecurity/bbot/assets/20261699/22b268a2-0dfd-4c2a-b7c5-548c0f2cc6f9) + +Below is a simple Discord bot designed to run BBOT scans. + +```python title="examples/discord_bot.py" +--8<-- "examples/discord_bot.py" +``` diff --git a/docs/dev/index.md b/docs/dev/index.md index 093a3aefb..526f03ce9 100644 --- a/docs/dev/index.md +++ b/docs/dev/index.md @@ -1,96 +1,86 @@ # BBOT Developer Reference -BBOT exposes a convenient API that allows you to create, start, and stop scans using Python code. +BBOT exposes a Python API that allows you to create, start, and stop scans. Documented in this section are commonly-used classes and functions within BBOT, along with usage examples. -## Discord Bot Example +## Running a BBOT Scan from Python -![bbot-discord](https://github.com/blacklanternsecurity/bbot/assets/20261699/22b268a2-0dfd-4c2a-b7c5-548c0f2cc6f9) +#### Synchronous +```python +from bbot.scanner import Scanner -Below is a simple Discord bot designed to run BBOT scans. +scan = Scanner("evilcorp.com", presets=["subdomain-enum"]) +for event in scan.start(): + print(event) +``` +#### Asynchronous ```python +from bbot.scanner import Scanner + +async def main(): + scan = Scanner("evilcorp.com", presets=["subdomain-enum"]) + async for event in scan.async_start(): + print(event.json()) + import asyncio -import discord -from discord.ext import commands +asyncio.run(main()) +``` -from bbot.scanner import Scanner -from bbot.modules import module_loader -from bbot.modules.output.discord import Discord - - -# make list of BBOT modules to enable for the scan -bbot_modules = ["excavate", "speculate", "aggregate"] -for module_name, preloaded in module_loader.preloaded().items(): - flags = preloaded["flags"] - if "subdomain-enum" in flags and "passive" in flags and "slow" not in flags: - bbot_modules.append(module_name) - - -class BBOTDiscordBot(commands.Cog): - """ - A simple Discord bot capable of running a BBOT scan. - - To set up: - 1. Go to Discord Developer Portal (https://discord.com/developers) - 2. Create a new application - 3. Create an invite link for the bot, visit the link to invite it to your server - - Your Application --> OAuth2 --> URL Generator - - For Scopes, select "bot"" - - For Bot Permissions, select: - - Read Messages/View Channels - - Send Messages - 4. Turn on "Message Content Intent" - - Your Application --> Bot --> Privileged Gateway Intents --> Message Content Intent - 5. Copy your Discord Bot Token and put it at the top this file - - Your Application --> Bot --> Reset Token - 6. Run this script - - To scan evilcorp.com, you would type: - - /scan evilcorp.com - - Results will be output to the same channel. 
- """ - def __init__(self): - self.current_scan = None - - @commands.command(name="scan", description="Scan a target with BBOT.") - async def scan(self, ctx, target: str): - if self.current_scan is not None: - self.current_scan.stop() - await ctx.send(f"Starting scan against {target}.") - - # creates scan instance - self.current_scan = Scanner(target, modules=bbot_modules) - discord_module = Discord(self.current_scan) - - seen = set() - num_events = 0 - # start scan and iterate through results - async for event in self.current_scan.async_start(): - if hash(event) in seen: - continue - seen.add(hash(event)) - await ctx.send(discord_module.format_message(event)) - num_events += 1 - - await ctx.send(f"Finished scan against {target}. {num_events:,} results.") - self.current_scan = None - - -if __name__ == "__main__": - intents = discord.Intents.default() - intents.message_content = True - bot = commands.Bot(command_prefix="/", intents=intents) - - @bot.event - async def on_ready(): - print(f"We have logged in as {bot.user}") - await bot.add_cog(BBOTDiscordBot()) - - bot.run("DISCORD_BOT_TOKEN_HERE") +For a full listing of `Scanner` attributes and functions, see the [`Scanner` Code Reference](./scanner.md). + +#### Multiple Targets + +You can specify any number of targets: + +```python +# create a scan against multiple targets +scan = Scanner( + "evilcorp.com", + "evilcorp.org", + "evilcorp.ce", + "4.3.2.1", + "1.2.3.4/24", + presets=["subdomain-enum"] +) + +# this is the same as: +targets = ["evilcorp.com", "evilcorp.org", "evilcorp.ce", "4.3.2.1", "1.2.3.4/24"] +scan = Scanner(*targets, presets=["subdomain-enum"]) +``` + +For more details, including which types of targets are valid, see [Targets](../scanning/index.md#targets) + +#### Other Custom Options + +In many cases, using a [Preset](../scanning/presets.md) like `subdomain-enum` is sufficient. However, the `Scanner` is flexible and accepts many other arguments that can override the default functionality. You can specify [`flags`](../index.md#flags), [`modules`](../index.md#modules), [`output_modules`](../output.md), a [`whitelist` or `blacklist`](../scanning/index.md#whitelists-and-blacklists), and custom [`config` options](../scanning/configuration.md): + +```python +# create a scan against multiple targets +scan = Scanner( + # targets + "evilcorp.com", + "4.3.2.1", + # enable these presets + presets=["subdomain-enum"], + # whitelist these hosts + whitelist=["evilcorp.com", "evilcorp.org"], + # blacklist these hosts + blacklist=["prod.evilcorp.com"], + # also enable these individual modules + modules=["nuclei", "ipstack"], + # exclude modules with these flags + exclude_flags=["slow"], + # custom config options + config={ + "modules": { + "nuclei": { + "tags": "apache,nginx" + } + } + } +) ``` -[Next Up: Scanner -->](scanner.md){ .md-button .md-button--primary } +For a list of all the possible scan options, see the [`Presets` Code Reference](./presets.md) diff --git a/docs/dev/module_howto.md b/docs/dev/module_howto.md new file mode 100644 index 000000000..94d8ffe60 --- /dev/null +++ b/docs/dev/module_howto.md @@ -0,0 +1,168 @@ + +Writing a module is easy and requires only a basic understanding of Python. It consists of a few steps: + +1. Create a new `.py` file in `bbot/modules` +1. At the top of the file, import `BaseModule` +1. Declare a class that inherits from `BaseModule` + - the class must have the same name as your file (case-insensitive) +1. Define in `watched_events` what type of data your module will consume +1. 
Define in `produced_events` what type of data your module will produce
+1. Define (via `flags`) whether your module is `active` or `passive`, and whether it's `safe` or `aggressive`
+1. **Put your main logic in `.handle_event()`**
+
+Here is an example of a simple module that performs whois lookups:
+
+```python title="bbot/modules/whois.py"
+from bbot.modules.base import BaseModule
+
+class whois(BaseModule):
+    watched_events = ["DNS_NAME"] # watch for DNS_NAME events
+    produced_events = ["WHOIS"] # we produce WHOIS events
+    flags = ["passive", "safe"]
+    meta = {"description": "Query WhoisXMLAPI for WHOIS data"}
+    options = {"api_key": ""} # module config options
+    options_desc = {"api_key": "WhoisXMLAPI Key"}
+    per_domain_only = True # only run once per domain
+
+    base_url = "https://www.whoisxmlapi.com/whoisserver/WhoisService"
+
+    # one-time setup - runs at the beginning of the scan
+    async def setup(self):
+        self.api_key = self.config.get("api_key")
+        if not self.api_key:
+            # soft-fail if no API key is set
+            return None, "Must set API key"
+
+    async def handle_event(self, event):
+        self.hugesuccess(f"Got {event} (event.data: {event.data})")
+        _, domain = self.helpers.split_domain(event.data)
+        url = f"{self.base_url}?apiKey={self.api_key}&domainName={domain}&outputFormat=JSON"
+        self.hugeinfo(f"Visiting {url}")
+        response = await self.helpers.request(url)
+        if response is not None:
+            await self.emit_event(response.json(), "WHOIS", source=event)
+```
+
+After saving the module, you can run it with `-m`:
+
+```bash
+# run a scan enabling the module in bbot/modules/whois.py
+bbot -t evilcorp.com -m whois
+```
+
+### `handle_event()` and `emit_event()`
+
+The `handle_event()` method is the most important part of the module. By overriding this method, you control what the module does. During a scan, when an [event](../scanning/events.md) from your `watched_events` is encountered (a `DNS_NAME` in this example), `handle_event()` is automatically called with that event as its argument.
+
+The `emit_event()` method is how modules return data. When you call `emit_event()`, it creates an [event](../scanning/events.md) and outputs it, sending it to any modules that are interested in that data type.
+
+### `setup()`
+
+A module's `setup()` method is used for performing one-time setup at the start of the scan, like downloading a wordlist or checking to make sure an API key is valid. It needs to return one of the following:
+
+1. `True` - module setup succeeded
+2. `None` - module setup soft-failed (scan will continue but module will be disabled)
+3. `False` - module setup hard-failed (scan will abort)
+
+Optionally, it can also return a reason. Here are some examples:
+
+```python
+async def setup(self):
+    if not self.config.get("api_key"):
+        # soft-fail
+        return None, "No API key specified"
+
+async def setup(self):
+    try:
+        wordlist = self.helpers.wordlist("https://raw.githubusercontent.com/user/wordlist.txt")
+    except WordlistError as e:
+        # hard-fail
+        return False, f"Error downloading wordlist: {e}"
+
+async def setup(self):
+    self.timeout = self.config.get("timeout", 5)
+    # success
+    return True
+```
+
+### Module Config Options
+
+Each module can have its own set of config options. These live in the `options` and `options_desc` attributes on your class. Both are dictionaries; `options` is for defaults and `options_desc` is for descriptions. Here is a typical example:
+
+```python title="bbot/modules/nmap.py"
+class nmap(BaseModule):
+    # ...
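+    # each key in `options` gets a default value here, and a matching
+    # human-readable description under the same key in `options_desc` below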
+ options = { + "top_ports": 100, + "ports": "", + "timing": "T4", + "skip_host_discovery": True, + } + options_desc = { + "top_ports": "Top ports to scan (default 100) (to override, specify 'ports')", + "ports": "Ports to scan", + "timing": "-T<0-5>: Set timing template (higher is faster)", + "skip_host_discovery": "skip host discovery (-Pn)", + } + + async def setup(self): + self.ports = self.config.get("ports", "") + self.timing = self.config.get("timing", "T4") + self.top_ports = self.config.get("top_ports", 100) + self.skip_host_discovery = self.config.get("skip_host_discovery", True) +``` + +Once you've defined these variables, you can pass the options via `-c`: + +```bash +bbot -m nmap -c modules.nmap.top_ports=250 +``` + +... or via the config: + +```yaml title="~/.config/bbot/bbot.yml" +modules: + nmap: + top_ports: 250 +``` + +Inside the module, you access them via `self.config`, e.g.: + +```python +self.config.get("top_ports") +``` + +### Module Dependencies + +BBOT automates module dependencies with **Ansible**. If your module relies on a third-party binary, OS package, or python library, you can specify them in the `deps_*` attributes of your module. + +```python +class MyModule(BaseModule): + ... + deps_apt = ["chromium-browser"] + deps_ansible = [ + { + "name": "install dev tools", + "package": {"name": ["gcc", "git", "make"], "state": "present"}, + "become": True, + "ignore_errors": True, + }, + { + "name": "Download massdns source code", + "git": { + "repo": "https://github.com/blechschmidt/massdns.git", + "dest": "#{BBOT_TEMP}/massdns", + "single_branch": True, + "version": "master", + }, + }, + { + "name": "Build massdns", + "command": {"chdir": "#{BBOT_TEMP}/massdns", "cmd": "make", "creates": "#{BBOT_TEMP}/massdns/bin/massdns"}, + }, + { + "name": "Install massdns", + "copy": {"src": "#{BBOT_TEMP}/massdns/bin/massdns", "dest": "#{BBOT_TOOLS}/", "mode": "u+x,g+x,o+x"}, + }, + ] +``` diff --git a/docs/dev/presets.md b/docs/dev/presets.md new file mode 100644 index 000000000..7bc7343e0 --- /dev/null +++ b/docs/dev/presets.md @@ -0,0 +1 @@ +::: bbot.scanner.Preset diff --git a/docs/index.md b/docs/index.md index ae590beef..7e7adb510 100644 --- a/docs/index.md +++ b/docs/index.md @@ -10,7 +10,7 @@ _A BBOT scan in real-time - visualization with [VivaGraphJS](https://github.com/ Only **Linux** is supported at this time. **Windows** and **macOS** are *not* supported. If you use one of these platforms, consider using [Docker](#Docker). -BBOT offers multiple methods of installation, including **pipx** and **Docker**. If you plan to dev on BBOT, see [Installation (Poetry)](https://www.blacklanternsecurity.com/bbot/contribution/#installation-poetry). +BBOT offers multiple methods of installation, including **pipx** and **Docker**. If you plan to dev on BBOT, see [Installation (Poetry)](./contribution/#installation-poetry). ### [Python (pip / pipx)](https://pypi.org/project/bbot/) @@ -55,50 +55,69 @@ Below are some examples of common scans. 
```bash # Perform a full subdomain enumeration on evilcorp.com -bbot -t evilcorp.com -f subdomain-enum +bbot -t evilcorp.com -p subdomain-enum ``` **Subdomains (passive only):** ```bash # Perform a passive-only subdomain enumeration on evilcorp.com -bbot -t evilcorp.com -f subdomain-enum -rf passive +bbot -t evilcorp.com -p subdomain-enum -rf passive ``` **Subdomains + port scan + web screenshots:** ```bash # Port-scan every subdomain, screenshot every webpage, output to current directory -bbot -t evilcorp.com -f subdomain-enum -m nmap gowitness -n my_scan -o . +bbot -t evilcorp.com -p subdomain-enum -m nmap gowitness -n my_scan -o . ``` **Subdomains + basic web scan:** ```bash # A basic web scan includes wappalyzer, robots.txt, and other non-intrusive web modules -bbot -t evilcorp.com -f subdomain-enum web-basic +bbot -t evilcorp.com -p subdomain-enum web-basic ``` **Web spider:** ```bash # Crawl www.evilcorp.com up to a max depth of 2, automatically extracting emails, secrets, etc. -bbot -t www.evilcorp.com -m httpx robots badsecrets secretsdb -c web_spider_distance=2 web_spider_depth=2 +bbot -t www.evilcorp.com -p spider -c web_spider_distance=2 web_spider_depth=2 ``` **Everything everywhere all at once:** ```bash # Subdomains, emails, cloud buckets, port scan, basic web, web screenshots, nuclei -bbot -t evilcorp.com -f subdomain-enum email-enum cloud-enum web-basic -m nmap gowitness nuclei --allow-deadly +bbot -t evilcorp.com -p kitchen-sink ``` ## API Keys -No API keys are required to run BBOT. However, some modules need them to function. If you have API keys and want to make use of these modules, you can place them either in BBOT's YAML config (`~/.config/bbot/secrets.yml`): +BBOT works just fine without API keys. However, there are certain modules that need them to function. If you have API keys and want to make use of these modules, you can place them either in your preset: -```yaml title="~/.config/bbot/secrets.yml" +```yaml title="my_preset.yml" +description: My custom subdomain enum preset + +include: + - subdomain-enum + - cloud-enum + +config: + modules: + shodan_dns: + api_key: deadbeef + virustotal: + api_key: cafebabe +``` + +...in BBOT's global YAML config (`~/.config/bbot/bbot.yml`): + +Note: this will ensure the API keys are used in all scans, regardless of preset. 
+ +```yaml title="~/.config/bbot/bbot.yml" modules: shodan_dns: api_key: deadbeef @@ -106,7 +125,7 @@ modules: api_key: cafebabe ``` -Or on the command-line: +...or directly on the command-line: ```bash # specify API key with -c diff --git a/docs/modules/list_of_modules.md b/docs/modules/list_of_modules.md index bd3e58997..99da69028 100644 --- a/docs/modules/list_of_modules.md +++ b/docs/modules/list_of_modules.md @@ -14,7 +14,7 @@ | bucket_google | scan | No | Check for Google object storage related to target | active, cloud-enum, safe, web-basic, web-thorough | DNS_NAME, STORAGE_BUCKET | FINDING, STORAGE_BUCKET | | bypass403 | scan | No | Check 403 pages for common bypasses | active, aggressive, web-thorough | URL | FINDING | | dastardly | scan | No | Lightweight web application security scanner | active, aggressive, deadly, slow, web-thorough | HTTP_RESPONSE | FINDING, VULNERABILITY | -| dockerhub | scan | No | Search for docker repositories of discovered orgs/usernames | active, safe | ORG_STUB, SOCIAL | CODE_REPOSITORY, SOCIAL, URL_UNVERIFIED | +| dockerhub | scan | No | Search for docker repositories of discovered orgs/usernames | active, code-enum, safe | ORG_STUB, SOCIAL | CODE_REPOSITORY, SOCIAL, URL_UNVERIFIED | | dotnetnuke | scan | No | Scan for critical DotNetNuke (DNN) vulnerabilities | active, aggressive, web-thorough | HTTP_RESPONSE | TECHNOLOGY, VULNERABILITY | | ffuf | scan | No | A fast web fuzzer written in Go | active, aggressive, deadly | URL | URL_UNVERIFIED | | ffuf_shortnames | scan | No | Use ffuf in combination IIS shortnames | active, aggressive, iis-shortnames, web-thorough | URL_HINT | URL_UNVERIFIED | @@ -22,7 +22,7 @@ | fingerprintx | scan | No | Fingerprint exposed services like RDP, SSH, MySQL, etc. | active, safe, service-enum, slow | OPEN_TCP_PORT | PROTOCOL | | generic_ssrf | scan | No | Check for generic SSRFs | active, aggressive, web-thorough | URL | VULNERABILITY | | git | scan | No | Check for exposed .git repositories | active, safe, web-basic, web-thorough | URL | FINDING | -| gitlab | scan | No | Detect GitLab instances and query them for repositories | active, safe | HTTP_RESPONSE, SOCIAL, TECHNOLOGY | CODE_REPOSITORY, FINDING, SOCIAL, TECHNOLOGY | +| gitlab | scan | No | Detect GitLab instances and query them for repositories | active, code-enum, safe | HTTP_RESPONSE, SOCIAL, TECHNOLOGY | CODE_REPOSITORY, FINDING, SOCIAL, TECHNOLOGY | | gowitness | scan | No | Take screenshots of webpages | active, safe, web-screenshots | SOCIAL, URL | TECHNOLOGY, URL, URL_UNVERIFIED, WEBSCREENSHOT | | host_header | scan | No | Try common HTTP Host header spoofing techniques | active, aggressive, web-thorough | HTTP_RESPONSE | FINDING | | httpx | scan | No | Visit webpages. 
Many other modules rely on httpx | active, cloud-enum, safe, social-enum, subdomain-enum, web-basic, web-thorough | OPEN_TCP_PORT, URL, URL_UNVERIFIED | HTTP_RESPONSE, URL | @@ -71,8 +71,8 @@ | emailformat | scan | No | Query email-format.com for email addresses | email-enum, passive, safe | DNS_NAME | EMAIL_ADDRESS | | fullhunt | scan | Yes | Query the fullhunt.io API for subdomains | passive, safe, subdomain-enum | DNS_NAME | DNS_NAME | | git_clone | scan | No | Clone code github repositories | passive, safe, slow | CODE_REPOSITORY | FILESYSTEM | -| github_codesearch | scan | Yes | Query Github's API for code containing the target domain name | passive, safe, subdomain-enum | DNS_NAME | CODE_REPOSITORY, URL_UNVERIFIED | -| github_org | scan | No | Query Github's API for organization and member repositories | passive, safe, subdomain-enum | ORG_STUB, SOCIAL | CODE_REPOSITORY | +| github_codesearch | scan | Yes | Query Github's API for code containing the target domain name | code-enum, passive, safe, subdomain-enum | DNS_NAME | CODE_REPOSITORY, URL_UNVERIFIED | +| github_org | scan | No | Query Github's API for organization and member repositories | code-enum, passive, safe, subdomain-enum | ORG_STUB, SOCIAL | CODE_REPOSITORY | | hackertarget | scan | No | Query the hackertarget.com API for subdomains | passive, safe, subdomain-enum | DNS_NAME | DNS_NAME | | hunterio | scan | Yes | Query hunter.io for emails | email-enum, passive, safe, subdomain-enum | DNS_NAME | DNS_NAME, EMAIL_ADDRESS, URL_UNVERIFIED | | internetdb | scan | No | Query Shodan's InternetDB for open ports, hostnames, technologies, and vulnerabilities | passive, portscan, safe, subdomain-enum | DNS_NAME, IP_ADDRESS | DNS_NAME, FINDING, OPEN_TCP_PORT, TECHNOLOGY, VULNERABILITY | @@ -105,7 +105,7 @@ | asset_inventory | output | No | Merge hosts, open ports, technologies, findings, etc. 
into a single asset inventory CSV | | DNS_NAME, FINDING, HTTP_RESPONSE, IP_ADDRESS, OPEN_TCP_PORT, TECHNOLOGY, URL, VULNERABILITY, WAF | IP_ADDRESS, OPEN_TCP_PORT | | csv | output | No | Output to CSV | | * | | | discord | output | No | Message a Discord channel when certain events are encountered | | * | | -| emails | output | No | Output any email addresses found belonging to the target domain | email-enum | EMAIL_ADDRESS | | +| emails | output | No | Output any email addresses found belonging to the target domain | | EMAIL_ADDRESS | | | http | output | No | Send every event to a custom URL via a web request | | * | | | human | output | No | Output to text | | * | | | json | output | No | Output to Newline-Delimited JSON (NDJSON) | | * | | @@ -113,7 +113,7 @@ | python | output | No | Output via Python API | | * | | | slack | output | No | Message a Slack channel when certain events are encountered | | * | | | splunk | output | No | Send every event to a splunk instance through HTTP Event Collector | | * | | -| subdomains | output | No | Output only resolved, in-scope subdomains | subdomain-enum | DNS_NAME, DNS_NAME_UNRESOLVED | | +| subdomains | output | No | Output only resolved, in-scope subdomains | | DNS_NAME, DNS_NAME_UNRESOLVED | | | teams | output | No | Message a Teams channel when certain events are encountered | | * | | | web_report | output | No | Create a markdown report with web assets | | FINDING, TECHNOLOGY, URL, VHOST, VULNERABILITY | | | websocket | output | No | Output to websockets | | * | | diff --git a/docs/scanning/advanced.md b/docs/scanning/advanced.md index b9bef8da7..9d9460d92 100644 --- a/docs/scanning/advanced.md +++ b/docs/scanning/advanced.md @@ -33,22 +33,15 @@ asyncio.run(main()) ```text -usage: bbot [-h] [--help-all] [-t TARGET [TARGET ...]] - [-w WHITELIST [WHITELIST ...]] [-b BLACKLIST [BLACKLIST ...]] - [--strict-scope] [-m MODULE [MODULE ...]] [-l] - [-em MODULE [MODULE ...]] [-f FLAG [FLAG ...]] [-lf] - [-rf FLAG [FLAG ...]] [-ef FLAG [FLAG ...]] - [-om MODULE [MODULE ...]] [--allow-deadly] [-n SCAN_NAME] - [-o DIR] [-c [CONFIG ...]] [-v] [-d] [-s] [--force] [-y] - [--dry-run] [--current-config] - [--no-deps | --force-deps | --retry-deps | --ignore-failed-deps | --install-all-deps] - [-a] [--version] +usage: bbot [-h] [-t TARGET [TARGET ...]] [-w WHITELIST [WHITELIST ...]] [-b BLACKLIST [BLACKLIST ...]] [--strict-scope] [-p [PRESET ...]] [-c [CONFIG ...]] [-lp] + [-m MODULE [MODULE ...]] [-l] [-lmo] [-em MODULE [MODULE ...]] [-f FLAG [FLAG ...]] [-lf] [-rf FLAG [FLAG ...]] [-ef FLAG [FLAG ...]] [-om MODULE [MODULE ...]] + [--allow-deadly] [-n SCAN_NAME] [-o DIR] [-v] [-d] [-s] [--force] [-y] [--dry-run] [--current-preset] [--current-preset-full] + [--no-deps | --force-deps | --retry-deps | --ignore-failed-deps | --install-all-deps] [--version] Bighuge BLS OSINT Tool options: -h, --help show this help message and exit - --help-all Display full help including module config options Target: -t TARGET [TARGET ...], --targets TARGET [TARGET ...] @@ -59,14 +52,23 @@ Target: Don't touch these things --strict-scope Don't consider subdomains of target/whitelist to be in-scope +Presets: + -p [PRESET ...], --preset [PRESET ...] + Enable BBOT preset(s) + -c [CONFIG ...], --config [CONFIG ...] + Custom config options in key=value format: e.g. 'modules.shodan.api_key=1234' + -lp, --list-presets List available presets. + Modules: -m MODULE [MODULE ...], --modules MODULE [MODULE ...] Modules to enable. 
Choices: affiliates,ajaxpro,anubisdb,asn,azure_realm,azure_tenant,baddns,baddns_zone,badsecrets,bevigil,binaryedge,bucket_amazon,bucket_azure,bucket_digitalocean,bucket_file_enum,bucket_firebase,bucket_google,builtwith,bypass403,c99,censys,certspotter,chaos,columbus,credshed,crobat,crt,dastardly,dehashed,digitorus,dnscommonsrv,dnsdumpster,docker_pull,dockerhub,dotnetnuke,emailformat,ffuf,ffuf_shortnames,filedownload,fingerprintx,fullhunt,generic_ssrf,git,git_clone,github_codesearch,github_org,gitlab,gowitness,hackertarget,host_header,httpx,hunt,hunterio,iis_shortnames,internetdb,ip2location,ipneighbor,ipstack,leakix,masscan,massdns,myssl,newsletters,nmap,ntlm,nuclei,oauth,otx,paramminer_cookies,paramminer_getparams,paramminer_headers,passivetotal,pgp,postman,rapiddns,riddler,robots,secretsdb,securitytrails,shodan_dns,sitedossier,skymem,smuggler,social,sslcert,subdomaincenter,sublist3r,telerik,threatminer,trufflehog,url_manipulation,urlscan,vhost,viewdns,virustotal,wafw00f,wappalyzer,wayback,zoomeye -l, --list-modules List available modules. + -lmo, --list-module-options + Show all module config options -em MODULE [MODULE ...], --exclude-modules MODULE [MODULE ...] Exclude these modules. -f FLAG [FLAG ...], --flags FLAG [FLAG ...] - Enable modules by flag. Choices: active,affiliates,aggressive,baddns,cloud-enum,deadly,email-enum,iis-shortnames,passive,portscan,report,safe,service-enum,slow,social-enum,subdomain-enum,subdomain-hijack,web-basic,web-paramminer,web-screenshots,web-thorough + Enable modules by flag. Choices: active,affiliates,aggressive,baddns,cloud-enum,code-enum,deadly,email-enum,iis-shortnames,passive,portscan,report,safe,service-enum,slow,social-enum,subdomain-enum,subdomain-hijack,web-basic,web-paramminer,web-screenshots,web-thorough -lf, --list-flags List available flags. -rf FLAG [FLAG ...], --require-flags FLAG [FLAG ...] Only enable modules with these flags (e.g. -rf passive) @@ -80,15 +82,15 @@ Scan: -n SCAN_NAME, --name SCAN_NAME Name of scan (default: random) -o DIR, --output-dir DIR - -c [CONFIG ...], --config [CONFIG ...] - custom config file, or configuration options in key=value format: 'modules.shodan.api_key=1234' -v, --verbose Be more verbose -d, --debug Enable debugging -s, --silent Be quiet - --force Run scan even if module setups fail + --force Run scan even in the case of condition violations or failed module setups -y, --yes Skip scan confirmation prompt --dry-run Abort before executing scan - --current-config Show current config in YAML format + --current-preset Show the current preset in YAML format + --current-preset-full + Show the current preset in its full form, including defaults Module dependencies: Control how modules install their dependencies @@ -99,37 +101,35 @@ Module dependencies: --ignore-failed-deps Run modules even if they have failed dependencies --install-all-deps Install dependencies for all modules -Agent: - Report back to a central server - - -a, --agent-mode Start in agent mode - Misc: --version show BBOT version and exit EXAMPLES Subdomains: - bbot -t evilcorp.com -f subdomain-enum + bbot -t evilcorp.com -p subdomain-enum Subdomains (passive only): - bbot -t evilcorp.com -f subdomain-enum -rf passive + bbot -t evilcorp.com -p subdomain-enum -rf passive Subdomains + port scan + web screenshots: - bbot -t evilcorp.com -f subdomain-enum -m nmap gowitness -n my_scan -o . + bbot -t evilcorp.com -p subdomain-enum -m nmap gowitness -n my_scan -o . 
Subdomains + basic web scan:
-    bbot -t evilcorp.com -f subdomain-enum web-basic
+    bbot -t evilcorp.com -p subdomain-enum web-basic

Web spider:
-    bbot -t www.evilcorp.com -m httpx robots badsecrets secretsdb -c web_spider_distance=2 web_spider_depth=2
+    bbot -t www.evilcorp.com -p spider -c web_spider_distance=2 web_spider_depth=2

Everything everywhere all at once:
-    bbot -t evilcorp.com -f subdomain-enum email-enum cloud-enum web-basic -m nmap gowitness nuclei --allow-deadly
+    bbot -t evilcorp.com -p kitchen-sink

List modules:
    bbot -l

+List presets:
+    bbot -lp
+
List flags:
    bbot -lf
diff --git a/docs/scanning/configuration.md b/docs/scanning/configuration.md
index cbe7ffd26..0cc7dddab 100644
--- a/docs/scanning/configuration.md
+++ b/docs/scanning/configuration.md
@@ -1,6 +1,8 @@
 # Configuration Overview

-BBOT has a YAML config at `~/.config/bbot`. This config is different from the command-line arguments. This is where you change settings such as BBOT's **HTTP proxy**, **rate limits**, or global **User-Agent**. It's also where you put modules' **API keys**.
+Normally, [Presets](presets.md) are used to configure a scan. However, there may be cases where you want to change BBOT's global defaults so a certain option is always set, even if it's not specified in a preset.
+
+BBOT has a YAML config at `~/.config/bbot/bbot.yml`. This is the first config that BBOT loads, so it's a good place to put default settings like `http_proxy`, `max_threads`, or `http_user_agent`. You can also put any module settings here, including **API keys**.

 For a list of all possible config options, see:

@@ -11,13 +13,13 @@ For examples of common config changes, see [Tips and Tricks](tips_and_tricks.md)

 ## Configuration Files

-BBOT loads its config from the following files, in this order:
+BBOT loads its config from the following files, in this order (last one loaded == highest priority):

-- `~/.config/bbot/bbot.yml` <-- Use this one as your main config
-- `~/.config/bbot/secrets.yml` <-- Use this one for sensitive stuff like API keys
-- command line (`--config`) <-- Use this to specify a custom config file or override individual config options
+- `~/.config/bbot/bbot.yml` <-- Global BBOT config
+- presets (`-p`) <-- Presets are good for scan-specific settings
+- command line (`-c`) <-- CLI overrides everything

-These config files will be automatically created for you when you first run BBOT.
+`bbot.yml` will be automatically created for you when you first run BBOT.

 ## YAML Config vs Command Line

@@ -25,7 +27,7 @@ You can specify config options either via the command line or the config. For ex

 ```bash
 # send BBOT traffic through an HTTP proxy
-bbot -t evilcorp.com --config http_proxy=http://127.0.0.1:8080
+bbot -t evilcorp.com -c http_proxy=http://127.0.0.1:8080
 ```

 Or, in `~/.config/bbot/config.yml`:

@@ -36,7 +38,7 @@ http_proxy: http://127.0.0.1:8080
 ```

 These two are equivalent.

-Config options specified via the command-line take precedence over all others. You can give BBOT a custom config file with `--config myconf.yml`, or individual arguments like this: `--config modules.shodan_dns.api_key=deadbeef`. To display the full and current BBOT config, including any command-line arguments, use `bbot --current-config`.
+Config options specified via the command-line take precedence over all others. You can give BBOT a custom config file with `-c myconf.yml`, or individual arguments like this: `-c modules.shodan_dns.api_key=deadbeef`. To display the full and current BBOT config, including any command-line arguments, use `bbot --current-preset`.
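+
+The same overrides work from Python. Below is a minimal sketch, assuming the `Scanner` API's `config` keyword from the Developer Reference; it mirrors the `-c http_proxy=...` example above:
+
+```python
+from bbot.scanner import Scanner
+
+# equivalent in spirit to: bbot -t evilcorp.com -c http_proxy=http://127.0.0.1:8080
+# (assumption: config passed via the Python API is applied on top of your
+# bbot.yml defaults, just like -c on the command line)
+scan = Scanner("evilcorp.com", config={"http_proxy": "http://127.0.0.1:8080"})
+```
+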
Note that placing the following in `bbot.yml`: ```yaml title="~/.bbot/config/bbot.yml" @@ -46,7 +48,7 @@ modules: ``` Is the same as: ```bash -bbot --config modules.shodan_dns.api_key=deadbeef +bbot -c modules.shodan_dns.api_key=deadbeef ``` ## Global Config Options @@ -90,6 +92,8 @@ web_spider_links_per_page: 25 ### ADVANCED OPTIONS ### +module_paths: [] + # How far out from the main scope to search scope_search_distance: 0 # How far out from the main scope to resolve DNS names / IPs @@ -104,6 +108,14 @@ excavate: True # Summarize activity at the end of a scan aggregate: True +# How to handle installation of module dependencies +# Choices are: +# - abort_on_failure (default) - if a module dependency fails to install, abort the scan +# - retry_failed - try again to install failed dependencies +# - ignore_failed - run the scan regardless of what happens with dependency installation +# - disable - completely disable BBOT's dependency system (you are responsible for install tools, pip packages, etc.) +deps_behavior: abort_on_failure + # HTTP timeout (for Python requests; API calls, etc.) http_timeout: 10 # HTTP timeout (for httpx) @@ -181,10 +193,6 @@ omit_event_types: - URL_UNVERIFIED - DNS_NAME_UNRESOLVED # - IP_ADDRESS -# URL of BBOT server -agent_url: '' -# Agent Bearer authentication token -agent_token: '' # Custom interactsh server settings interactsh_server: null @@ -353,48 +361,48 @@ Many modules accept their own configuration options. These options have the abil | modules.zoomeye.api_key | str | ZoomEye API key | | | modules.zoomeye.include_related | bool | Include domains which may be related to the target | False | | modules.zoomeye.max_pages | int | How many pages of results to fetch | 20 | -| output_modules.asset_inventory.output_file | str | Set a custom output file | | -| output_modules.asset_inventory.recheck | bool | When use_previous=True, don't retain past details like open ports or findings. Instead, allow them to be rediscovered by the new scan | False | -| output_modules.asset_inventory.summary_netmask | int | Subnet mask to use when summarizing IP addresses at end of scan | 16 | -| output_modules.asset_inventory.use_previous | bool |` Emit previous asset inventory as new events (use in conjunction with -n ) `| False | -| output_modules.csv.output_file | str | Output to CSV file | | -| output_modules.discord.event_types | list | Types of events to send | ['VULNERABILITY', 'FINDING'] | -| output_modules.discord.min_severity | str | Only allow VULNERABILITY events of this severity or higher | LOW | -| output_modules.discord.webhook_url | str | Discord webhook URL | | -| output_modules.emails.output_file | str | Output to file | | -| output_modules.http.bearer | str | Authorization Bearer token | | -| output_modules.http.method | str | HTTP method | POST | -| output_modules.http.password | str | Password (basic auth) | | -| output_modules.http.siem_friendly | bool | Format JSON in a SIEM-friendly way for ingestion into Elastic, Splunk, etc. 
| False | -| output_modules.http.timeout | int | HTTP timeout | 10 | -| output_modules.http.url | str | Web URL | | -| output_modules.http.username | str | Username (basic auth) | | -| output_modules.human.console | bool | Output to console | True | -| output_modules.human.output_file | str | Output to file | | -| output_modules.json.console | bool | Output to console | False | -| output_modules.json.output_file | str | Output to file | | -| output_modules.json.siem_friendly | bool | Output JSON in a SIEM-friendly format for ingestion into Elastic, Splunk, etc. | False | -| output_modules.neo4j.password | str | Neo4j password | bbotislife | -| output_modules.neo4j.uri | str | Neo4j server + port | bolt://localhost:7687 | -| output_modules.neo4j.username | str | Neo4j username | neo4j | -| output_modules.slack.event_types | list | Types of events to send | ['VULNERABILITY', 'FINDING'] | -| output_modules.slack.min_severity | str | Only allow VULNERABILITY events of this severity or higher | LOW | -| output_modules.slack.webhook_url | str | Discord webhook URL | | -| output_modules.splunk.hectoken | str | HEC Token | | -| output_modules.splunk.index | str | Index to send data to | | -| output_modules.splunk.source | str | Source path to be added to the metadata | | -| output_modules.splunk.timeout | int | HTTP timeout | 10 | -| output_modules.splunk.url | str | Web URL | | -| output_modules.subdomains.include_unresolved | bool | Include unresolved subdomains in output | False | -| output_modules.subdomains.output_file | str | Output to file | | -| output_modules.teams.event_types | list | Types of events to send | ['VULNERABILITY', 'FINDING'] | -| output_modules.teams.min_severity | str | Only allow VULNERABILITY events of this severity or higher | LOW | -| output_modules.teams.webhook_url | str | Discord webhook URL | | -| output_modules.web_report.css_theme_file | str | CSS theme URL for HTML output | https://cdnjs.cloudflare.com/ajax/libs/github-markdown-css/5.1.0/github-markdown.min.css | -| output_modules.web_report.output_file | str | Output to file | | -| output_modules.websocket.preserve_graph | bool | Preserve full chains of events in the graph (prevents orphans) | True | -| output_modules.websocket.token | str | Authorization Bearer token | | -| output_modules.websocket.url | str | Web URL | | -| internal_modules.speculate.max_hosts | int | Max number of IP_RANGE hosts to convert into IP_ADDRESS events | 65536 | -| internal_modules.speculate.ports | str | The set of ports to speculate on | 80,443 | +| modules.asset_inventory.output_file | str | Set a custom output file | | +| modules.asset_inventory.recheck | bool | When use_previous=True, don't retain past details like open ports or findings. 
Instead, allow them to be rediscovered by the new scan | False | +| modules.asset_inventory.summary_netmask | int | Subnet mask to use when summarizing IP addresses at end of scan | 16 | +| modules.asset_inventory.use_previous | bool |` Emit previous asset inventory as new events (use in conjunction with -n ) `| False | +| modules.csv.output_file | str | Output to CSV file | | +| modules.discord.event_types | list | Types of events to send | ['VULNERABILITY', 'FINDING'] | +| modules.discord.min_severity | str | Only allow VULNERABILITY events of this severity or higher | LOW | +| modules.discord.webhook_url | str | Discord webhook URL | | +| modules.emails.output_file | str | Output to file | | +| modules.http.bearer | str | Authorization Bearer token | | +| modules.http.method | str | HTTP method | POST | +| modules.http.password | str | Password (basic auth) | | +| modules.http.siem_friendly | bool | Format JSON in a SIEM-friendly way for ingestion into Elastic, Splunk, etc. | False | +| modules.http.timeout | int | HTTP timeout | 10 | +| modules.http.url | str | Web URL | | +| modules.http.username | str | Username (basic auth) | | +| modules.human.console | bool | Output to console | True | +| modules.human.output_file | str | Output to file | | +| modules.json.console | bool | Output to console | False | +| modules.json.output_file | str | Output to file | | +| modules.json.siem_friendly | bool | Output JSON in a SIEM-friendly format for ingestion into Elastic, Splunk, etc. | False | +| modules.neo4j.password | str | Neo4j password | bbotislife | +| modules.neo4j.uri | str | Neo4j server + port | bolt://localhost:7687 | +| modules.neo4j.username | str | Neo4j username | neo4j | +| modules.slack.event_types | list | Types of events to send | ['VULNERABILITY', 'FINDING'] | +| modules.slack.min_severity | str | Only allow VULNERABILITY events of this severity or higher | LOW | +| modules.slack.webhook_url | str | Discord webhook URL | | +| modules.splunk.hectoken | str | HEC Token | | +| modules.splunk.index | str | Index to send data to | | +| modules.splunk.source | str | Source path to be added to the metadata | | +| modules.splunk.timeout | int | HTTP timeout | 10 | +| modules.splunk.url | str | Web URL | | +| modules.subdomains.include_unresolved | bool | Include unresolved subdomains in output | False | +| modules.subdomains.output_file | str | Output to file | | +| modules.teams.event_types | list | Types of events to send | ['VULNERABILITY', 'FINDING'] | +| modules.teams.min_severity | str | Only allow VULNERABILITY events of this severity or higher | LOW | +| modules.teams.webhook_url | str | Discord webhook URL | | +| modules.web_report.css_theme_file | str | CSS theme URL for HTML output | https://cdnjs.cloudflare.com/ajax/libs/github-markdown-css/5.1.0/github-markdown.min.css | +| modules.web_report.output_file | str | Output to file | | +| modules.websocket.preserve_graph | bool | Preserve full chains of events in the graph (prevents orphans) | True | +| modules.websocket.token | str | Authorization Bearer token | | +| modules.websocket.url | str | Web URL | | +| modules.speculate.max_hosts | int | Max number of IP_RANGE hosts to convert into IP_ADDRESS events | 65536 | +| modules.speculate.ports | str | The set of ports to speculate on | 80,443 | diff --git a/docs/scanning/output.md b/docs/scanning/output.md index 4acd25250..d51bc4865 100644 --- a/docs/scanning/output.md +++ b/docs/scanning/output.md @@ -64,7 +64,7 @@ You can filter on the JSON output with `jq`: ```bash # 
pull out only the .data attribute of every DNS_NAME -$ jq -r 'select(.type=="DNS_NAME") | .data' ~/.bbot/scans/extreme_johnny/output.ndjson +$ jq -r 'select(.type=="DNS_NAME") | .data' ~/.bbot/scans/extreme_johnny/output.json evilcorp.com www.evilcorp.com mail.evilcorp.com diff --git a/docs/scanning/presets.md b/docs/scanning/presets.md new file mode 100644 index 000000000..f19e27550 --- /dev/null +++ b/docs/scanning/presets.md @@ -0,0 +1,195 @@ +# Presets + +Presets allow you to put all of your scan settings in one place. A Preset is a YAML file that can include scan targets, modules, and config options like API keys. + +A typical preset looks like this: + + +```yaml title="subdomain-enum.yml" +description: Enumerate subdomains via APIs, brute-force + +flags: + - subdomain-enum + +output_modules: + - subdomains + +config: + modules: + stdout: + format: text + # only output DNS_NAMEs to the console + event_types: + - DNS_NAME + # only show in-scope subdomains + in_scope_only: True + # display the raw subdomains, nothing else + event_fields: + - data + # automatically dedupe + accept_dups: False + +``` + + +## How to use Presets (`-p`) + +BBOT has a ready-made collection of presets for common tasks like subdomain enumeration and web spidering. They live in `~/.bbot/presets`. + +To list them, you can do: + +```bash +# list available presets +bbot -lp +``` + +Enable them with `-p`: + +```bash +# do a subdomain enumeration +bbot -t evilcorp.com -p subdomain-enum + +# multiple presets - subdomain enumeration + web spider +bbot -t evilcorp.com -p subdomain-enum spider + +# start with a preset but only enable modules that have the 'passive' flag +bbot -t evilcorp.com -p subdomain-enum -rf passive + +# preset + manual config override +bbot -t www.evilcorp.com -p spider -c web_spider_distance=10 +``` + +You can also build on the default presets, or create your own. Here's an example of a custom preset that builds on `subdomain-enum`: + +```yaml title="my_subdomains.yml" +description: Do a subdomain enumeration + basic web scan + nuclei + +target: + - evilcorp.com + +include: + # include these default presets + - subdomain-enum + - web-basic + +modules: + # enable nuclei in addition to the other modules + - nuclei + +config: + # global config options + http_proxy: http://127.0.0.1:8080 + # module config options + modules: + # api keys + securitytrails: + api_key: 21a270d5f59c9b05813a72bb41707266 + virustotal: + api_key: 4f41243847da693a4f356c0486114bc6 + # other module config options + massdns: + max_resolvers: 5000 +``` + +To execute your custom preset, you do: + +```bash +bbot -p ./my_subdomains.yml +``` + +## Preset Load Order + +When you enable multiple presets, the order matters. In the case of a conflict, the last preset will always win. This means, for example, if you have a custom preset called `my_spider` that sets `web_spider_distance` to 1: + +```yaml title="my_spider.yml" +web_spider_distance: 1 +``` + +...and you enable it alongside the default `spider` preset in this order: + +```bash +bbot -t evilcorp.com -p ./my_spider.yml spider +``` + +...the value of `web_spider_distance` will be overridden by `spider`. 
To ensure this doesn't happen, you would want to switch the order of the presets:
+
+```bash
+bbot -t evilcorp.com -p spider ./my_spider.yml
+```
+
+## Validating Presets
+
+To make sure BBOT is configured the way you expect, you can always use `--current-preset` to show the final version of the config that will be used when BBOT executes:
+
+```bash
+# verify the preset is what you want
+bbot -p ./mypreset.yml --current-preset
+```
+
+## Advanced Usage
+
+BBOT Presets support advanced features like environment variable substitution and custom conditions.
+
+### Environment Variables
+
+You can insert environment variables into your preset like this: `${env:<VARIABLE_NAME>}`:
+
+```yaml title="my_nuclei.yml"
+description: Do a nuclei scan
+
+target:
+  - evilcorp.com
+
+modules:
+  - nuclei
+
+config:
+  modules:
+    nuclei:
+      # allow the nuclei templates to be specified at runtime via an environment variable
+      tags: ${env:NUCLEI_TAGS}
+```
+
+```bash
+NUCLEI_TAGS=apache,nginx bbot -p ./my_nuclei.yml
+```
+
+### Conditions
+
+Sometimes, you might need to add custom logic to a preset. BBOT supports this via `conditions`. The `conditions` attribute allows you to specify a list of custom conditions that will be evaluated before the scan starts. This is useful for performing last-minute sanity checks, or changing the behavior of the scan based on custom criteria.
+
+```yaml title="my_preset.yml"
+description: Abort if nuclei templates aren't specified
+
+modules:
+  - nuclei
+
+conditions:
+  - |
+    {% if not config.modules.nuclei.templates %}
+      {{ abort("Don't forget to set your templates!") }}
+    {% endif %}
+```
+
+```yaml title="my_preset.yml"
+description: Enable ffuf but only when the web spider isn't also enabled
+
+modules:
+  - ffuf
+
+conditions:
+  - |
+    {% if config.web_spider_distance > 0 and config.web_spider_depth > 0 %}
+      {{ warn("Disabling ffuf because the web spider is enabled") }}
+      {{ preset.exclude_module("ffuf") }}
+    {% endif %}
+```
+
+Conditions use [Jinja](https://palletsprojects.com/p/jinja/), which means they can contain Python code. They run inside a sandboxed environment which has access to the following variables:
+
+- `preset` - the current preset object
+- `config` - the current config (an alias for `preset.config`)
+- `warn(message)` - display a custom warning message to the user
+- `abort(message)` - abort the scan with an optional message
+
+If you aren't able to accomplish what you want with conditions, or if you need access to a new variable/function, please let us know on [GitHub](https://github.com/blacklanternsecurity/bbot/issues/new/choose).
diff --git a/docs/scanning/presets_list.md b/docs/scanning/presets_list.md
new file mode 100644
index 000000000..692e0ce4a
--- /dev/null
+++ b/docs/scanning/presets_list.md
@@ -0,0 +1,338 @@
+Below is a list of every default BBOT preset, including its YAML.
+
+
+## **cloud-enum**
+
+Enumerate cloud resources such as storage buckets, etc.
+
+??? note "`cloud-enum.yml`"
+    ```yaml title="~/.bbot/presets/cloud-enum.yml"
+    description: Enumerate cloud resources such as storage buckets, etc.
+ + include: + - subdomain-enum + + flags: + - cloud-enum + ``` + + + +Modules: [52]("`anubisdb`, `asn`, `azure_realm`, `azure_tenant`, `baddns_zone`, `baddns`, `bevigil`, `binaryedge`, `bucket_amazon`, `bucket_azure`, `bucket_digitalocean`, `bucket_file_enum`, `bucket_firebase`, `bucket_google`, `builtwith`, `c99`, `censys`, `certspotter`, `chaos`, `columbus`, `crt`, `digitorus`, `dnscommonsrv`, `dnsdumpster`, `fullhunt`, `github_codesearch`, `github_org`, `hackertarget`, `httpx`, `hunterio`, `internetdb`, `ipneighbor`, `leakix`, `massdns`, `myssl`, `oauth`, `otx`, `passivetotal`, `postman`, `rapiddns`, `riddler`, `securitytrails`, `shodan_dns`, `sitedossier`, `social`, `sslcert`, `subdomaincenter`, `threatminer`, `urlscan`, `virustotal`, `wayback`, `zoomeye`") + +## **code-enum** + +Enumerate Git repositories, Docker images, etc. + +??? note "`code-enum.yml`" + ```yaml title="~/.bbot/presets/code-enum.yml" + description: Enumerate Git repositories, Docker images, etc. + + flags: + - code-enum + ``` + + + +Modules: [6]("`dockerhub`, `github_codesearch`, `github_org`, `gitlab`, `httpx`, `social`") + +## **dirbust-heavy** + +Recursive web directory brute-force (aggressive) + +??? note "`dirbust-heavy.yml`" + ```yaml title="~/.bbot/presets/web_advanced/dirbust-heavy.yml" + description: Recursive web directory brute-force (aggressive) + + include: + - spider + + flags: + - iis-shortnames + + modules: + - ffuf + - wayback + + config: + modules: + iis_shortnames: + # we exploit the shortnames vulnerability to produce URL_HINTs which are consumed by ffuf_shortnames + detect_only: False + ffuf: + depth: 3 + lines: 5000 + extensions: + - php + - asp + - aspx + - ashx + - asmx + - jsp + - jspx + - cfm + - zip + - conf + - config + - xml + - json + - yml + - yaml + # emit URLs from wayback + wayback: + urls: True + ``` + +Category: web_advanced + +Modules: [5]("`ffuf_shortnames`, `ffuf`, `httpx`, `iis_shortnames`, `wayback`") + +## **dirbust-light** + +Basic web directory brute-force (surface-level directories only) + +??? note "`dirbust-light.yml`" + ```yaml title="~/.bbot/presets/web_advanced/dirbust-light.yml" + description: Basic web directory brute-force (surface-level directories only) + + flags: + - iis-shortnames + + modules: + - ffuf + + config: + modules: + iis_shortnames: + # we exploit the shortnames vulnerability to produce URL_HINTs which are consumed by ffuf_shortnames + detect_only: False + ffuf: + # wordlist size = 1000 + lines: 1000 + ``` + +Category: web_advanced + +Modules: [4]("`ffuf_shortnames`, `ffuf`, `httpx`, `iis_shortnames`") + +## **email-enum** + +Enumerate email addresses from APIs, web crawling, etc. + +??? note "`email-enum.yml`" + ```yaml title="~/.bbot/presets/email-enum.yml" + description: Enumerate email addresses from APIs, web crawling, etc. + + flags: + - email-enum + + output_modules: + - emails + + config: + modules: + stdout: + format: text + # only output EMAIL_ADDRESSes to the console + event_types: + - EMAIL_ADDRESS + # only show in-scope emails + in_scope_only: True + # display the raw emails, nothing else + event_fields: + - data + # automatically dedupe + accept_dups: False + ``` + + + +Modules: [6]("`dehashed`, `emailformat`, `hunterio`, `pgp`, `skymem`, `sslcert`") + +## **kitchen-sink** + +Everything everywhere all at once + +??? 
note "`kitchen-sink.yml`" + ```yaml title="~/.bbot/presets/kitchen-sink.yml" + description: Everything everywhere all at once + + include: + - subdomain-enum + - cloud-enum + - code-enum + - email-enum + - spider + - web-basic + - paramminer + - dirbust-light + ``` + + + +Modules: [71]("`anubisdb`, `asn`, `azure_realm`, `azure_tenant`, `baddns_zone`, `baddns`, `badsecrets`, `bevigil`, `binaryedge`, `bucket_amazon`, `bucket_azure`, `bucket_digitalocean`, `bucket_file_enum`, `bucket_firebase`, `bucket_google`, `builtwith`, `c99`, `censys`, `certspotter`, `chaos`, `columbus`, `crt`, `dehashed`, `digitorus`, `dnscommonsrv`, `dnsdumpster`, `dockerhub`, `emailformat`, `ffuf_shortnames`, `ffuf`, `filedownload`, `fullhunt`, `git`, `github_codesearch`, `github_org`, `gitlab`, `hackertarget`, `httpx`, `hunterio`, `iis_shortnames`, `internetdb`, `ipneighbor`, `leakix`, `massdns`, `myssl`, `ntlm`, `oauth`, `otx`, `paramminer_cookies`, `paramminer_getparams`, `paramminer_headers`, `passivetotal`, `pgp`, `postman`, `rapiddns`, `riddler`, `robots`, `secretsdb`, `securitytrails`, `shodan_dns`, `sitedossier`, `skymem`, `social`, `sslcert`, `subdomaincenter`, `threatminer`, `urlscan`, `virustotal`, `wappalyzer`, `wayback`, `zoomeye`") + +## **paramminer** + +Discover new web parameters via brute-force + +??? note "`paramminer.yml`" + ```yaml title="~/.bbot/presets/web_advanced/paramminer.yml" + description: Discover new web parameters via brute-force + + flags: + - web-paramminer + + modules: + - httpx + + config: + web_spider_distance: 1 + web_spider_depth: 4 + ``` + +Category: web_advanced + +Modules: [4]("`httpx`, `paramminer_cookies`, `paramminer_getparams`, `paramminer_headers`") + +## **secrets-enum** + + + +??? note "`secrets-enum.yml`" + ```yaml title="~/.bbot/presets/secrets-enum.yml" + description: + ``` + + + +Modules: [0]("") + +## **spider** + +Recursive web spider + +??? note "`spider.yml`" + ```yaml title="~/.bbot/presets/spider.yml" + description: Recursive web spider + + modules: + - httpx + + config: + # how many links to follow in a row + web_spider_distance: 2 + # don't follow links whose directory depth is higher than 4 + web_spider_depth: 4 + # maximum number of links to follow per page + web_spider_links_per_page: 25 + + modules: + stdout: + format: text + # only output URLs to the console + event_types: + - URL + # only show in-scope URLs + in_scope_only: True + # display the raw URLs, nothing else + event_fields: + - data + # automatically dedupe + accept_dups: False + ``` + + + +Modules: [1]("`httpx`") + +## **subdomain-enum** + +Enumerate subdomains via APIs, brute-force + +??? 
note "`subdomain-enum.yml`"
+    ```yaml title="~/.bbot/presets/subdomain-enum.yml"
+    description: Enumerate subdomains via APIs, brute-force
+
+    flags:
+      - subdomain-enum
+
+    output_modules:
+      - subdomains
+
+    config:
+      modules:
+        stdout:
+          format: text
+          # only output DNS_NAMEs to the console
+          event_types:
+            - DNS_NAME
+          # only show in-scope subdomains
+          in_scope_only: True
+          # display the raw subdomains, nothing else
+          event_fields:
+            - data
+          # automatically dedupe
+          accept_dups: False
+    ```
+
+
+Modules: [45]("`anubisdb`, `asn`, `azure_realm`, `azure_tenant`, `baddns_zone`, `bevigil`, `binaryedge`, `builtwith`, `c99`, `censys`, `certspotter`, `chaos`, `columbus`, `crt`, `digitorus`, `dnscommonsrv`, `dnsdumpster`, `fullhunt`, `github_codesearch`, `github_org`, `hackertarget`, `httpx`, `hunterio`, `internetdb`, `ipneighbor`, `leakix`, `massdns`, `myssl`, `oauth`, `otx`, `passivetotal`, `postman`, `rapiddns`, `riddler`, `securitytrails`, `shodan_dns`, `sitedossier`, `social`, `sslcert`, `subdomaincenter`, `threatminer`, `urlscan`, `virustotal`, `wayback`, `zoomeye`")
+
+## **web-basic**
+
+Quick web scan
+
+??? note "`web-basic.yml`"
+    ```yaml title="~/.bbot/presets/web-basic.yml"
+    description: Quick web scan
+
+    flags:
+      - web-basic
+    ```
+
+
+Modules: [17]("`azure_realm`, `baddns`, `badsecrets`, `bucket_amazon`, `bucket_azure`, `bucket_firebase`, `bucket_google`, `filedownload`, `git`, `httpx`, `iis_shortnames`, `ntlm`, `oauth`, `robots`, `secretsdb`, `sslcert`, `wappalyzer`")
+
+## **web-thorough**
+
+Aggressive web scan
+
+??? note "`web-thorough.yml`"
+    ```yaml title="~/.bbot/presets/web-thorough.yml"
+    description: Aggressive web scan
+
+    include:
+      - web-basic
+
+    flags:
+      - web-thorough
+    ```
+
+
+Modules: [30]("`ajaxpro`, `azure_realm`, `baddns`, `badsecrets`, `bucket_amazon`, `bucket_azure`, `bucket_digitalocean`, `bucket_firebase`, `bucket_google`, `bypass403`, `dastardly`, `dotnetnuke`, `ffuf_shortnames`, `filedownload`, `generic_ssrf`, `git`, `host_header`, `httpx`, `hunt`, `iis_shortnames`, `nmap`, `ntlm`, `oauth`, `robots`, `secretsdb`, `smuggler`, `sslcert`, `telerik`, `url_manipulation`, `wappalyzer`")
+
+
+## Table of Default Presets
+
+Here is the same data, but in a table:
+
+
+| Preset         | Category     | Description                                                       | # Modules   | Modules |
+|----------------|--------------|-------------------------------------------------------------------|-------------|---------|
+| cloud-enum     |              | Enumerate cloud resources such as storage buckets, etc.
| 52 | anubisdb, asn, azure_realm, azure_tenant, baddns, baddns_zone, bevigil, binaryedge, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_file_enum, bucket_firebase, bucket_google, builtwith, c99, censys, certspotter, chaos, columbus, crt, digitorus, dnscommonsrv, dnsdumpster, fullhunt, github_codesearch, github_org, hackertarget, httpx, hunterio, internetdb, ipneighbor, leakix, massdns, myssl, oauth, otx, passivetotal, postman, rapiddns, riddler, securitytrails, shodan_dns, sitedossier, social, sslcert, subdomaincenter, threatminer, urlscan, virustotal, wayback, zoomeye | +| code-enum | | Enumerate Git repositories, Docker images, etc. | 6 | dockerhub, github_codesearch, github_org, gitlab, httpx, social | +| dirbust-heavy | web_advanced | Recursive web directory brute-force (aggressive) | 5 | ffuf, ffuf_shortnames, httpx, iis_shortnames, wayback | +| dirbust-light | web_advanced | Basic web directory brute-force (surface-level directories only) | 4 | ffuf, ffuf_shortnames, httpx, iis_shortnames | +| email-enum | | Enumerate email addresses from APIs, web crawling, etc. | 6 | dehashed, emailformat, hunterio, pgp, skymem, sslcert | +| kitchen-sink | | Everything everywhere all at once | 71 | anubisdb, asn, azure_realm, azure_tenant, baddns, baddns_zone, badsecrets, bevigil, binaryedge, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_file_enum, bucket_firebase, bucket_google, builtwith, c99, censys, certspotter, chaos, columbus, crt, dehashed, digitorus, dnscommonsrv, dnsdumpster, dockerhub, emailformat, ffuf, ffuf_shortnames, filedownload, fullhunt, git, github_codesearch, github_org, gitlab, hackertarget, httpx, hunterio, iis_shortnames, internetdb, ipneighbor, leakix, massdns, myssl, ntlm, oauth, otx, paramminer_cookies, paramminer_getparams, paramminer_headers, passivetotal, pgp, postman, rapiddns, riddler, robots, secretsdb, securitytrails, shodan_dns, sitedossier, skymem, social, sslcert, subdomaincenter, threatminer, urlscan, virustotal, wappalyzer, wayback, zoomeye | +| paramminer | web_advanced | Discover new web parameters via brute-force | 4 | httpx, paramminer_cookies, paramminer_getparams, paramminer_headers | +| secrets-enum | | | 0 | | +| spider | | Recursive web spider | 1 | httpx | +| subdomain-enum | | Enumerate subdomains via APIs, brute-force | 45 | anubisdb, asn, azure_realm, azure_tenant, baddns_zone, bevigil, binaryedge, builtwith, c99, censys, certspotter, chaos, columbus, crt, digitorus, dnscommonsrv, dnsdumpster, fullhunt, github_codesearch, github_org, hackertarget, httpx, hunterio, internetdb, ipneighbor, leakix, massdns, myssl, oauth, otx, passivetotal, postman, rapiddns, riddler, securitytrails, shodan_dns, sitedossier, social, sslcert, subdomaincenter, threatminer, urlscan, virustotal, wayback, zoomeye | +| web-basic | | Quick web scan | 17 | azure_realm, baddns, badsecrets, bucket_amazon, bucket_azure, bucket_firebase, bucket_google, filedownload, git, httpx, iis_shortnames, ntlm, oauth, robots, secretsdb, sslcert, wappalyzer | +| web-thorough | | Aggressive web scan | 30 | ajaxpro, azure_realm, baddns, badsecrets, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_firebase, bucket_google, bypass403, dastardly, dotnetnuke, ffuf_shortnames, filedownload, generic_ssrf, git, host_header, httpx, hunt, iis_shortnames, nmap, ntlm, oauth, robots, secretsdb, smuggler, sslcert, telerik, url_manipulation, wappalyzer | + diff --git a/examples/discord_bot.py b/examples/discord_bot.py new file mode 100644 index 000000000..f435b0301 --- /dev/null 
+++ b/examples/discord_bot.py
@@ -0,0 +1,71 @@
+import discord
+from discord.ext import commands
+
+from bbot.scanner import Scanner
+from bbot.modules.output.discord import Discord
+
+
+class BBOTDiscordBot(commands.Cog):
+    """
+    A simple Discord bot capable of running a BBOT scan.
+
+    To set up:
+    1. Go to Discord Developer Portal (https://discord.com/developers)
+    2. Create a new application
+    3. Create an invite link for the bot, visit the link to invite it to your server
+        - Your Application --> OAuth2 --> URL Generator
+        - For Scopes, select "bot"
+        - For Bot Permissions, select:
+            - Read Messages/View Channels
+            - Send Messages
+    4. Turn on "Message Content Intent"
+        - Your Application --> Bot --> Privileged Gateway Intents --> Message Content Intent
+    5. Copy your Discord Bot Token and paste it into bot.run() at the bottom of this file
+        - Your Application --> Bot --> Reset Token
+    6. Run this script
+
+    To scan evilcorp.com, you would type:
+
+    /scan evilcorp.com
+
+    Results will be output to the same channel.
+    """
+
+    def __init__(self):
+        self.current_scan = None
+
+    @commands.command(name="scan", description="Scan a target with BBOT.")
+    async def scan(self, ctx, target: str):
+        if self.current_scan is not None:
+            self.current_scan.stop()
+        await ctx.send(f"Starting scan against {target}.")
+
+        # creates scan instance
+        self.current_scan = Scanner(target, flags="subdomain-enum")
+        discord_module = Discord(self.current_scan)
+
+        seen = set()
+        num_events = 0
+        # start scan and iterate through results
+        async for event in self.current_scan.async_start():
+            if hash(event) in seen:
+                continue
+            seen.add(hash(event))
+            await ctx.send(discord_module.format_message(event))
+            num_events += 1
+
+        await ctx.send(f"Finished scan against {target}. {num_events:,} results.")
+        self.current_scan = None
+
+
+if __name__ == "__main__":
+    intents = discord.Intents.default()
+    intents.message_content = True
+    bot = commands.Bot(command_prefix="/", intents=intents)
+
+    @bot.event
+    async def on_ready():
+        print(f"We have logged in as {bot.user}")
+        await bot.add_cog(BBOTDiscordBot())
+
+    bot.run("DISCORD_BOT_TOKEN_HERE")
diff --git a/mkdocs.yml b/mkdocs.yml index d7bb10118..af7fab579 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -21,6 +21,9 @@ nav: - Comparison to Other Tools: comparison.md - Scanning: - Scanning Overview: scanning/index.md + - Presets: + - Overview: scanning/presets.md + - List of Presets: scanning/presets_list.md - Events: scanning/events.md - Output: scanning/output.md - Tips and Tricks: scanning/tips_and_tricks.md @@ -30,23 +33,28 @@ - List of Modules: modules/list_of_modules.md - Nuclei: modules/nuclei.md - Misc: + - Contribution: contribution.md - Release History: release_history.md - Troubleshooting: troubleshooting.md - Developer Manual: - - How to Write a Module: contribution.md - Development Overview: dev/index.md - - Scanner: dev/scanner.md - - Event: dev/event.md - - Target: dev/target.md - - BaseModule: dev/basemodule.md - - Helpers: - - Overview: dev/helpers/index.md - - Command: dev/helpers/command.md - - DNS: dev/helpers/dns.md - - Interactsh: dev/helpers/interactsh.md - - Miscellaneous: dev/helpers/misc.md - - Web: dev/helpers/web.md - - Word Cloud: dev/helpers/wordcloud.md + - How to Write a BBOT Module: dev/module_howto.md + - Discord Bot Example: dev/discord_bot.md + - Code Reference: + - Scanner: dev/scanner.md + - Presets: dev/presets.md + - Event: dev/event.md + - Target: dev/target.md + - BaseModule: dev/basemodule.md + - BBOTCore: dev/core.md + - Helpers: + - Overview: 
dev/helpers/index.md + - Command: dev/helpers/command.md + - DNS: dev/helpers/dns.md + - Interactsh: dev/helpers/interactsh.md + - Miscellaneous: dev/helpers/misc.md + - Web: dev/helpers/web.md + - Word Cloud: dev/helpers/wordcloud.md theme: name: material @@ -54,6 +62,7 @@ theme: favicon: favicon.png features: - content.code.copy + - content.tooltips - navigation.tabs - navigation.sections - navigation.expand @@ -89,6 +98,7 @@ markdown_extensions: - attr_list - admonition - pymdownx.details + - pymdownx.snippets - pymdownx.superfences - pymdownx.highlight: use_pygments: True diff --git a/poetry.lock b/poetry.lock index 23caddf82..be6fea410 100644 --- a/poetry.lock +++ b/poetry.lock @@ -27,13 +27,13 @@ ansible-core = ">=2.15.7,<2.16.0" [[package]] name = "ansible-core" -version = "2.15.9" +version = "2.15.10" description = "Radically simple IT automation" optional = false python-versions = ">=3.9" files = [ - {file = "ansible-core-2.15.9.tar.gz", hash = "sha256:25f9b1b5a5af3c0986bd3928ed086eaddb867527fb5c83afef1a03cfad34f345"}, - {file = "ansible_core-2.15.9-py3-none-any.whl", hash = "sha256:5b6a4b12aa5358f60933e79d86763e3558862282fb1dc563a29b9999e5849fc3"}, + {file = "ansible-core-2.15.10.tar.gz", hash = "sha256:954dbe8e4e802a4dd5df0366193975b692a05806aa8d7358418a7e617346b20f"}, + {file = "ansible_core-2.15.10-py3-none-any.whl", hash = "sha256:42e49f1a6d8cf6cccde775c06c1394885353b71ad9e5f670c6f32d2890127ce8"}, ] [package.dependencies] @@ -132,33 +132,33 @@ lxml = ["lxml"] [[package]] name = "black" -version = "24.3.0" +version = "24.4.0" description = "The uncompromising code formatter." optional = false python-versions = ">=3.8" files = [ - {file = "black-24.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7d5e026f8da0322b5662fa7a8e752b3fa2dac1c1cbc213c3d7ff9bdd0ab12395"}, - {file = "black-24.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9f50ea1132e2189d8dff0115ab75b65590a3e97de1e143795adb4ce317934995"}, - {file = "black-24.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2af80566f43c85f5797365077fb64a393861a3730bd110971ab7a0c94e873e7"}, - {file = "black-24.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:4be5bb28e090456adfc1255e03967fb67ca846a03be7aadf6249096100ee32d0"}, - {file = "black-24.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4f1373a7808a8f135b774039f61d59e4be7eb56b2513d3d2f02a8b9365b8a8a9"}, - {file = "black-24.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aadf7a02d947936ee418777e0247ea114f78aff0d0959461057cae8a04f20597"}, - {file = "black-24.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c02e4ea2ae09d16314d30912a58ada9a5c4fdfedf9512d23326128ac08ac3d"}, - {file = "black-24.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:bf21b7b230718a5f08bd32d5e4f1db7fc8788345c8aea1d155fc17852b3410f5"}, - {file = "black-24.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:2818cf72dfd5d289e48f37ccfa08b460bf469e67fb7c4abb07edc2e9f16fb63f"}, - {file = "black-24.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4acf672def7eb1725f41f38bf6bf425c8237248bb0804faa3965c036f7672d11"}, - {file = "black-24.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7ed6668cbbfcd231fa0dc1b137d3e40c04c7f786e626b405c62bcd5db5857e4"}, - {file = "black-24.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:56f52cfbd3dabe2798d76dbdd299faa046a901041faf2cf33288bc4e6dae57b5"}, - {file = "black-24.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:79dcf34b33e38ed1b17434693763301d7ccbd1c5860674a8f871bd15139e7837"}, - {file = "black-24.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e19cb1c6365fd6dc38a6eae2dcb691d7d83935c10215aef8e6c38edee3f77abd"}, - {file = "black-24.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b76c275e4c1c5ce6e9870911384bff5ca31ab63d19c76811cb1fb162678213"}, - {file = "black-24.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:b5991d523eee14756f3c8d5df5231550ae8993e2286b8014e2fdea7156ed0959"}, - {file = "black-24.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c45f8dff244b3c431b36e3224b6be4a127c6aca780853574c00faf99258041eb"}, - {file = "black-24.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6905238a754ceb7788a73f02b45637d820b2f5478b20fec82ea865e4f5d4d9f7"}, - {file = "black-24.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7de8d330763c66663661a1ffd432274a2f92f07feeddd89ffd085b5744f85e7"}, - {file = "black-24.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:7bb041dca0d784697af4646d3b62ba4a6b028276ae878e53f6b4f74ddd6db99f"}, - {file = "black-24.3.0-py3-none-any.whl", hash = "sha256:41622020d7120e01d377f74249e677039d20e6344ff5851de8a10f11f513bf93"}, - {file = "black-24.3.0.tar.gz", hash = "sha256:a0c9c4a0771afc6919578cec71ce82a3e31e054904e7197deacbc9382671c41f"}, + {file = "black-24.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6ad001a9ddd9b8dfd1b434d566be39b1cd502802c8d38bbb1ba612afda2ef436"}, + {file = "black-24.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e3a3a092b8b756c643fe45f4624dbd5a389f770a4ac294cf4d0fce6af86addaf"}, + {file = "black-24.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dae79397f367ac8d7adb6c779813328f6d690943f64b32983e896bcccd18cbad"}, + {file = "black-24.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:71d998b73c957444fb7c52096c3843875f4b6b47a54972598741fe9a7f737fcb"}, + {file = "black-24.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8e5537f456a22cf5cfcb2707803431d2feeb82ab3748ade280d6ccd0b40ed2e8"}, + {file = "black-24.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64e60a7edd71fd542a10a9643bf369bfd2644de95ec71e86790b063aa02ff745"}, + {file = "black-24.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cd5b4f76056cecce3e69b0d4c228326d2595f506797f40b9233424e2524c070"}, + {file = "black-24.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:64578cf99b6b46a6301bc28bdb89f9d6f9b592b1c5837818a177c98525dbe397"}, + {file = "black-24.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f95cece33329dc4aa3b0e1a771c41075812e46cf3d6e3f1dfe3d91ff09826ed2"}, + {file = "black-24.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4396ca365a4310beef84d446ca5016f671b10f07abdba3e4e4304218d2c71d33"}, + {file = "black-24.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44d99dfdf37a2a00a6f7a8dcbd19edf361d056ee51093b2445de7ca09adac965"}, + {file = "black-24.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:21f9407063ec71c5580b8ad975653c66508d6a9f57bd008bb8691d273705adcd"}, + {file = "black-24.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:652e55bb722ca026299eb74e53880ee2315b181dfdd44dca98e43448620ddec1"}, + {file = "black-24.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7f2966b9b2b3b7104fca9d75b2ee856fe3fdd7ed9e47c753a4bb1a675f2caab8"}, + {file = "black-24.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1bb9ca06e556a09f7f7177bc7cb604e5ed2d2df1e9119e4f7d2f1f7071c32e5d"}, + {file = "black-24.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:d4e71cdebdc8efeb6deaf5f2deb28325f8614d48426bed118ecc2dcaefb9ebf3"}, + {file = "black-24.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6644f97a7ef6f401a150cca551a1ff97e03c25d8519ee0bbc9b0058772882665"}, + {file = "black-24.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:75a2d0b4f5eb81f7eebc31f788f9830a6ce10a68c91fbe0fade34fff7a2836e6"}, + {file = "black-24.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb949f56a63c5e134dfdca12091e98ffb5fd446293ebae123d10fc1abad00b9e"}, + {file = "black-24.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:7852b05d02b5b9a8c893ab95863ef8986e4dda29af80bbbda94d7aee1abf8702"}, + {file = "black-24.4.0-py3-none-any.whl", hash = "sha256:74eb9b5420e26b42c00a3ff470dc0cd144b80a766128b1771d07643165e08d0e"}, + {file = "black-24.4.0.tar.gz", hash = "sha256:f07b69fda20578367eaebbd670ff8fc653ab181e1ff95d84497f9fa20e7d0641"}, ] [package.dependencies] @@ -388,18 +388,20 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} [[package]] name = "cloudcheck" -version = "3.1.0.318" +version = "5.0.0.350" description = "Check whether an IP address belongs to a cloud provider" optional = false python-versions = "<4.0,>=3.9" files = [ - {file = "cloudcheck-3.1.0.318-py3-none-any.whl", hash = "sha256:471dba97531e1f60aadab8daa6cb1d63727f67c16fd7b4758db46c9af2f362f1"}, - {file = "cloudcheck-3.1.0.318.tar.gz", hash = "sha256:ba7fcc026817aa05f74c7789d2ac306469f3143f91b3ea9f95c57c70a7b0b787"}, + {file = "cloudcheck-5.0.0.350-py3-none-any.whl", hash = "sha256:6f2ed981818bde6d8b6c5a6413a843e11d0aa1a4bf8b36452dcae1030a537dd6"}, + {file = "cloudcheck-5.0.0.350.tar.gz", hash = "sha256:cb59dfef966268ebc176e242634b84a3423a84ffaf4fac40566f37edfaddc106"}, ] [package.dependencies] httpx = ">=0.26,<0.28" pydantic = ">=2.4.2,<3.0.0" +radixtarget = ">=1.0.0.14,<2.0.0.0" +regex = ">=2024.4.16,<2025.0.0" [[package]] name = "colorama" @@ -414,63 +416,63 @@ files = [ [[package]] name = "coverage" -version = "7.4.3" +version = "7.4.4" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.4.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8580b827d4746d47294c0e0b92854c85a92c2227927433998f0d3320ae8a71b6"}, - {file = "coverage-7.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:718187eeb9849fc6cc23e0d9b092bc2348821c5e1a901c9f8975df0bc785bfd4"}, - {file = "coverage-7.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:767b35c3a246bcb55b8044fd3a43b8cd553dd1f9f2c1eeb87a302b1f8daa0524"}, - {file = "coverage-7.4.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae7f19afe0cce50039e2c782bff379c7e347cba335429678450b8fe81c4ef96d"}, - {file = "coverage-7.4.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba3a8aaed13770e970b3df46980cb068d1c24af1a1968b7818b69af8c4347efb"}, - {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ee866acc0861caebb4f2ab79f0b94dbfbdbfadc19f82e6e9c93930f74e11d7a0"}, - {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:506edb1dd49e13a2d4cac6a5173317b82a23c9d6e8df63efb4f0380de0fbccbc"}, - {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:fd6545d97c98a192c5ac995d21c894b581f1fd14cf389be90724d21808b657e2"}, - {file = "coverage-7.4.3-cp310-cp310-win32.whl", hash = "sha256:f6a09b360d67e589236a44f0c39218a8efba2593b6abdccc300a8862cffc2f94"}, - {file = "coverage-7.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:18d90523ce7553dd0b7e23cbb28865db23cddfd683a38fb224115f7826de78d0"}, - {file = "coverage-7.4.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cbbe5e739d45a52f3200a771c6d2c7acf89eb2524890a4a3aa1a7fa0695d2a47"}, - {file = "coverage-7.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:489763b2d037b164846ebac0cbd368b8a4ca56385c4090807ff9fad817de4113"}, - {file = "coverage-7.4.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:451f433ad901b3bb00184d83fd83d135fb682d780b38af7944c9faeecb1e0bfe"}, - {file = "coverage-7.4.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fcc66e222cf4c719fe7722a403888b1f5e1682d1679bd780e2b26c18bb648cdc"}, - {file = "coverage-7.4.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3ec74cfef2d985e145baae90d9b1b32f85e1741b04cd967aaf9cfa84c1334f3"}, - {file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:abbbd8093c5229c72d4c2926afaee0e6e3140de69d5dcd918b2921f2f0c8baba"}, - {file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:35eb581efdacf7b7422af677b92170da4ef34500467381e805944a3201df2079"}, - {file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8249b1c7334be8f8c3abcaaa996e1e4927b0e5a23b65f5bf6cfe3180d8ca7840"}, - {file = "coverage-7.4.3-cp311-cp311-win32.whl", hash = "sha256:cf30900aa1ba595312ae41978b95e256e419d8a823af79ce670835409fc02ad3"}, - {file = "coverage-7.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:18c7320695c949de11a351742ee001849912fd57e62a706d83dfc1581897fa2e"}, - {file = "coverage-7.4.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b51bfc348925e92a9bd9b2e48dad13431b57011fd1038f08316e6bf1df107d10"}, - {file = "coverage-7.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d6cdecaedea1ea9e033d8adf6a0ab11107b49571bbb9737175444cea6eb72328"}, - {file = "coverage-7.4.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b2eccb883368f9e972e216c7b4c7c06cabda925b5f06dde0650281cb7666a30"}, - {file = "coverage-7.4.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c00cdc8fa4e50e1cc1f941a7f2e3e0f26cb2a1233c9696f26963ff58445bac7"}, - {file = "coverage-7.4.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9a4a8dd3dcf4cbd3165737358e4d7dfbd9d59902ad11e3b15eebb6393b0446e"}, - {file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:062b0a75d9261e2f9c6d071753f7eef0fc9caf3a2c82d36d76667ba7b6470003"}, - {file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:ebe7c9e67a2d15fa97b77ea6571ce5e1e1f6b0db71d1d5e96f8d2bf134303c1d"}, - {file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c0a120238dd71c68484f02562f6d446d736adcc6ca0993712289b102705a9a3a"}, - {file = "coverage-7.4.3-cp312-cp312-win32.whl", hash = "sha256:37389611ba54fd6d278fde86eb2c013c8e50232e38f5c68235d09d0a3f8aa352"}, - {file = "coverage-7.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:d25b937a5d9ffa857d41be042b4238dd61db888533b53bc76dc082cb5a15e914"}, - {file = 
"coverage-7.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:28ca2098939eabab044ad68850aac8f8db6bf0b29bc7f2887d05889b17346454"}, - {file = "coverage-7.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:280459f0a03cecbe8800786cdc23067a8fc64c0bd51dc614008d9c36e1659d7e"}, - {file = "coverage-7.4.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c0cdedd3500e0511eac1517bf560149764b7d8e65cb800d8bf1c63ebf39edd2"}, - {file = "coverage-7.4.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a9babb9466fe1da12417a4aed923e90124a534736de6201794a3aea9d98484e"}, - {file = "coverage-7.4.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dec9de46a33cf2dd87a5254af095a409ea3bf952d85ad339751e7de6d962cde6"}, - {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:16bae383a9cc5abab9bb05c10a3e5a52e0a788325dc9ba8499e821885928968c"}, - {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2c854ce44e1ee31bda4e318af1dbcfc929026d12c5ed030095ad98197eeeaed0"}, - {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ce8c50520f57ec57aa21a63ea4f325c7b657386b3f02ccaedeccf9ebe27686e1"}, - {file = "coverage-7.4.3-cp38-cp38-win32.whl", hash = "sha256:708a3369dcf055c00ddeeaa2b20f0dd1ce664eeabde6623e516c5228b753654f"}, - {file = "coverage-7.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:1bf25fbca0c8d121a3e92a2a0555c7e5bc981aee5c3fdaf4bb7809f410f696b9"}, - {file = "coverage-7.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b253094dbe1b431d3a4ac2f053b6d7ede2664ac559705a704f621742e034f1f"}, - {file = "coverage-7.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77fbfc5720cceac9c200054b9fab50cb2a7d79660609200ab83f5db96162d20c"}, - {file = "coverage-7.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6679060424faa9c11808598504c3ab472de4531c571ab2befa32f4971835788e"}, - {file = "coverage-7.4.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4af154d617c875b52651dd8dd17a31270c495082f3d55f6128e7629658d63765"}, - {file = "coverage-7.4.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8640f1fde5e1b8e3439fe482cdc2b0bb6c329f4bb161927c28d2e8879c6029ee"}, - {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:69b9f6f66c0af29642e73a520b6fed25ff9fd69a25975ebe6acb297234eda501"}, - {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0842571634f39016a6c03e9d4aba502be652a6e4455fadb73cd3a3a49173e38f"}, - {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a78ed23b08e8ab524551f52953a8a05d61c3a760781762aac49f8de6eede8c45"}, - {file = "coverage-7.4.3-cp39-cp39-win32.whl", hash = "sha256:c0524de3ff096e15fcbfe8f056fdb4ea0bf497d584454f344d59fce069d3e6e9"}, - {file = "coverage-7.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:0209a6369ccce576b43bb227dc8322d8ef9e323d089c6f3f26a597b09cb4d2aa"}, - {file = "coverage-7.4.3-pp38.pp39.pp310-none-any.whl", hash = "sha256:7cbde573904625509a3f37b6fecea974e363460b556a627c60dc2f47e2fffa51"}, - {file = "coverage-7.4.3.tar.gz", hash = "sha256:276f6077a5c61447a48d133ed13e759c09e62aff0dc84274a68dc18660104d52"}, + {file = "coverage-7.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0be5efd5127542ef31f165de269f77560d6cdef525fffa446de6f7e9186cfb2"}, + {file = 
"coverage-7.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ccd341521be3d1b3daeb41960ae94a5e87abe2f46f17224ba5d6f2b8398016cf"}, + {file = "coverage-7.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fa497a8ab37784fbb20ab699c246053ac294d13fc7eb40ec007a5043ec91f8"}, + {file = "coverage-7.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b1a93009cb80730c9bca5d6d4665494b725b6e8e157c1cb7f2db5b4b122ea562"}, + {file = "coverage-7.4.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:690db6517f09336559dc0b5f55342df62370a48f5469fabf502db2c6d1cffcd2"}, + {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:09c3255458533cb76ef55da8cc49ffab9e33f083739c8bd4f58e79fecfe288f7"}, + {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8ce1415194b4a6bd0cdcc3a1dfbf58b63f910dcb7330fe15bdff542c56949f87"}, + {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b91cbc4b195444e7e258ba27ac33769c41b94967919f10037e6355e998af255c"}, + {file = "coverage-7.4.4-cp310-cp310-win32.whl", hash = "sha256:598825b51b81c808cb6f078dcb972f96af96b078faa47af7dfcdf282835baa8d"}, + {file = "coverage-7.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:09ef9199ed6653989ebbcaacc9b62b514bb63ea2f90256e71fea3ed74bd8ff6f"}, + {file = "coverage-7.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f9f50e7ef2a71e2fae92774c99170eb8304e3fdf9c8c3c7ae9bab3e7229c5cf"}, + {file = "coverage-7.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:623512f8ba53c422fcfb2ce68362c97945095b864cda94a92edbaf5994201083"}, + {file = "coverage-7.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0513b9508b93da4e1716744ef6ebc507aff016ba115ffe8ecff744d1322a7b63"}, + {file = "coverage-7.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40209e141059b9370a2657c9b15607815359ab3ef9918f0196b6fccce8d3230f"}, + {file = "coverage-7.4.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a2b2b78c78293782fd3767d53e6474582f62443d0504b1554370bde86cc8227"}, + {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:73bfb9c09951125d06ee473bed216e2c3742f530fc5acc1383883125de76d9cd"}, + {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f384c3cc76aeedce208643697fb3e8437604b512255de6d18dae3f27655a384"}, + {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:54eb8d1bf7cacfbf2a3186019bcf01d11c666bd495ed18717162f7eb1e9dd00b"}, + {file = "coverage-7.4.4-cp311-cp311-win32.whl", hash = "sha256:cac99918c7bba15302a2d81f0312c08054a3359eaa1929c7e4b26ebe41e9b286"}, + {file = "coverage-7.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:b14706df8b2de49869ae03a5ccbc211f4041750cd4a66f698df89d44f4bd30ec"}, + {file = "coverage-7.4.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:201bef2eea65e0e9c56343115ba3814e896afe6d36ffd37bab783261db430f76"}, + {file = "coverage-7.4.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:41c9c5f3de16b903b610d09650e5e27adbfa7f500302718c9ffd1c12cf9d6818"}, + {file = "coverage-7.4.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d898fe162d26929b5960e4e138651f7427048e72c853607f2b200909794ed978"}, + {file = 
"coverage-7.4.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ea79bb50e805cd6ac058dfa3b5c8f6c040cb87fe83de10845857f5535d1db70"}, + {file = "coverage-7.4.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce4b94265ca988c3f8e479e741693d143026632672e3ff924f25fab50518dd51"}, + {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:00838a35b882694afda09f85e469c96367daa3f3f2b097d846a7216993d37f4c"}, + {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:fdfafb32984684eb03c2d83e1e51f64f0906b11e64482df3c5db936ce3839d48"}, + {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:69eb372f7e2ece89f14751fbcbe470295d73ed41ecd37ca36ed2eb47512a6ab9"}, + {file = "coverage-7.4.4-cp312-cp312-win32.whl", hash = "sha256:137eb07173141545e07403cca94ab625cc1cc6bc4c1e97b6e3846270e7e1fea0"}, + {file = "coverage-7.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:d71eec7d83298f1af3326ce0ff1d0ea83c7cb98f72b577097f9083b20bdaf05e"}, + {file = "coverage-7.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d5ae728ff3b5401cc320d792866987e7e7e880e6ebd24433b70a33b643bb0384"}, + {file = "coverage-7.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cc4f1358cb0c78edef3ed237ef2c86056206bb8d9140e73b6b89fbcfcbdd40e1"}, + {file = "coverage-7.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8130a2aa2acb8788e0b56938786c33c7c98562697bf9f4c7d6e8e5e3a0501e4a"}, + {file = "coverage-7.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf271892d13e43bc2b51e6908ec9a6a5094a4df1d8af0bfc360088ee6c684409"}, + {file = "coverage-7.4.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4cdc86d54b5da0df6d3d3a2f0b710949286094c3a6700c21e9015932b81447e"}, + {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ae71e7ddb7a413dd60052e90528f2f65270aad4b509563af6d03d53e979feafd"}, + {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:38dd60d7bf242c4ed5b38e094baf6401faa114fc09e9e6632374388a404f98e7"}, + {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa5b1c1bfc28384f1f53b69a023d789f72b2e0ab1b3787aae16992a7ca21056c"}, + {file = "coverage-7.4.4-cp38-cp38-win32.whl", hash = "sha256:dfa8fe35a0bb90382837b238fff375de15f0dcdb9ae68ff85f7a63649c98527e"}, + {file = "coverage-7.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:b2991665420a803495e0b90a79233c1433d6ed77ef282e8e152a324bbbc5e0c8"}, + {file = "coverage-7.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b799445b9f7ee8bf299cfaed6f5b226c0037b74886a4e11515e569b36fe310d"}, + {file = "coverage-7.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b4d33f418f46362995f1e9d4f3a35a1b6322cb959c31d88ae56b0298e1c22357"}, + {file = "coverage-7.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aadacf9a2f407a4688d700e4ebab33a7e2e408f2ca04dbf4aef17585389eff3e"}, + {file = "coverage-7.4.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c95949560050d04d46b919301826525597f07b33beba6187d04fa64d47ac82e"}, + {file = "coverage-7.4.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff7687ca3d7028d8a5f0ebae95a6e4827c5616b31a4ee1192bdfde697db110d4"}, + {file = 
"coverage-7.4.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5fc1de20b2d4a061b3df27ab9b7c7111e9a710f10dc2b84d33a4ab25065994ec"}, + {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c74880fc64d4958159fbd537a091d2a585448a8f8508bf248d72112723974cbd"}, + {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:742a76a12aa45b44d236815d282b03cfb1de3b4323f3e4ec933acfae08e54ade"}, + {file = "coverage-7.4.4-cp39-cp39-win32.whl", hash = "sha256:d89d7b2974cae412400e88f35d86af72208e1ede1a541954af5d944a8ba46c57"}, + {file = "coverage-7.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:9ca28a302acb19b6af89e90f33ee3e1906961f94b54ea37de6737b7ca9d8827c"}, + {file = "coverage-7.4.4-pp38.pp39.pp310-none-any.whl", hash = "sha256:b2c5edc4ac10a7ef6605a966c58929ec6c1bd0917fb8c15cb3363f65aa40e677"}, + {file = "coverage-7.4.4.tar.gz", hash = "sha256:c901df83d097649e257e803be22592aedfd5182f07b3cc87d640bbb9afd50f49"}, ] [package.dependencies] @@ -535,20 +537,20 @@ test-randomorder = ["pytest-randomly"] [[package]] name = "deepdiff" -version = "6.7.1" +version = "7.0.1" description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "deepdiff-6.7.1-py3-none-any.whl", hash = "sha256:58396bb7a863cbb4ed5193f548c56f18218060362311aa1dc36397b2f25108bd"}, - {file = "deepdiff-6.7.1.tar.gz", hash = "sha256:b367e6fa6caac1c9f500adc79ada1b5b1242c50d5f716a1a4362030197847d30"}, + {file = "deepdiff-7.0.1-py3-none-any.whl", hash = "sha256:447760081918216aa4fd4ca78a4b6a848b81307b2ea94c810255334b759e1dc3"}, + {file = "deepdiff-7.0.1.tar.gz", hash = "sha256:260c16f052d4badbf60351b4f77e8390bee03a0b516246f6839bc813fb429ddf"}, ] [package.dependencies] -ordered-set = ">=4.0.2,<4.2.0" +ordered-set = ">=4.1.0,<4.2.0" [package.extras] -cli = ["click (==8.1.3)", "pyyaml (==6.0.1)"] +cli = ["click (==8.1.7)", "pyyaml (==6.0.1)"] optimize = ["orjson"] [[package]] @@ -584,24 +586,24 @@ wmi = ["wmi (>=1.5.1)"] [[package]] name = "docutils" -version = "0.20.1" +version = "0.21.1" description = "Docutils -- Python Documentation Utilities" optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" files = [ - {file = "docutils-0.20.1-py3-none-any.whl", hash = "sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6"}, - {file = "docutils-0.20.1.tar.gz", hash = "sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b"}, + {file = "docutils-0.21.1-py3-none-any.whl", hash = "sha256:14c8d34a55b46c88f9f714adb29cefbdd69fb82f3fef825e59c5faab935390d8"}, + {file = "docutils-0.21.1.tar.gz", hash = "sha256:65249d8a5345bc95e0f40f280ba63c98eb24de35c6c8f5b662e3e8948adea83f"}, ] [[package]] name = "dunamai" -version = "1.19.2" +version = "1.20.0" description = "Dynamic version generation" optional = false python-versions = ">=3.5" files = [ - {file = "dunamai-1.19.2-py3-none-any.whl", hash = "sha256:bc126b17571a44d68ed826cec596e0f61dc01edca8b21486f70014936a5d44f2"}, - {file = "dunamai-1.19.2.tar.gz", hash = "sha256:3be4049890763e19b8df1d52960dbea60b3e263eb0c96144a677ae0633734d2e"}, + {file = "dunamai-1.20.0-py3-none-any.whl", hash = "sha256:a2185c227351a52a013c7d7a695d3f3cb6625c3eed14a5295adbbcc7e2f7f8d4"}, + {file = "dunamai-1.20.0.tar.gz", hash = "sha256:c3f1ee64a1e6cc9ebc98adafa944efaccd0db32482d2177e59c1ff6bdf23cd70"}, ] [package.dependencies] @@ -609,13 +611,13 @@ packaging = ">=20.9" [[package]] name = "exceptiongroup" 
-version = "1.2.0" +version = "1.2.1" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, - {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, + {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, + {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, ] [package.extras] @@ -623,18 +625,18 @@ test = ["pytest (>=6)"] [[package]] name = "filelock" -version = "3.13.1" +version = "3.13.4" description = "A platform independent file lock." optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.13.1-py3-none-any.whl", hash = "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c"}, - {file = "filelock-3.13.1.tar.gz", hash = "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e"}, + {file = "filelock-3.13.4-py3-none-any.whl", hash = "sha256:404e5e9253aa60ad457cae1be07c0f0ca90a63931200a47d9b6a6af84fd7b45f"}, + {file = "filelock-3.13.4.tar.gz", hash = "sha256:d13f466618bfde72bd2c18255e269f72542c6e70e7bac83a0232d6b1cc5c8cf4"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.24)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] +docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] typing = ["typing-extensions (>=4.8)"] [[package]] @@ -672,13 +674,13 @@ dev = ["flake8", "markdown", "twine", "wheel"] [[package]] name = "griffe" -version = "0.41.0" +version = "0.44.0" description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." optional = false python-versions = ">=3.8" files = [ - {file = "griffe-0.41.0-py3-none-any.whl", hash = "sha256:8aa7fc6eb00cb80af9c0198178c6b7110cb59fa2c5187bb13ea25eebbe4dd928"}, - {file = "griffe-0.41.0.tar.gz", hash = "sha256:850128c3198c18713eaf0a6cc8572e590a16b1965f72a4e871e66cf84740903f"}, + {file = "griffe-0.44.0-py3-none-any.whl", hash = "sha256:8a4471c469ba980b87c843f1168850ce39d0c1d0c7be140dca2480f76c8e5446"}, + {file = "griffe-0.44.0.tar.gz", hash = "sha256:34aee1571042f9bf00529bc715de4516fb6f482b164e90d030300601009e0223"}, ] [package.dependencies] @@ -697,13 +699,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.4" +version = "1.0.5" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.4-py3-none-any.whl", hash = "sha256:ac418c1db41bade2ad53ae2f3834a3a0f5ae76b56cf5aa497d2d033384fc7d73"}, - {file = "httpcore-1.0.4.tar.gz", hash = "sha256:cb2839ccfcba0d2d3c1131d3c3e26dfc327326fbe7a5dc0dbfe9f6c9151bb022"}, + {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, + {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, ] [package.dependencies] @@ -714,7 +716,7 @@ h11 = ">=0.13,<0.15" asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.25.0)"] +trio = ["trio (>=0.22.0,<0.26.0)"] [[package]] name = "httpx" @@ -756,13 +758,13 @@ license = ["ukkonen"] [[package]] name = "idna" -version = "3.6" +version = "3.7" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" files = [ - {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, - {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] [[package]] @@ -870,124 +872,165 @@ files = [ [[package]] name = "lxml" -version = "5.2.0" +version = "5.2.1" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." optional = false python-versions = ">=3.6" files = [ - {file = "lxml-5.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c54f8d6160080831a76780d850302fdeb0e8d0806f661777b0714dfb55d9a08a"}, - {file = "lxml-5.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e95ae029396382a0d2e8174e4077f96befcd4a2184678db363ddc074eb4d3b2"}, - {file = "lxml-5.2.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5810fa80e64a0c689262a71af999c5735f48c0da0affcbc9041d1ef5ef3920be"}, - {file = "lxml-5.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae69524fd6a68b288574013f8fadac23cacf089c75cd3fc5b216277a445eb736"}, - {file = "lxml-5.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fadda215e32fe375d65e560b7f7e2a37c7f9c4ecee5315bb1225ca6ac9bf5838"}, - {file = "lxml-5.2.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:f1f164e4cc6bc646b1fc86664c3543bf4a941d45235797279b120dc740ee7af5"}, - {file = "lxml-5.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:3603a8a41097daf7672cae22cc4a860ab9ea5597f1c5371cb21beca3398b8d6a"}, - {file = "lxml-5.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b3b4bb89a785f4fd60e05f3c3a526c07d0d68e3536f17f169ca13bf5b5dd75a5"}, - {file = "lxml-5.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1effc10bf782f0696e76ecfeba0720ea02c0c31d5bffb7b29ba10debd57d1c3d"}, - {file = "lxml-5.2.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b03531f6cd6ce4b511dcece060ca20aa5412f8db449274b44f4003f282e6272f"}, - {file = "lxml-5.2.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7fac15090bb966719df06f0c4f8139783746d1e60e71016d8a65db2031ca41b8"}, - {file = "lxml-5.2.0-cp310-cp310-win32.whl", hash = 
"sha256:92bb37c96215c4b2eb26f3c791c0bf02c64dd251effa532b43ca5049000c4478"}, - {file = "lxml-5.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:b0181c22fdb89cc19e70240a850e5480817c3e815b1eceb171b3d7a3aa3e596a"}, - {file = "lxml-5.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ada8ce9e6e1d126ef60d215baaa0c81381ba5841c25f1d00a71cdafdc038bd27"}, - {file = "lxml-5.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3cefb133c859f06dab2ae63885d9f405000c4031ec516e0ed4f9d779f690d8e3"}, - {file = "lxml-5.2.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1ede2a7a86a977b0c741654efaeca0af7860a9b1ae39f9268f0936246a977ee0"}, - {file = "lxml-5.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d46df6f0b1a0cda39d12c5c4615a7d92f40342deb8001c7b434d7c8c78352e58"}, - {file = "lxml-5.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2259243ee734cc736e237719037efb86603c891fd363cc7973a2d0ac8a0e3f"}, - {file = "lxml-5.2.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:c53164f29ed3c3868787144e8ea8a399ffd7d8215f59500a20173593c19e96eb"}, - {file = "lxml-5.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:371aab9a397dcc76625ad3b02fa9b21be63406d69237b773156e7d1fc2ce0cae"}, - {file = "lxml-5.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e08784288a179b59115b5e57abf6d387528b39abb61105fe17510a199a277a40"}, - {file = "lxml-5.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4c232726f7b6df5143415a06323faaa998ef8abbe1c0ed00d718755231d76f08"}, - {file = "lxml-5.2.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e4366e58c0508da4dee4c7c70cee657e38553d73abdffa53abbd7d743711ee11"}, - {file = "lxml-5.2.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c84dce8fb2e900d4fb094e76fdad34a5fd06de53e41bddc1502c146eb11abd74"}, - {file = "lxml-5.2.0-cp311-cp311-win32.whl", hash = "sha256:0947d1114e337dc2aae2fa14bbc9ed5d9ca1a0acd6d2f948df9926aef65305e9"}, - {file = "lxml-5.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:1eace37a9f4a1bef0bb5c849434933fd6213008ec583c8e31ee5b8e99c7c8500"}, - {file = "lxml-5.2.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f2cb157e279d28c66b1c27e0948687dc31dc47d1ab10ce0cd292a8334b7de3d5"}, - {file = "lxml-5.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:53c0e56f41ef68c1ce4e96f27ecdc2df389730391a2fd45439eb3facb02d36c8"}, - {file = "lxml-5.2.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:703d60e59ab45c17485c2c14b11880e4f7f0eab07134afa9007573fa5a779a5a"}, - {file = "lxml-5.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eaf5e308a5e50bc0548c4fdca0117a31ec9596f8cfc96592db170bcecc71a957"}, - {file = "lxml-5.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af64df85fecd3cf3b2e792f0b5b4d92740905adfa8ce3b24977a55415f1a0c40"}, - {file = "lxml-5.2.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:df7dfbdef11702fd22c2eaf042d7098d17edbc62d73f2199386ad06cbe466f6d"}, - {file = "lxml-5.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:7250030a7835bfd5ba6ca7d1ad483ec90f9cbc29978c5e75c1cc3e031d3c4160"}, - {file = "lxml-5.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:be5faa2d5c8c8294d770cfd09d119fb27b5589acc59635b0cf90f145dbe81dca"}, - {file = "lxml-5.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:347ec08250d5950f5b016caa3e2e13fb2cb9714fe6041d52e3716fb33c208663"}, - {file = "lxml-5.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:dc7b630c4fb428b8a40ddd0bfc4bc19de11bb3c9b031154f77360e48fe8b4451"}, - {file = "lxml-5.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ae550cbd7f229cdf2841d9b01406bcca379a5fb327b9efb53ba620a10452e835"}, - {file = "lxml-5.2.0-cp312-cp312-win32.whl", hash = "sha256:7c61ce3cdd6e6c9f4003ac118be7eb3036d0ce2afdf23929e533e54482780f74"}, - {file = "lxml-5.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:f90c36ca95a44d2636bbf55a51ca30583b59b71b6547b88d954e029598043551"}, - {file = "lxml-5.2.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:1cce2eaad7e38b985b0f91f18468dda0d6b91862d32bec945b0e46e2ffe7222e"}, - {file = "lxml-5.2.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:60a3983d32f722a8422c01e4dc4badc7a307ca55c59e2485d0e14244a52c482f"}, - {file = "lxml-5.2.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60847dfbdfddf08a56c4eefe48234e8c1ab756c7eda4a2a7c1042666a5516564"}, - {file = "lxml-5.2.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bbe335f0d1a86391671d975a1b5e9b08bb72fba6b567c43bdc2e55ca6e6c086"}, - {file = "lxml-5.2.0-cp36-cp36m-manylinux_2_28_aarch64.whl", hash = "sha256:3ac7c8a60b8ad51fe7bca99a634dd625d66492c502fd548dc6dc769ce7d94b6a"}, - {file = "lxml-5.2.0-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:73e69762cf740ac3ae81137ef9d6f15f93095f50854e233d50b29e7b8a91dbc6"}, - {file = "lxml-5.2.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:281ee1ffeb0ab06204dfcd22a90e9003f0bb2dab04101ad983d0b1773bc10588"}, - {file = "lxml-5.2.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ba3a86b0d5a5c93104cb899dff291e3ae13729c389725a876d00ef9696de5425"}, - {file = "lxml-5.2.0-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:356f8873b1e27b81793e30144229adf70f6d3e36e5cb7b6d289da690f4398953"}, - {file = "lxml-5.2.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:2a34e74ffe92c413f197ff4967fb1611d938ee0691b762d062ef0f73814f3aa4"}, - {file = "lxml-5.2.0-cp36-cp36m-win32.whl", hash = "sha256:6f0d2b97a5a06c00c963d4542793f3e486b1ed3a957f8c19f6006ed39d104bb0"}, - {file = "lxml-5.2.0-cp36-cp36m-win_amd64.whl", hash = "sha256:35e39c6fd089ad6674eb52d93aa874d6027b3ae44d2381cca6e9e4c2e102c9c8"}, - {file = "lxml-5.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5f6e4e5a62114ae76690c4a04c5108d067442d0a41fd092e8abd25af1288c450"}, - {file = "lxml-5.2.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93eede9bcc842f891b2267c7f0984d811940d1bc18472898a1187fe560907a99"}, - {file = "lxml-5.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ad364026c2cebacd7e01d1138bd53639822fefa8f7da90fc38cd0e6319a2699"}, - {file = "lxml-5.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f06e4460e76468d99cc36d5b9bc6fc5f43e6662af44960e13e3f4e040aacb35"}, - {file = "lxml-5.2.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:ca3236f31d565555139d5b00b790ed2a98ac6f0c4470c4032f8b5e5a5dba3c1a"}, - {file = "lxml-5.2.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:a9b67b850ab1d304cb706cf71814b0e0c3875287083d7ec55ee69504a9c48180"}, - {file = "lxml-5.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5261c858c390ae9a19aba96796948b6a2d56649cbd572968970dc8da2b2b2a42"}, - {file = 
"lxml-5.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e8359fb610c8c444ac473cfd82dae465f405ff807cabb98a9b9712bbd0028751"}, - {file = "lxml-5.2.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:f9e27841cddfaebc4e3ffbe5dbdff42891051acf5befc9f5323944b2c61cef16"}, - {file = "lxml-5.2.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:641a8da145aca67671205f3e89bfec9815138cf2fe06653c909eab42e486d373"}, - {file = "lxml-5.2.0-cp37-cp37m-win32.whl", hash = "sha256:931a3a13e0f574abce8f3152b207938a54304ccf7a6fd7dff1fdb2f6691d08af"}, - {file = "lxml-5.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:246c93e2503c710cf02c7e9869dc0258223cbefe5e8f9ecded0ac0aa07fd2bf8"}, - {file = "lxml-5.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:11acfcdf5a38cf89c48662123a5d02ae0a7d99142c7ee14ad90de5c96a9b6f06"}, - {file = "lxml-5.2.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:200f70b5d95fc79eb9ed7f8c4888eef4e274b9bf380b829d3d52e9ed962e9231"}, - {file = "lxml-5.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba4d02aed47c25be6775a40d55c5774327fdedba79871b7c2485e80e45750cb2"}, - {file = "lxml-5.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e283b24c14361fe9e04026a1d06c924450415491b83089951d469509900d9f32"}, - {file = "lxml-5.2.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:03e3962d6ad13a862dacd5b3a3ea60b4d092a550f36465234b8639311fd60989"}, - {file = "lxml-5.2.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:6e45fd5213e5587a610b7e7c8c5319a77591ab21ead42df46bb342e21bc1418d"}, - {file = "lxml-5.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:27877732946843f4b6bfc56eb40d865653eef34ad2edeed16b015d5c29c248df"}, - {file = "lxml-5.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4d16b44ad0dd8c948129639e34c8d301ad87ebc852568ace6fe9a5ad9ce67ee1"}, - {file = "lxml-5.2.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:b8f842df9ba26135c5414e93214e04fe0af259bb4f96a32f756f89467f7f3b45"}, - {file = "lxml-5.2.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c74e77df9e36c8c91157853e6cd400f6f9ca7a803ba89981bfe3f3fc7e5651ef"}, - {file = "lxml-5.2.0-cp38-cp38-win32.whl", hash = "sha256:1459a998c10a99711ac532abe5cc24ba354e4396dafef741c7797f8830712d56"}, - {file = "lxml-5.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:a00f5931b7cccea775123c3c0a2513aee58afdad8728550cc970bff32280bdd2"}, - {file = "lxml-5.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ddda5ba8831f258ac7e6364be03cb27aa62f50c67fd94bc1c3b6247959cc0369"}, - {file = "lxml-5.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:56835b9e9a7767202fae06310c6b67478963e535fe185bed3bf9af5b18d2b67e"}, - {file = "lxml-5.2.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25fef8794f0dc89f01bdd02df6a7fec4bcb2fbbe661d571e898167a83480185e"}, - {file = "lxml-5.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32d44af078485c4da9a7ec460162392d49d996caf89516fa0b75ad0838047122"}, - {file = "lxml-5.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f354d62345acdf22aa3e171bd9723790324a66fafe61bfe3873b86724cf6daaa"}, - {file = "lxml-5.2.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6a7e0935f05e1cf1a3aa1d49a87505773b04f128660eac2a24a5594ea6b1baa7"}, - {file = "lxml-5.2.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = 
"sha256:75a4117b43694c72a0d89f6c18a28dc57407bde4650927d4ef5fd384bdf6dcc7"}, - {file = "lxml-5.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:57402d6cdd8a897ce21cf8d1ff36683583c17a16322a321184766c89a1980600"}, - {file = "lxml-5.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:56591e477bea531e5e1854f5dfb59309d5708669bc921562a35fd9ca5182bdcd"}, - {file = "lxml-5.2.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7efbce96719aa275d49ad5357886845561328bf07e1d5ab998f4e3066c5ccf15"}, - {file = "lxml-5.2.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a3c39def0965e8fb5c8d50973e0c7b4ce429a2fa730f3f9068a7f4f9ce78410b"}, - {file = "lxml-5.2.0-cp39-cp39-win32.whl", hash = "sha256:5188f22c00381cb44283ecb28c8d85c2db4a3035774dd851876c8647cb809c27"}, - {file = "lxml-5.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:ed1fe80e1fcdd1205a443bddb1ad3c3135bb1cd3f36cc996a1f4aed35960fbe8"}, - {file = "lxml-5.2.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d2b339fb790fc923ae2e9345c8633e3d0064d37ea7920c027f20c8ae6f65a91f"}, - {file = "lxml-5.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06036d60fccb21e22dd167f6d0e422b9cbdf3588a7e999a33799f9cbf01e41a5"}, - {file = "lxml-5.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a1611fb9de0a269c05575c024e6d8cdf2186e3fa52b364e3b03dcad82514d57"}, - {file = "lxml-5.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:05fc3720250d221792b6e0d150afc92d20cb10c9cdaa8c8f93c2a00fbdd16015"}, - {file = "lxml-5.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:11e41ffd3cd27b0ca1c76073b27bd860f96431d9b70f383990f1827ca19f2f52"}, - {file = "lxml-5.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0382e6a3eefa3f6699b14fa77c2eb32af2ada261b75120eaf4fc028a20394975"}, - {file = "lxml-5.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:be5c8e776ecbcf8c1bce71a7d90e3a3680c9ceae516cac0be08b47e9fac0ca43"}, - {file = "lxml-5.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da12b4efc93d53068888cb3b58e355b31839f2428b8f13654bd25d68b201c240"}, - {file = "lxml-5.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f46f8033da364bacc74aca5e319509a20bb711c8a133680ca5f35020f9eaf025"}, - {file = "lxml-5.2.0-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:50a26f68d090594477df8572babac64575cd5c07373f7a8319c527c8e56c0f99"}, - {file = "lxml-5.2.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:57cbadf028727705086047994d2e50124650e63ce5a035b0aa79ab50f001989f"}, - {file = "lxml-5.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:8aa11638902ac23f944f16ce45c9f04c9d5d57bb2da66822abb721f4efe5fdbb"}, - {file = "lxml-5.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b7150e630b879390e02121e71ceb1807f682b88342e2ea2082e2c8716cf8bd93"}, - {file = "lxml-5.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4add722393c99da4d51c8d9f3e1ddf435b30677f2d9ba9aeaa656f23c1b7b580"}, - {file = "lxml-5.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd0f25a431cd16f70ec1c47c10b413e7ddfe1ccaaddd1a7abd181e507c012374"}, - {file = "lxml-5.2.0-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:883e382695f346c2ea3ad96bdbdf4ca531788fbeedb4352be3a8fcd169fc387d"}, - {file = "lxml-5.2.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:80cc2b55bb6e35d3cb40936b658837eb131e9f16357241cd9ba106ae1e9c5ecb"}, - {file = "lxml-5.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:59ec2948385336e9901008fdf765780fe30f03e7fdba8090aafdbe5d1b7ea0cd"}, - {file = "lxml-5.2.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ddbea6e58cce1a640d9d65947f1e259423fc201c9cf9761782f355f53b7f3097"}, - {file = "lxml-5.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52d6cdea438eb7282c41c5ac00bd6d47d14bebb6e8a8d2a1c168ed9e0cacfbab"}, - {file = "lxml-5.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c556bbf88a8b667c849d326dd4dd9c6290ede5a33383ffc12b0ed17777f909d"}, - {file = "lxml-5.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:947fa8bf15d1c62c6db36c6ede9389cac54f59af27010251747f05bddc227745"}, - {file = "lxml-5.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e6cb8f7a332eaa2d876b649a748a445a38522e12f2168e5e838d1505a91cdbb7"}, - {file = "lxml-5.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:16e65223f34fd3d65259b174f0f75a4bb3d9893698e5e7d01e54cd8c5eb98d85"}, - {file = "lxml-5.2.0.tar.gz", hash = "sha256:21dc490cdb33047bc7f7ad76384f3366fa8f5146b86cc04c4af45de901393b90"}, + {file = "lxml-5.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1f7785f4f789fdb522729ae465adcaa099e2a3441519df750ebdccc481d961a1"}, + {file = "lxml-5.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6cc6ee342fb7fa2471bd9b6d6fdfc78925a697bf5c2bcd0a302e98b0d35bfad3"}, + {file = "lxml-5.2.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:794f04eec78f1d0e35d9e0c36cbbb22e42d370dda1609fb03bcd7aeb458c6377"}, + {file = "lxml-5.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817d420c60a5183953c783b0547d9eb43b7b344a2c46f69513d5952a78cddf3"}, + {file = "lxml-5.2.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2213afee476546a7f37c7a9b4ad4d74b1e112a6fafffc9185d6d21f043128c81"}, + {file = "lxml-5.2.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b070bbe8d3f0f6147689bed981d19bbb33070225373338df755a46893528104a"}, + {file = "lxml-5.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e02c5175f63effbd7c5e590399c118d5db6183bbfe8e0d118bdb5c2d1b48d937"}, + {file = "lxml-5.2.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:3dc773b2861b37b41a6136e0b72a1a44689a9c4c101e0cddb6b854016acc0aa8"}, + {file = "lxml-5.2.1-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:d7520db34088c96cc0e0a3ad51a4fd5b401f279ee112aa2b7f8f976d8582606d"}, + {file = "lxml-5.2.1-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:bcbf4af004f98793a95355980764b3d80d47117678118a44a80b721c9913436a"}, + {file = "lxml-5.2.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a2b44bec7adf3e9305ce6cbfa47a4395667e744097faed97abb4728748ba7d47"}, + {file = "lxml-5.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1c5bb205e9212d0ebddf946bc07e73fa245c864a5f90f341d11ce7b0b854475d"}, + {file = "lxml-5.2.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2c9d147f754b1b0e723e6afb7ba1566ecb162fe4ea657f53d2139bbf894d050a"}, + {file = "lxml-5.2.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:3545039fa4779be2df51d6395e91a810f57122290864918b172d5dc7ca5bb433"}, + {file = "lxml-5.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:a91481dbcddf1736c98a80b122afa0f7296eeb80b72344d7f45dc9f781551f56"}, + {file = "lxml-5.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2ddfe41ddc81f29a4c44c8ce239eda5ade4e7fc305fb7311759dd6229a080052"}, + {file = "lxml-5.2.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:a7baf9ffc238e4bf401299f50e971a45bfcc10a785522541a6e3179c83eabf0a"}, + {file = "lxml-5.2.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:31e9a882013c2f6bd2f2c974241bf4ba68c85eba943648ce88936d23209a2e01"}, + {file = "lxml-5.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0a15438253b34e6362b2dc41475e7f80de76320f335e70c5528b7148cac253a1"}, + {file = "lxml-5.2.1-cp310-cp310-win32.whl", hash = "sha256:6992030d43b916407c9aa52e9673612ff39a575523c5f4cf72cdef75365709a5"}, + {file = "lxml-5.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:da052e7962ea2d5e5ef5bc0355d55007407087392cf465b7ad84ce5f3e25fe0f"}, + {file = "lxml-5.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:70ac664a48aa64e5e635ae5566f5227f2ab7f66a3990d67566d9907edcbbf867"}, + {file = "lxml-5.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1ae67b4e737cddc96c99461d2f75d218bdf7a0c3d3ad5604d1f5e7464a2f9ffe"}, + {file = "lxml-5.2.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f18a5a84e16886898e51ab4b1d43acb3083c39b14c8caeb3589aabff0ee0b270"}, + {file = "lxml-5.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6f2c8372b98208ce609c9e1d707f6918cc118fea4e2c754c9f0812c04ca116d"}, + {file = "lxml-5.2.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:394ed3924d7a01b5bd9a0d9d946136e1c2f7b3dc337196d99e61740ed4bc6fe1"}, + {file = "lxml-5.2.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d077bc40a1fe984e1a9931e801e42959a1e6598edc8a3223b061d30fbd26bbc"}, + {file = "lxml-5.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:764b521b75701f60683500d8621841bec41a65eb739b8466000c6fdbc256c240"}, + {file = "lxml-5.2.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:3a6b45da02336895da82b9d472cd274b22dc27a5cea1d4b793874eead23dd14f"}, + {file = "lxml-5.2.1-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:5ea7b6766ac2dfe4bcac8b8595107665a18ef01f8c8343f00710b85096d1b53a"}, + {file = "lxml-5.2.1-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:e196a4ff48310ba62e53a8e0f97ca2bca83cdd2fe2934d8b5cb0df0a841b193a"}, + {file = "lxml-5.2.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:200e63525948e325d6a13a76ba2911f927ad399ef64f57898cf7c74e69b71095"}, + {file = "lxml-5.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:dae0ed02f6b075426accbf6b2863c3d0a7eacc1b41fb40f2251d931e50188dad"}, + {file = "lxml-5.2.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:ab31a88a651039a07a3ae327d68ebdd8bc589b16938c09ef3f32a4b809dc96ef"}, + {file = "lxml-5.2.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:df2e6f546c4df14bc81f9498bbc007fbb87669f1bb707c6138878c46b06f6510"}, + {file = "lxml-5.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5dd1537e7cc06efd81371f5d1a992bd5ab156b2b4f88834ca852de4a8ea523fa"}, + {file = "lxml-5.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9b9ec9c9978b708d488bec36b9e4c94d88fd12ccac3e62134a9d17ddba910ea9"}, + {file = "lxml-5.2.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:8e77c69d5892cb5ba71703c4057091e31ccf534bd7f129307a4d084d90d014b8"}, + {file = 
"lxml-5.2.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a8d5c70e04aac1eda5c829a26d1f75c6e5286c74743133d9f742cda8e53b9c2f"}, + {file = "lxml-5.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c94e75445b00319c1fad60f3c98b09cd63fe1134a8a953dcd48989ef42318534"}, + {file = "lxml-5.2.1-cp311-cp311-win32.whl", hash = "sha256:4951e4f7a5680a2db62f7f4ab2f84617674d36d2d76a729b9a8be4b59b3659be"}, + {file = "lxml-5.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:5c670c0406bdc845b474b680b9a5456c561c65cf366f8db5a60154088c92d102"}, + {file = "lxml-5.2.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:abc25c3cab9ec7fcd299b9bcb3b8d4a1231877e425c650fa1c7576c5107ab851"}, + {file = "lxml-5.2.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6935bbf153f9a965f1e07c2649c0849d29832487c52bb4a5c5066031d8b44fd5"}, + {file = "lxml-5.2.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d793bebb202a6000390a5390078e945bbb49855c29c7e4d56a85901326c3b5d9"}, + {file = "lxml-5.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afd5562927cdef7c4f5550374acbc117fd4ecc05b5007bdfa57cc5355864e0a4"}, + {file = "lxml-5.2.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0e7259016bc4345a31af861fdce942b77c99049d6c2107ca07dc2bba2435c1d9"}, + {file = "lxml-5.2.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:530e7c04f72002d2f334d5257c8a51bf409db0316feee7c87e4385043be136af"}, + {file = "lxml-5.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59689a75ba8d7ffca577aefd017d08d659d86ad4585ccc73e43edbfc7476781a"}, + {file = "lxml-5.2.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f9737bf36262046213a28e789cc82d82c6ef19c85a0cf05e75c670a33342ac2c"}, + {file = "lxml-5.2.1-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:3a74c4f27167cb95c1d4af1c0b59e88b7f3e0182138db2501c353555f7ec57f4"}, + {file = "lxml-5.2.1-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:68a2610dbe138fa8c5826b3f6d98a7cfc29707b850ddcc3e21910a6fe51f6ca0"}, + {file = "lxml-5.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:f0a1bc63a465b6d72569a9bba9f2ef0334c4e03958e043da1920299100bc7c08"}, + {file = "lxml-5.2.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c2d35a1d047efd68027817b32ab1586c1169e60ca02c65d428ae815b593e65d4"}, + {file = "lxml-5.2.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:79bd05260359170f78b181b59ce871673ed01ba048deef4bf49a36ab3e72e80b"}, + {file = "lxml-5.2.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:865bad62df277c04beed9478fe665b9ef63eb28fe026d5dedcb89b537d2e2ea6"}, + {file = "lxml-5.2.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:44f6c7caff88d988db017b9b0e4ab04934f11e3e72d478031efc7edcac6c622f"}, + {file = "lxml-5.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:71e97313406ccf55d32cc98a533ee05c61e15d11b99215b237346171c179c0b0"}, + {file = "lxml-5.2.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:057cdc6b86ab732cf361f8b4d8af87cf195a1f6dc5b0ff3de2dced242c2015e0"}, + {file = "lxml-5.2.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:f3bbbc998d42f8e561f347e798b85513ba4da324c2b3f9b7969e9c45b10f6169"}, + {file = "lxml-5.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:491755202eb21a5e350dae00c6d9a17247769c64dcf62d8c788b5c135e179dc4"}, + {file = "lxml-5.2.1-cp312-cp312-win32.whl", hash = 
"sha256:8de8f9d6caa7f25b204fc861718815d41cbcf27ee8f028c89c882a0cf4ae4134"}, + {file = "lxml-5.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:f2a9efc53d5b714b8df2b4b3e992accf8ce5bbdfe544d74d5c6766c9e1146a3a"}, + {file = "lxml-5.2.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:70a9768e1b9d79edca17890175ba915654ee1725975d69ab64813dd785a2bd5c"}, + {file = "lxml-5.2.1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c38d7b9a690b090de999835f0443d8aa93ce5f2064035dfc48f27f02b4afc3d0"}, + {file = "lxml-5.2.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5670fb70a828663cc37552a2a85bf2ac38475572b0e9b91283dc09efb52c41d1"}, + {file = "lxml-5.2.1-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:958244ad566c3ffc385f47dddde4145088a0ab893504b54b52c041987a8c1863"}, + {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:2a66bf12fbd4666dd023b6f51223aed3d9f3b40fef06ce404cb75bafd3d89536"}, + {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:9123716666e25b7b71c4e1789ec829ed18663152008b58544d95b008ed9e21e9"}, + {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:0c3f67e2aeda739d1cc0b1102c9a9129f7dc83901226cc24dd72ba275ced4218"}, + {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:5d5792e9b3fb8d16a19f46aa8208987cfeafe082363ee2745ea8b643d9cc5b45"}, + {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:88e22fc0a6684337d25c994381ed8a1580a6f5ebebd5ad41f89f663ff4ec2885"}, + {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_2_ppc64le.whl", hash = "sha256:21c2e6b09565ba5b45ae161b438e033a86ad1736b8c838c766146eff8ceffff9"}, + {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_2_s390x.whl", hash = "sha256:afbbdb120d1e78d2ba8064a68058001b871154cc57787031b645c9142b937a62"}, + {file = "lxml-5.2.1-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:627402ad8dea044dde2eccde4370560a2b750ef894c9578e1d4f8ffd54000461"}, + {file = "lxml-5.2.1-cp36-cp36m-win32.whl", hash = "sha256:e89580a581bf478d8dcb97d9cd011d567768e8bc4095f8557b21c4d4c5fea7d0"}, + {file = "lxml-5.2.1-cp36-cp36m-win_amd64.whl", hash = "sha256:59565f10607c244bc4c05c0c5fa0c190c990996e0c719d05deec7030c2aa8289"}, + {file = "lxml-5.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:857500f88b17a6479202ff5fe5f580fc3404922cd02ab3716197adf1ef628029"}, + {file = "lxml-5.2.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56c22432809085b3f3ae04e6e7bdd36883d7258fcd90e53ba7b2e463efc7a6af"}, + {file = "lxml-5.2.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a55ee573116ba208932e2d1a037cc4b10d2c1cb264ced2184d00b18ce585b2c0"}, + {file = "lxml-5.2.1-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:6cf58416653c5901e12624e4013708b6e11142956e7f35e7a83f1ab02f3fe456"}, + {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:64c2baa7774bc22dd4474248ba16fe1a7f611c13ac6123408694d4cc93d66dbd"}, + {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:74b28c6334cca4dd704e8004cba1955af0b778cf449142e581e404bd211fb619"}, + {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:7221d49259aa1e5a8f00d3d28b1e0b76031655ca74bb287123ef56c3db92f213"}, + {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3dbe858ee582cbb2c6294dc85f55b5f19c918c2597855e950f34b660f1a5ede6"}, + {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash 
= "sha256:04ab5415bf6c86e0518d57240a96c4d1fcfc3cb370bb2ac2a732b67f579e5a04"}, + {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:6ab833e4735a7e5533711a6ea2df26459b96f9eec36d23f74cafe03631647c41"}, + {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f443cdef978430887ed55112b491f670bba6462cea7a7742ff8f14b7abb98d75"}, + {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:9e2addd2d1866fe112bc6f80117bcc6bc25191c5ed1bfbcf9f1386a884252ae8"}, + {file = "lxml-5.2.1-cp37-cp37m-win32.whl", hash = "sha256:f51969bac61441fd31f028d7b3b45962f3ecebf691a510495e5d2cd8c8092dbd"}, + {file = "lxml-5.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:b0b58fbfa1bf7367dde8a557994e3b1637294be6cf2169810375caf8571a085c"}, + {file = "lxml-5.2.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3e183c6e3298a2ed5af9d7a356ea823bccaab4ec2349dc9ed83999fd289d14d5"}, + {file = "lxml-5.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:804f74efe22b6a227306dd890eecc4f8c59ff25ca35f1f14e7482bbce96ef10b"}, + {file = "lxml-5.2.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:08802f0c56ed150cc6885ae0788a321b73505d2263ee56dad84d200cab11c07a"}, + {file = "lxml-5.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f8c09ed18ecb4ebf23e02b8e7a22a05d6411911e6fabef3a36e4f371f4f2585"}, + {file = "lxml-5.2.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3d30321949861404323c50aebeb1943461a67cd51d4200ab02babc58bd06a86"}, + {file = "lxml-5.2.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:b560e3aa4b1d49e0e6c847d72665384db35b2f5d45f8e6a5c0072e0283430533"}, + {file = "lxml-5.2.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:058a1308914f20784c9f4674036527e7c04f7be6fb60f5d61353545aa7fcb739"}, + {file = "lxml-5.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:adfb84ca6b87e06bc6b146dc7da7623395db1e31621c4785ad0658c5028b37d7"}, + {file = "lxml-5.2.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:417d14450f06d51f363e41cace6488519038f940676ce9664b34ebf5653433a5"}, + {file = "lxml-5.2.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a2dfe7e2473f9b59496247aad6e23b405ddf2e12ef0765677b0081c02d6c2c0b"}, + {file = "lxml-5.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bf2e2458345d9bffb0d9ec16557d8858c9c88d2d11fed53998512504cd9df49b"}, + {file = "lxml-5.2.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:58278b29cb89f3e43ff3e0c756abbd1518f3ee6adad9e35b51fb101c1c1daaec"}, + {file = "lxml-5.2.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:64641a6068a16201366476731301441ce93457eb8452056f570133a6ceb15fca"}, + {file = "lxml-5.2.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:78bfa756eab503673991bdcf464917ef7845a964903d3302c5f68417ecdc948c"}, + {file = "lxml-5.2.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:11a04306fcba10cd9637e669fd73aa274c1c09ca64af79c041aa820ea992b637"}, + {file = "lxml-5.2.1-cp38-cp38-win32.whl", hash = "sha256:66bc5eb8a323ed9894f8fa0ee6cb3e3fb2403d99aee635078fd19a8bc7a5a5da"}, + {file = "lxml-5.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:9676bfc686fa6a3fa10cd4ae6b76cae8be26eb5ec6811d2a325636c460da1806"}, + {file = "lxml-5.2.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cf22b41fdae514ee2f1691b6c3cdeae666d8b7fa9434de445f12bbeee0cf48dd"}, + {file = "lxml-5.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ec42088248c596dbd61d4ae8a5b004f97a4d91a9fd286f632e42e60b706718d7"}, + {file 
= "lxml-5.2.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd53553ddad4a9c2f1f022756ae64abe16da1feb497edf4d9f87f99ec7cf86bd"}, + {file = "lxml-5.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feaa45c0eae424d3e90d78823f3828e7dc42a42f21ed420db98da2c4ecf0a2cb"}, + {file = "lxml-5.2.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddc678fb4c7e30cf830a2b5a8d869538bc55b28d6c68544d09c7d0d8f17694dc"}, + {file = "lxml-5.2.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:853e074d4931dbcba7480d4dcab23d5c56bd9607f92825ab80ee2bd916edea53"}, + {file = "lxml-5.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc4691d60512798304acb9207987e7b2b7c44627ea88b9d77489bbe3e6cc3bd4"}, + {file = "lxml-5.2.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:beb72935a941965c52990f3a32d7f07ce869fe21c6af8b34bf6a277b33a345d3"}, + {file = "lxml-5.2.1-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:6588c459c5627fefa30139be4d2e28a2c2a1d0d1c265aad2ba1935a7863a4913"}, + {file = "lxml-5.2.1-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:588008b8497667f1ddca7c99f2f85ce8511f8f7871b4a06ceede68ab62dff64b"}, + {file = "lxml-5.2.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b6787b643356111dfd4032b5bffe26d2f8331556ecb79e15dacb9275da02866e"}, + {file = "lxml-5.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7c17b64b0a6ef4e5affae6a3724010a7a66bda48a62cfe0674dabd46642e8b54"}, + {file = "lxml-5.2.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:27aa20d45c2e0b8cd05da6d4759649170e8dfc4f4e5ef33a34d06f2d79075d57"}, + {file = "lxml-5.2.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:d4f2cc7060dc3646632d7f15fe68e2fa98f58e35dd5666cd525f3b35d3fed7f8"}, + {file = "lxml-5.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff46d772d5f6f73564979cd77a4fffe55c916a05f3cb70e7c9c0590059fb29ef"}, + {file = "lxml-5.2.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:96323338e6c14e958d775700ec8a88346014a85e5de73ac7967db0367582049b"}, + {file = "lxml-5.2.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:52421b41ac99e9d91934e4d0d0fe7da9f02bfa7536bb4431b4c05c906c8c6919"}, + {file = "lxml-5.2.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:7a7efd5b6d3e30d81ec68ab8a88252d7c7c6f13aaa875009fe3097eb4e30b84c"}, + {file = "lxml-5.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ed777c1e8c99b63037b91f9d73a6aad20fd035d77ac84afcc205225f8f41188"}, + {file = "lxml-5.2.1-cp39-cp39-win32.whl", hash = "sha256:644df54d729ef810dcd0f7732e50e5ad1bd0a135278ed8d6bcb06f33b6b6f708"}, + {file = "lxml-5.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:9ca66b8e90daca431b7ca1408cae085d025326570e57749695d6a01454790e95"}, + {file = "lxml-5.2.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9b0ff53900566bc6325ecde9181d89afadc59c5ffa39bddf084aaedfe3b06a11"}, + {file = "lxml-5.2.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd6037392f2d57793ab98d9e26798f44b8b4da2f2464388588f48ac52c489ea1"}, + {file = "lxml-5.2.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b9c07e7a45bb64e21df4b6aa623cb8ba214dfb47d2027d90eac197329bb5e94"}, + {file = "lxml-5.2.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3249cc2989d9090eeac5467e50e9ec2d40704fea9ab72f36b034ea34ee65ca98"}, + {file = "lxml-5.2.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:f42038016852ae51b4088b2862126535cc4fc85802bfe30dea3500fdfaf1864e"}, + {file = "lxml-5.2.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:533658f8fbf056b70e434dff7e7aa611bcacb33e01f75de7f821810e48d1bb66"}, + {file = "lxml-5.2.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:622020d4521e22fb371e15f580d153134bfb68d6a429d1342a25f051ec72df1c"}, + {file = "lxml-5.2.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efa7b51824aa0ee957ccd5a741c73e6851de55f40d807f08069eb4c5a26b2baa"}, + {file = "lxml-5.2.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c6ad0fbf105f6bcc9300c00010a2ffa44ea6f555df1a2ad95c88f5656104817"}, + {file = "lxml-5.2.1-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e233db59c8f76630c512ab4a4daf5a5986da5c3d5b44b8e9fc742f2a24dbd460"}, + {file = "lxml-5.2.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6a014510830df1475176466b6087fc0c08b47a36714823e58d8b8d7709132a96"}, + {file = "lxml-5.2.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:d38c8f50ecf57f0463399569aa388b232cf1a2ffb8f0a9a5412d0db57e054860"}, + {file = "lxml-5.2.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5aea8212fb823e006b995c4dda533edcf98a893d941f173f6c9506126188860d"}, + {file = "lxml-5.2.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff097ae562e637409b429a7ac958a20aab237a0378c42dabaa1e3abf2f896e5f"}, + {file = "lxml-5.2.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f5d65c39f16717a47c36c756af0fb36144069c4718824b7533f803ecdf91138"}, + {file = "lxml-5.2.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3d0c3dd24bb4605439bf91068598d00c6370684f8de4a67c2992683f6c309d6b"}, + {file = "lxml-5.2.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e32be23d538753a8adb6c85bd539f5fd3b15cb987404327c569dfc5fd8366e85"}, + {file = "lxml-5.2.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:cc518cea79fd1e2f6c90baafa28906d4309d24f3a63e801d855e7424c5b34144"}, + {file = "lxml-5.2.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a0af35bd8ebf84888373630f73f24e86bf016642fb8576fba49d3d6b560b7cbc"}, + {file = "lxml-5.2.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8aca2e3a72f37bfc7b14ba96d4056244001ddcc18382bd0daa087fd2e68a354"}, + {file = "lxml-5.2.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ca1e8188b26a819387b29c3895c47a5e618708fe6f787f3b1a471de2c4a94d9"}, + {file = "lxml-5.2.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c8ba129e6d3b0136a0f50345b2cb3db53f6bda5dd8c7f5d83fbccba97fb5dcb5"}, + {file = "lxml-5.2.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e998e304036198b4f6914e6a1e2b6f925208a20e2042563d9734881150c6c246"}, + {file = "lxml-5.2.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d3be9b2076112e51b323bdf6d5a7f8a798de55fb8d95fcb64bd179460cdc0704"}, + {file = "lxml-5.2.1.tar.gz", hash = "sha256:3f7765e69bbce0906a7c74d5fe46d2c7a7596147318dbc08e4a2431f3060e306"}, ] [package.extras] @@ -999,13 +1042,13 @@ source = ["Cython (>=3.0.10)"] [[package]] name = "markdown" -version = "3.5.2" +version = "3.6" description = "Python implementation of John Gruber's Markdown." 
optional = false
python-versions = ">=3.8"
files = [
- {file = "Markdown-3.5.2-py3-none-any.whl", hash = "sha256:d43323865d89fc0cb9b20c75fc8ad313af307cc087e84b657d9eec768eddeadd"},
- {file = "Markdown-3.5.2.tar.gz", hash = "sha256:e1ac7b3dc550ee80e602e71c1d168002f062e49f1b11e26a36264dafd4df2ef8"},
+ {file = "Markdown-3.6-py3-none-any.whl", hash = "sha256:48f276f4d8cfb8ce6527c8f79e2ee29708508bf4d40aa410fbc3b4ee832c850f"},
+ {file = "Markdown-3.6.tar.gz", hash = "sha256:ed4f41f6daecbeeb96e576ce414c41d2d876daa9a16cb35fa8ed8c2ddfad0224"},
]
[package.dependencies]
@@ -1139,13 +1182,13 @@ min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-imp
[[package]]
name = "mkdocs-autorefs"
-version = "1.0.0"
+version = "1.0.1"
description = "Automatically link across pages in MkDocs."
optional = false
python-versions = ">=3.8"
files = [
- {file = "mkdocs_autorefs-1.0.0-py3-none-any.whl", hash = "sha256:2b6d288f0582589d1be7c99ce4470c8e7c5077892014051ff0d4ff574a73dbe8"},
- {file = "mkdocs_autorefs-1.0.0.tar.gz", hash = "sha256:1b20db41cade632b07b7a73dee818977b52e869c9deb438f6f20e2896ff01859"},
+ {file = "mkdocs_autorefs-1.0.1-py3-none-any.whl", hash = "sha256:aacdfae1ab197780fb7a2dac92ad8a3d8f7ca8049a9cbe56a4218cd52e8da570"},
+ {file = "mkdocs_autorefs-1.0.1.tar.gz", hash = "sha256:f684edf847eced40b570b57846b15f0bf57fb93ac2c510450775dcf16accb971"},
]
[package.dependencies]
@@ -1171,13 +1214,13 @@ mkdocs = ">=1.1"
[[package]]
name = "mkdocs-material"
-version = "9.5.16"
+version = "9.5.18"
description = "Documentation that simply works"
optional = false
python-versions = ">=3.8"
files = [
- {file = "mkdocs_material-9.5.16-py3-none-any.whl", hash = "sha256:32fce3cd8ecbd5dca6e5887cc0cf5bc78707a36f7d0f6f1bbbe9edaf428b8055"},
- {file = "mkdocs_material-9.5.16.tar.gz", hash = "sha256:8b89b639592660f24657bb058de4aff0060cd0383148f8f51711201730f17503"},
+ {file = "mkdocs_material-9.5.18-py3-none-any.whl", hash = "sha256:1e0e27fc9fe239f9064318acf548771a4629d5fd5dfd45444fd80a953fe21eb4"},
+ {file = "mkdocs_material-9.5.18.tar.gz", hash = "sha256:a43f470947053fa2405c33995f282d24992c752a50114f23f30da9d8d0c57e62"},
]
[package.dependencies]
@@ -1211,13 +1254,13 @@ files = [
[[package]]
name = "mkdocstrings"
-version = "0.24.1"
+version = "0.24.3"
description = "Automatic documentation from sources, for MkDocs."
optional = false
python-versions = ">=3.8"
files = [
- {file = "mkdocstrings-0.24.1-py3-none-any.whl", hash = "sha256:b4206f9a2ca8a648e222d5a0ca1d36ba7dee53c88732818de183b536f9042b5d"},
- {file = "mkdocstrings-0.24.1.tar.gz", hash = "sha256:cc83f9a1c8724fc1be3c2fa071dd73d91ce902ef6a79710249ec8d0ee1064401"},
+ {file = "mkdocstrings-0.24.3-py3-none-any.whl", hash = "sha256:5c9cf2a32958cd161d5428699b79c8b0988856b0d4a8c5baf8395fc1bf4087c3"},
+ {file = "mkdocstrings-0.24.3.tar.gz", hash = "sha256:f327b234eb8d2551a306735436e157d0a22d45f79963c60a8b585d5f7a94c1d2"},
]
[package.dependencies]
@@ -1239,19 +1282,18 @@ python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"]
[[package]]
name = "mkdocstrings-python"
-version = "1.9.0"
+version = "1.10.0"
description = "A Python handler for mkdocstrings."
optional = false
python-versions = ">=3.8"
files = [
- {file = "mkdocstrings_python-1.9.0-py3-none-any.whl", hash = "sha256:fad27d7314b4ec9c0359a187b477fb94c65ef561fdae941dca1b717c59aae96f"},
- {file = "mkdocstrings_python-1.9.0.tar.gz", hash = "sha256:6e1a442367cf75d30cf69774cbb1ad02aebec58bfff26087439df4955efecfde"},
+ {file = "mkdocstrings_python-1.10.0-py3-none-any.whl", hash = "sha256:ba833fbd9d178a4b9d5cb2553a4df06e51dc1f51e41559a4d2398c16a6f69ecc"},
+ {file = "mkdocstrings_python-1.10.0.tar.gz", hash = "sha256:71678fac657d4d2bb301eed4e4d2d91499c095fd1f8a90fa76422a87a5693828"},
]
[package.dependencies]
-griffe = ">=0.37"
-markdown = ">=3.3,<3.6"
-mkdocstrings = ">=0.20"
+griffe = ">=0.44"
+mkdocstrings = ">=0.24.2"
[[package]]
name = "mypy-extensions"
@@ -1309,13 +1351,13 @@ dev = ["black", "mypy", "pytest"]
[[package]]
name = "packaging"
-version = "23.2"
+version = "24.0"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.7"
files = [
- {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"},
- {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"},
+ {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"},
+ {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"},
]
[[package]]
@@ -1472,13 +1514,13 @@ files = [
[[package]]
name = "pycparser"
-version = "2.21"
+version = "2.22"
description = "C parser in Python"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+python-versions = ">=3.8"
files = [
- {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
- {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
+ {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"},
+ {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"},
]
[[package]]
@@ -1524,18 +1566,18 @@ files = [
[[package]]
name = "pydantic"
-version = "2.6.4"
+version = "2.7.0"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic-2.6.4-py3-none-any.whl", hash = "sha256:cc46fce86607580867bdc3361ad462bab9c222ef042d3da86f2fb333e1d916c5"},
- {file = "pydantic-2.6.4.tar.gz", hash = "sha256:b1704e0847db01817624a6b86766967f552dd9dbf3afba4004409f908dcc84e6"},
+ {file = "pydantic-2.7.0-py3-none-any.whl", hash = "sha256:9dee74a271705f14f9a1567671d144a851c675b072736f0a7b2608fd9e495352"},
+ {file = "pydantic-2.7.0.tar.gz", hash = "sha256:b5ecdd42262ca2462e2624793551e80911a1e989f462910bb81aef974b4bb383"},
]
[package.dependencies]
annotated-types = ">=0.4.0"
-pydantic-core = "2.16.3"
+pydantic-core = "2.18.1"
typing-extensions = ">=4.6.1"
[package.extras]
@@ -1543,90 +1585,90 @@ email = ["email-validator (>=2.0.0)"]
[[package]]
name = "pydantic-core"
-version = "2.16.3"
-description = ""
+version = "2.18.1"
+description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic_core-2.16.3-cp310-cp310-macosx_10_12_x86_64.whl", hash =
"sha256:75b81e678d1c1ede0785c7f46690621e4c6e63ccd9192af1f0bd9d504bbb6bf4"}, - {file = "pydantic_core-2.16.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c865a7ee6f93783bd5d781af5a4c43dadc37053a5b42f7d18dc019f8c9d2bd1"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:162e498303d2b1c036b957a1278fa0899d02b2842f1ff901b6395104c5554a45"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f583bd01bbfbff4eaee0868e6fc607efdfcc2b03c1c766b06a707abbc856187"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b926dd38db1519ed3043a4de50214e0d600d404099c3392f098a7f9d75029ff8"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:716b542728d4c742353448765aa7cdaa519a7b82f9564130e2b3f6766018c9ec"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4ad7f7ee1a13d9cb49d8198cd7d7e3aa93e425f371a68235f784e99741561f"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd87f48924f360e5d1c5f770d6155ce0e7d83f7b4e10c2f9ec001c73cf475c99"}, - {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0df446663464884297c793874573549229f9eca73b59360878f382a0fc085979"}, - {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4df8a199d9f6afc5ae9a65f8f95ee52cae389a8c6b20163762bde0426275b7db"}, - {file = "pydantic_core-2.16.3-cp310-none-win32.whl", hash = "sha256:456855f57b413f077dff513a5a28ed838dbbb15082ba00f80750377eed23d132"}, - {file = "pydantic_core-2.16.3-cp310-none-win_amd64.whl", hash = "sha256:732da3243e1b8d3eab8c6ae23ae6a58548849d2e4a4e03a1924c8ddf71a387cb"}, - {file = "pydantic_core-2.16.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:519ae0312616026bf4cedc0fe459e982734f3ca82ee8c7246c19b650b60a5ee4"}, - {file = "pydantic_core-2.16.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b3992a322a5617ded0a9f23fd06dbc1e4bd7cf39bc4ccf344b10f80af58beacd"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d62da299c6ecb04df729e4b5c52dc0d53f4f8430b4492b93aa8de1f541c4aac"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2acca2be4bb2f2147ada8cac612f8a98fc09f41c89f87add7256ad27332c2fda"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b662180108c55dfbf1280d865b2d116633d436cfc0bba82323554873967b340"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7c6ed0dc9d8e65f24f5824291550139fe6f37fac03788d4580da0d33bc00c97"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1bb0827f56654b4437955555dc3aeeebeddc47c2d7ed575477f082622c49e"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e56f8186d6210ac7ece503193ec84104da7ceb98f68ce18c07282fcc2452e76f"}, - {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:936e5db01dd49476fa8f4383c259b8b1303d5dd5fb34c97de194560698cc2c5e"}, - {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:33809aebac276089b78db106ee692bdc9044710e26f24a9a2eaa35a0f9fa70ba"}, - {file = 
"pydantic_core-2.16.3-cp311-none-win32.whl", hash = "sha256:ded1c35f15c9dea16ead9bffcde9bb5c7c031bff076355dc58dcb1cb436c4721"}, - {file = "pydantic_core-2.16.3-cp311-none-win_amd64.whl", hash = "sha256:d89ca19cdd0dd5f31606a9329e309d4fcbb3df860960acec32630297d61820df"}, - {file = "pydantic_core-2.16.3-cp311-none-win_arm64.whl", hash = "sha256:6162f8d2dc27ba21027f261e4fa26f8bcb3cf9784b7f9499466a311ac284b5b9"}, - {file = "pydantic_core-2.16.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0f56ae86b60ea987ae8bcd6654a887238fd53d1384f9b222ac457070b7ac4cff"}, - {file = "pydantic_core-2.16.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9bd22a2a639e26171068f8ebb5400ce2c1bc7d17959f60a3b753ae13c632975"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4204e773b4b408062960e65468d5346bdfe139247ee5f1ca2a378983e11388a2"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f651dd19363c632f4abe3480a7c87a9773be27cfe1341aef06e8759599454120"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf09e615a0bf98d406657e0008e4a8701b11481840be7d31755dc9f97c44053"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8e47755d8152c1ab5b55928ab422a76e2e7b22b5ed8e90a7d584268dd49e9c6b"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:500960cb3a0543a724a81ba859da816e8cf01b0e6aaeedf2c3775d12ee49cade"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf6204fe865da605285c34cf1172879d0314ff267b1c35ff59de7154f35fdc2e"}, - {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d33dd21f572545649f90c38c227cc8631268ba25c460b5569abebdd0ec5974ca"}, - {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49d5d58abd4b83fb8ce763be7794d09b2f50f10aa65c0f0c1696c677edeb7cbf"}, - {file = "pydantic_core-2.16.3-cp312-none-win32.whl", hash = "sha256:f53aace168a2a10582e570b7736cc5bef12cae9cf21775e3eafac597e8551fbe"}, - {file = "pydantic_core-2.16.3-cp312-none-win_amd64.whl", hash = "sha256:0d32576b1de5a30d9a97f300cc6a3f4694c428d956adbc7e6e2f9cad279e45ed"}, - {file = "pydantic_core-2.16.3-cp312-none-win_arm64.whl", hash = "sha256:ec08be75bb268473677edb83ba71e7e74b43c008e4a7b1907c6d57e940bf34b6"}, - {file = "pydantic_core-2.16.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1f6f5938d63c6139860f044e2538baeee6f0b251a1816e7adb6cbce106a1f01"}, - {file = "pydantic_core-2.16.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a1ef6a36fdbf71538142ed604ad19b82f67b05749512e47f247a6ddd06afdc7"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704d35ecc7e9c31d48926150afada60401c55efa3b46cd1ded5a01bdffaf1d48"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d937653a696465677ed583124b94a4b2d79f5e30b2c46115a68e482c6a591c8a"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9803edf8e29bd825f43481f19c37f50d2b01899448273b3a7758441b512acf8"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72282ad4892a9fb2da25defeac8c2e84352c108705c972db82ab121d15f14e6d"}, - {file = 
"pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f752826b5b8361193df55afcdf8ca6a57d0232653494ba473630a83ba50d8c9"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4384a8f68ddb31a0b0c3deae88765f5868a1b9148939c3f4121233314ad5532c"}, - {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4b2bf78342c40b3dc830880106f54328928ff03e357935ad26c7128bbd66ce8"}, - {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:13dcc4802961b5f843a9385fc821a0b0135e8c07fc3d9949fd49627c1a5e6ae5"}, - {file = "pydantic_core-2.16.3-cp38-none-win32.whl", hash = "sha256:e3e70c94a0c3841e6aa831edab1619ad5c511199be94d0c11ba75fe06efe107a"}, - {file = "pydantic_core-2.16.3-cp38-none-win_amd64.whl", hash = "sha256:ecdf6bf5f578615f2e985a5e1f6572e23aa632c4bd1dc67f8f406d445ac115ed"}, - {file = "pydantic_core-2.16.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bda1ee3e08252b8d41fa5537413ffdddd58fa73107171a126d3b9ff001b9b820"}, - {file = "pydantic_core-2.16.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:21b888c973e4f26b7a96491c0965a8a312e13be108022ee510248fe379a5fa23"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be0ec334369316fa73448cc8c982c01e5d2a81c95969d58b8f6e272884df0074"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b5b6079cc452a7c53dd378c6f881ac528246b3ac9aae0f8eef98498a75657805"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee8d5f878dccb6d499ba4d30d757111847b6849ae07acdd1205fffa1fc1253c"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7233d65d9d651242a68801159763d09e9ec96e8a158dbf118dc090cd77a104c9"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6119dc90483a5cb50a1306adb8d52c66e447da88ea44f323e0ae1a5fcb14256"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:578114bc803a4c1ff9946d977c221e4376620a46cf78da267d946397dc9514a8"}, - {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d8f99b147ff3fcf6b3cc60cb0c39ea443884d5559a30b1481e92495f2310ff2b"}, - {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4ac6b4ce1e7283d715c4b729d8f9dab9627586dafce81d9eaa009dd7f25dd972"}, - {file = "pydantic_core-2.16.3-cp39-none-win32.whl", hash = "sha256:e7774b570e61cb998490c5235740d475413a1f6de823169b4cf94e2fe9e9f6b2"}, - {file = "pydantic_core-2.16.3-cp39-none-win_amd64.whl", hash = "sha256:9091632a25b8b87b9a605ec0e61f241c456e9248bfdcf7abdf344fdb169c81cf"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:36fa178aacbc277bc6b62a2c3da95226520da4f4e9e206fdf076484363895d2c"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:dcca5d2bf65c6fb591fff92da03f94cd4f315972f97c21975398bd4bd046854a"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a72fb9963cba4cd5793854fd12f4cfee731e86df140f59ff52a49b3552db241"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60cc1a081f80a2105a59385b92d82278b15d80ebb3adb200542ae165cd7d183"}, - {file = 
"pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cbcc558401de90a746d02ef330c528f2e668c83350f045833543cd57ecead1ad"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fee427241c2d9fb7192b658190f9f5fd6dfe41e02f3c1489d2ec1e6a5ab1e04a"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4cb85f693044e0f71f394ff76c98ddc1bc0953e48c061725e540396d5c8a2e1"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b29eeb887aa931c2fcef5aa515d9d176d25006794610c264ddc114c053bf96fe"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a425479ee40ff021f8216c9d07a6a3b54b31c8267c6e17aa88b70d7ebd0e5e5b"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5c5cbc703168d1b7a838668998308018a2718c2130595e8e190220238addc96f"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99b6add4c0b39a513d323d3b93bc173dac663c27b99860dd5bf491b240d26137"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f76ee558751746d6a38f89d60b6228fa174e5172d143886af0f85aa306fd89"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:00ee1c97b5364b84cb0bd82e9bbf645d5e2871fb8c58059d158412fee2d33d8a"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:287073c66748f624be4cef893ef9174e3eb88fe0b8a78dc22e88eca4bc357ca6"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ed25e1835c00a332cb10c683cd39da96a719ab1dfc08427d476bce41b92531fc"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:86b3d0033580bd6bbe07590152007275bd7af95f98eaa5bd36f3da219dcd93da"}, - {file = "pydantic_core-2.16.3.tar.gz", hash = "sha256:1cac689f80a3abab2d3c0048b29eea5751114054f032a941a32de4c852c59cad"}, + {file = "pydantic_core-2.18.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ee9cf33e7fe14243f5ca6977658eb7d1042caaa66847daacbd2117adb258b226"}, + {file = "pydantic_core-2.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6b7bbb97d82659ac8b37450c60ff2e9f97e4eb0f8a8a3645a5568b9334b08b50"}, + {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df4249b579e75094f7e9bb4bd28231acf55e308bf686b952f43100a5a0be394c"}, + {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d0491006a6ad20507aec2be72e7831a42efc93193d2402018007ff827dc62926"}, + {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ae80f72bb7a3e397ab37b53a2b49c62cc5496412e71bc4f1277620a7ce3f52b"}, + {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:58aca931bef83217fca7a390e0486ae327c4af9c3e941adb75f8772f8eeb03a1"}, + {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1be91ad664fc9245404a789d60cba1e91c26b1454ba136d2a1bf0c2ac0c0505a"}, + {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:667880321e916a8920ef49f5d50e7983792cf59f3b6079f3c9dac2b88a311d17"}, + {file = "pydantic_core-2.18.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:f7054fdc556f5421f01e39cbb767d5ec5c1139ea98c3e5b350e02e62201740c7"}, + {file = "pydantic_core-2.18.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:030e4f9516f9947f38179249778709a460a3adb516bf39b5eb9066fcfe43d0e6"}, + {file = "pydantic_core-2.18.1-cp310-none-win32.whl", hash = "sha256:2e91711e36e229978d92642bfc3546333a9127ecebb3f2761372e096395fc649"}, + {file = "pydantic_core-2.18.1-cp310-none-win_amd64.whl", hash = "sha256:9a29726f91c6cb390b3c2338f0df5cd3e216ad7a938762d11c994bb37552edb0"}, + {file = "pydantic_core-2.18.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:9ece8a49696669d483d206b4474c367852c44815fca23ac4e48b72b339807f80"}, + {file = "pydantic_core-2.18.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7a5d83efc109ceddb99abd2c1316298ced2adb4570410defe766851a804fcd5b"}, + {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f7973c381283783cd1043a8c8f61ea5ce7a3a58b0369f0ee0ee975eaf2f2a1b"}, + {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:54c7375c62190a7845091f521add19b0f026bcf6ae674bdb89f296972272e86d"}, + {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd63cec4e26e790b70544ae5cc48d11b515b09e05fdd5eff12e3195f54b8a586"}, + {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:561cf62c8a3498406495cfc49eee086ed2bb186d08bcc65812b75fda42c38294"}, + {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68717c38a68e37af87c4da20e08f3e27d7e4212e99e96c3d875fbf3f4812abfc"}, + {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d5728e93d28a3c63ee513d9ffbac9c5989de8c76e049dbcb5bfe4b923a9739d"}, + {file = "pydantic_core-2.18.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f0f17814c505f07806e22b28856c59ac80cee7dd0fbb152aed273e116378f519"}, + {file = "pydantic_core-2.18.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d816f44a51ba5175394bc6c7879ca0bd2be560b2c9e9f3411ef3a4cbe644c2e9"}, + {file = "pydantic_core-2.18.1-cp311-none-win32.whl", hash = "sha256:09f03dfc0ef8c22622eaa8608caa4a1e189cfb83ce847045eca34f690895eccb"}, + {file = "pydantic_core-2.18.1-cp311-none-win_amd64.whl", hash = "sha256:27f1009dc292f3b7ca77feb3571c537276b9aad5dd4efb471ac88a8bd09024e9"}, + {file = "pydantic_core-2.18.1-cp311-none-win_arm64.whl", hash = "sha256:48dd883db92e92519201f2b01cafa881e5f7125666141a49ffba8b9facc072b0"}, + {file = "pydantic_core-2.18.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b6b0e4912030c6f28bcb72b9ebe4989d6dc2eebcd2a9cdc35fefc38052dd4fe8"}, + {file = "pydantic_core-2.18.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f3202a429fe825b699c57892d4371c74cc3456d8d71b7f35d6028c96dfecad31"}, + {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3982b0a32d0a88b3907e4b0dc36809fda477f0757c59a505d4e9b455f384b8b"}, + {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25595ac311f20e5324d1941909b0d12933f1fd2171075fcff763e90f43e92a0d"}, + {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:14fe73881cf8e4cbdaded8ca0aa671635b597e42447fec7060d0868b52d074e6"}, + {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:ca976884ce34070799e4dfc6fbd68cb1d181db1eefe4a3a94798ddfb34b8867f"}, + {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:684d840d2c9ec5de9cb397fcb3f36d5ebb6fa0d94734f9886032dd796c1ead06"}, + {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:54764c083bbe0264f0f746cefcded6cb08fbbaaf1ad1d78fb8a4c30cff999a90"}, + {file = "pydantic_core-2.18.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:201713f2f462e5c015b343e86e68bd8a530a4f76609b33d8f0ec65d2b921712a"}, + {file = "pydantic_core-2.18.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fd1a9edb9dd9d79fbeac1ea1f9a8dd527a6113b18d2e9bcc0d541d308dae639b"}, + {file = "pydantic_core-2.18.1-cp312-none-win32.whl", hash = "sha256:d5e6b7155b8197b329dc787356cfd2684c9d6a6b1a197f6bbf45f5555a98d411"}, + {file = "pydantic_core-2.18.1-cp312-none-win_amd64.whl", hash = "sha256:9376d83d686ec62e8b19c0ac3bf8d28d8a5981d0df290196fb6ef24d8a26f0d6"}, + {file = "pydantic_core-2.18.1-cp312-none-win_arm64.whl", hash = "sha256:c562b49c96906b4029b5685075fe1ebd3b5cc2601dfa0b9e16c2c09d6cbce048"}, + {file = "pydantic_core-2.18.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:3e352f0191d99fe617371096845070dee295444979efb8f27ad941227de6ad09"}, + {file = "pydantic_core-2.18.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0295d52b012cbe0d3059b1dba99159c3be55e632aae1999ab74ae2bd86a33d7"}, + {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56823a92075780582d1ffd4489a2e61d56fd3ebb4b40b713d63f96dd92d28144"}, + {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dd3f79e17b56741b5177bcc36307750d50ea0698df6aa82f69c7db32d968c1c2"}, + {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38a5024de321d672a132b1834a66eeb7931959c59964b777e8f32dbe9523f6b1"}, + {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2ce426ee691319d4767748c8e0895cfc56593d725594e415f274059bcf3cb76"}, + {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2adaeea59849ec0939af5c5d476935f2bab4b7f0335b0110f0f069a41024278e"}, + {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9b6431559676a1079eac0f52d6d0721fb8e3c5ba43c37bc537c8c83724031feb"}, + {file = "pydantic_core-2.18.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:85233abb44bc18d16e72dc05bf13848a36f363f83757541f1a97db2f8d58cfd9"}, + {file = "pydantic_core-2.18.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:641a018af4fe48be57a2b3d7a1f0f5dbca07c1d00951d3d7463f0ac9dac66622"}, + {file = "pydantic_core-2.18.1-cp38-none-win32.whl", hash = "sha256:63d7523cd95d2fde0d28dc42968ac731b5bb1e516cc56b93a50ab293f4daeaad"}, + {file = "pydantic_core-2.18.1-cp38-none-win_amd64.whl", hash = "sha256:907a4d7720abfcb1c81619863efd47c8a85d26a257a2dbebdb87c3b847df0278"}, + {file = "pydantic_core-2.18.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:aad17e462f42ddbef5984d70c40bfc4146c322a2da79715932cd8976317054de"}, + {file = "pydantic_core-2.18.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:94b9769ba435b598b547c762184bcfc4783d0d4c7771b04a3b45775c3589ca44"}, + {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80e0e57cc704a52fb1b48f16d5b2c8818da087dbee6f98d9bf19546930dc64b5"}, + 
{file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:76b86e24039c35280ceee6dce7e62945eb93a5175d43689ba98360ab31eebc4a"}, + {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12a05db5013ec0ca4a32cc6433f53faa2a014ec364031408540ba858c2172bb0"}, + {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:250ae39445cb5475e483a36b1061af1bc233de3e9ad0f4f76a71b66231b07f88"}, + {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a32204489259786a923e02990249c65b0f17235073149d0033efcebe80095570"}, + {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6395a4435fa26519fd96fdccb77e9d00ddae9dd6c742309bd0b5610609ad7fb2"}, + {file = "pydantic_core-2.18.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2533ad2883f001efa72f3d0e733fb846710c3af6dcdd544fe5bf14fa5fe2d7db"}, + {file = "pydantic_core-2.18.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b560b72ed4816aee52783c66854d96157fd8175631f01ef58e894cc57c84f0f6"}, + {file = "pydantic_core-2.18.1-cp39-none-win32.whl", hash = "sha256:582cf2cead97c9e382a7f4d3b744cf0ef1a6e815e44d3aa81af3ad98762f5a9b"}, + {file = "pydantic_core-2.18.1-cp39-none-win_amd64.whl", hash = "sha256:ca71d501629d1fa50ea7fa3b08ba884fe10cefc559f5c6c8dfe9036c16e8ae89"}, + {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e178e5b66a06ec5bf51668ec0d4ac8cfb2bdcb553b2c207d58148340efd00143"}, + {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:72722ce529a76a4637a60be18bd789d8fb871e84472490ed7ddff62d5fed620d"}, + {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fe0c1ce5b129455e43f941f7a46f61f3d3861e571f2905d55cdbb8b5c6f5e2c"}, + {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4284c621f06a72ce2cb55f74ea3150113d926a6eb78ab38340c08f770eb9b4d"}, + {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a0c3e718f4e064efde68092d9d974e39572c14e56726ecfaeebbe6544521f47"}, + {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2027493cc44c23b598cfaf200936110433d9caa84e2c6cf487a83999638a96ac"}, + {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:76909849d1a6bffa5a07742294f3fa1d357dc917cb1fe7b470afbc3a7579d539"}, + {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ee7ccc7fb7e921d767f853b47814c3048c7de536663e82fbc37f5eb0d532224b"}, + {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ee2794111c188548a4547eccc73a6a8527fe2af6cf25e1a4ebda2fd01cdd2e60"}, + {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a139fe9f298dc097349fb4f28c8b81cc7a202dbfba66af0e14be5cfca4ef7ce5"}, + {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d074b07a10c391fc5bbdcb37b2f16f20fcd9e51e10d01652ab298c0d07908ee2"}, + {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c69567ddbac186e8c0aadc1f324a60a564cfe25e43ef2ce81bcc4b8c3abffbae"}, + {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:baf1c7b78cddb5af00971ad5294a4583188bda1495b13760d9f03c9483bb6203"}, + {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2684a94fdfd1b146ff10689c6e4e815f6a01141781c493b97342cdc5b06f4d5d"}, + {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:73c1bc8a86a5c9e8721a088df234265317692d0b5cd9e86e975ce3bc3db62a59"}, + {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e60defc3c15defb70bb38dd605ff7e0fae5f6c9c7cbfe0ad7868582cb7e844a6"}, + {file = "pydantic_core-2.18.1.tar.gz", hash = "sha256:de9d3e8717560eb05e28739d1b35e4eac2e458553a52a301e51352a7ffc86a35"}, ] [package.dependencies] @@ -1677,17 +1719,17 @@ tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] [[package]] name = "pymdown-extensions" -version = "10.7" +version = "10.8" description = "Extension pack for Python Markdown." optional = false python-versions = ">=3.8" files = [ - {file = "pymdown_extensions-10.7-py3-none-any.whl", hash = "sha256:6ca215bc57bc12bf32b414887a68b810637d039124ed9b2e5bd3325cbb2c050c"}, - {file = "pymdown_extensions-10.7.tar.gz", hash = "sha256:c0d64d5cf62566f59e6b2b690a4095c931107c250a8c8e1351c1de5f6b036deb"}, + {file = "pymdown_extensions-10.8-py3-none-any.whl", hash = "sha256:3539003ff0d5e219ba979d2dc961d18fcad5ac259e66c764482e8347b4c0503c"}, + {file = "pymdown_extensions-10.8.tar.gz", hash = "sha256:91ca336caf414e1e5e0626feca86e145de9f85a3921a7bcbd32890b51738c428"}, ] [package.dependencies] -markdown = ">=3.5" +markdown = ">=3.6" pyyaml = "*" [package.extras] @@ -1852,13 +1894,13 @@ test = ["coverage", "docutils", "testscenarios (>=0.4)", "testtools"] [[package]] name = "python-dateutil" -version = "2.8.2" +version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, ] [package.dependencies] @@ -1938,106 +1980,222 @@ files = [ [package.dependencies] pyyaml = "*" +[[package]] +name = "pyzmq" +version = "25.1.2" +description = "Python bindings for 0MQ" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pyzmq-25.1.2-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:e624c789359f1a16f83f35e2c705d07663ff2b4d4479bad35621178d8f0f6ea4"}, + {file = "pyzmq-25.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:49151b0efece79f6a79d41a461d78535356136ee70084a1c22532fc6383f4ad0"}, + {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9a5f194cf730f2b24d6af1f833c14c10f41023da46a7f736f48b6d35061e76e"}, + {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:faf79a302f834d9e8304fafdc11d0d042266667ac45209afa57e5efc998e3872"}, + {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f51a7b4ead28d3fca8dda53216314a553b0f7a91ee8fc46a72b402a78c3e43d"}, + {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = 
"sha256:0ddd6d71d4ef17ba5a87becf7ddf01b371eaba553c603477679ae817a8d84d75"}, + {file = "pyzmq-25.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:246747b88917e4867e2367b005fc8eefbb4a54b7db363d6c92f89d69abfff4b6"}, + {file = "pyzmq-25.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:00c48ae2fd81e2a50c3485de1b9d5c7c57cd85dc8ec55683eac16846e57ac979"}, + {file = "pyzmq-25.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5a68d491fc20762b630e5db2191dd07ff89834086740f70e978bb2ef2668be08"}, + {file = "pyzmq-25.1.2-cp310-cp310-win32.whl", hash = "sha256:09dfe949e83087da88c4a76767df04b22304a682d6154de2c572625c62ad6886"}, + {file = "pyzmq-25.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:fa99973d2ed20417744fca0073390ad65ce225b546febb0580358e36aa90dba6"}, + {file = "pyzmq-25.1.2-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:82544e0e2d0c1811482d37eef297020a040c32e0687c1f6fc23a75b75db8062c"}, + {file = "pyzmq-25.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:01171fc48542348cd1a360a4b6c3e7d8f46cdcf53a8d40f84db6707a6768acc1"}, + {file = "pyzmq-25.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc69c96735ab501419c432110016329bf0dea8898ce16fab97c6d9106dc0b348"}, + {file = "pyzmq-25.1.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3e124e6b1dd3dfbeb695435dff0e383256655bb18082e094a8dd1f6293114642"}, + {file = "pyzmq-25.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7598d2ba821caa37a0f9d54c25164a4fa351ce019d64d0b44b45540950458840"}, + {file = "pyzmq-25.1.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d1299d7e964c13607efd148ca1f07dcbf27c3ab9e125d1d0ae1d580a1682399d"}, + {file = "pyzmq-25.1.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4e6f689880d5ad87918430957297c975203a082d9a036cc426648fcbedae769b"}, + {file = "pyzmq-25.1.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cc69949484171cc961e6ecd4a8911b9ce7a0d1f738fcae717177c231bf77437b"}, + {file = "pyzmq-25.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9880078f683466b7f567b8624bfc16cad65077be046b6e8abb53bed4eeb82dd3"}, + {file = "pyzmq-25.1.2-cp311-cp311-win32.whl", hash = "sha256:4e5837af3e5aaa99a091302df5ee001149baff06ad22b722d34e30df5f0d9097"}, + {file = "pyzmq-25.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:25c2dbb97d38b5ac9fd15586e048ec5eb1e38f3d47fe7d92167b0c77bb3584e9"}, + {file = "pyzmq-25.1.2-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:11e70516688190e9c2db14fcf93c04192b02d457b582a1f6190b154691b4c93a"}, + {file = "pyzmq-25.1.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:313c3794d650d1fccaaab2df942af9f2c01d6217c846177cfcbc693c7410839e"}, + {file = "pyzmq-25.1.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b3cbba2f47062b85fe0ef9de5b987612140a9ba3a9c6d2543c6dec9f7c2ab27"}, + {file = "pyzmq-25.1.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc31baa0c32a2ca660784d5af3b9487e13b61b3032cb01a115fce6588e1bed30"}, + {file = "pyzmq-25.1.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02c9087b109070c5ab0b383079fa1b5f797f8d43e9a66c07a4b8b8bdecfd88ee"}, + {file = "pyzmq-25.1.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:f8429b17cbb746c3e043cb986328da023657e79d5ed258b711c06a70c2ea7537"}, + {file = "pyzmq-25.1.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:5074adeacede5f810b7ef39607ee59d94e948b4fd954495bdb072f8c54558181"}, + {file = 
"pyzmq-25.1.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:7ae8f354b895cbd85212da245f1a5ad8159e7840e37d78b476bb4f4c3f32a9fe"}, + {file = "pyzmq-25.1.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b264bf2cc96b5bc43ce0e852be995e400376bd87ceb363822e2cb1964fcdc737"}, + {file = "pyzmq-25.1.2-cp312-cp312-win32.whl", hash = "sha256:02bbc1a87b76e04fd780b45e7f695471ae6de747769e540da909173d50ff8e2d"}, + {file = "pyzmq-25.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:ced111c2e81506abd1dc142e6cd7b68dd53747b3b7ae5edbea4578c5eeff96b7"}, + {file = "pyzmq-25.1.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:7b6d09a8962a91151f0976008eb7b29b433a560fde056ec7a3db9ec8f1075438"}, + {file = "pyzmq-25.1.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:967668420f36878a3c9ecb5ab33c9d0ff8d054f9c0233d995a6d25b0e95e1b6b"}, + {file = "pyzmq-25.1.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5edac3f57c7ddaacdb4d40f6ef2f9e299471fc38d112f4bc6d60ab9365445fb0"}, + {file = "pyzmq-25.1.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:0dabfb10ef897f3b7e101cacba1437bd3a5032ee667b7ead32bbcdd1a8422fe7"}, + {file = "pyzmq-25.1.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:2c6441e0398c2baacfe5ba30c937d274cfc2dc5b55e82e3749e333aabffde561"}, + {file = "pyzmq-25.1.2-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:16b726c1f6c2e7625706549f9dbe9b06004dfbec30dbed4bf50cbdfc73e5b32a"}, + {file = "pyzmq-25.1.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:a86c2dd76ef71a773e70551a07318b8e52379f58dafa7ae1e0a4be78efd1ff16"}, + {file = "pyzmq-25.1.2-cp36-cp36m-win32.whl", hash = "sha256:359f7f74b5d3c65dae137f33eb2bcfa7ad9ebefd1cab85c935f063f1dbb245cc"}, + {file = "pyzmq-25.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:55875492f820d0eb3417b51d96fea549cde77893ae3790fd25491c5754ea2f68"}, + {file = "pyzmq-25.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b8c8a419dfb02e91b453615c69568442e897aaf77561ee0064d789705ff37a92"}, + {file = "pyzmq-25.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8807c87fa893527ae8a524c15fc505d9950d5e856f03dae5921b5e9aa3b8783b"}, + {file = "pyzmq-25.1.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5e319ed7d6b8f5fad9b76daa0a68497bc6f129858ad956331a5835785761e003"}, + {file = "pyzmq-25.1.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:3c53687dde4d9d473c587ae80cc328e5b102b517447456184b485587ebd18b62"}, + {file = "pyzmq-25.1.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:9add2e5b33d2cd765ad96d5eb734a5e795a0755f7fc49aa04f76d7ddda73fd70"}, + {file = "pyzmq-25.1.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e690145a8c0c273c28d3b89d6fb32c45e0d9605b2293c10e650265bf5c11cfec"}, + {file = "pyzmq-25.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:00a06faa7165634f0cac1abb27e54d7a0b3b44eb9994530b8ec73cf52e15353b"}, + {file = "pyzmq-25.1.2-cp37-cp37m-win32.whl", hash = "sha256:0f97bc2f1f13cb16905a5f3e1fbdf100e712d841482b2237484360f8bc4cb3d7"}, + {file = "pyzmq-25.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6cc0020b74b2e410287e5942e1e10886ff81ac77789eb20bec13f7ae681f0fdd"}, + {file = "pyzmq-25.1.2-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:bef02cfcbded83473bdd86dd8d3729cd82b2e569b75844fb4ea08fee3c26ae41"}, + {file = "pyzmq-25.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e10a4b5a4b1192d74853cc71a5e9fd022594573926c2a3a4802020360aa719d8"}, + {file = 
"pyzmq-25.1.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8c5f80e578427d4695adac6fdf4370c14a2feafdc8cb35549c219b90652536ae"}, + {file = "pyzmq-25.1.2-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5dde6751e857910c1339890f3524de74007958557593b9e7e8c5f01cd919f8a7"}, + {file = "pyzmq-25.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea1608dd169da230a0ad602d5b1ebd39807ac96cae1845c3ceed39af08a5c6df"}, + {file = "pyzmq-25.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0f513130c4c361201da9bc69df25a086487250e16b5571ead521b31ff6b02220"}, + {file = "pyzmq-25.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:019744b99da30330798bb37df33549d59d380c78e516e3bab9c9b84f87a9592f"}, + {file = "pyzmq-25.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2e2713ef44be5d52dd8b8e2023d706bf66cb22072e97fc71b168e01d25192755"}, + {file = "pyzmq-25.1.2-cp38-cp38-win32.whl", hash = "sha256:07cd61a20a535524906595e09344505a9bd46f1da7a07e504b315d41cd42eb07"}, + {file = "pyzmq-25.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb7e49a17fb8c77d3119d41a4523e432eb0c6932187c37deb6fbb00cc3028088"}, + {file = "pyzmq-25.1.2-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:94504ff66f278ab4b7e03e4cba7e7e400cb73bfa9d3d71f58d8972a8dc67e7a6"}, + {file = "pyzmq-25.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6dd0d50bbf9dca1d0bdea219ae6b40f713a3fb477c06ca3714f208fd69e16fd8"}, + {file = "pyzmq-25.1.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:004ff469d21e86f0ef0369717351073e0e577428e514c47c8480770d5e24a565"}, + {file = "pyzmq-25.1.2-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c0b5ca88a8928147b7b1e2dfa09f3b6c256bc1135a1338536cbc9ea13d3b7add"}, + {file = "pyzmq-25.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c9a79f1d2495b167119d02be7448bfba57fad2a4207c4f68abc0bab4b92925b"}, + {file = "pyzmq-25.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:518efd91c3d8ac9f9b4f7dd0e2b7b8bf1a4fe82a308009016b07eaa48681af82"}, + {file = "pyzmq-25.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1ec23bd7b3a893ae676d0e54ad47d18064e6c5ae1fadc2f195143fb27373f7f6"}, + {file = "pyzmq-25.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db36c27baed588a5a8346b971477b718fdc66cf5b80cbfbd914b4d6d355e44e2"}, + {file = "pyzmq-25.1.2-cp39-cp39-win32.whl", hash = "sha256:39b1067f13aba39d794a24761e385e2eddc26295826530a8c7b6c6c341584289"}, + {file = "pyzmq-25.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:8e9f3fabc445d0ce320ea2c59a75fe3ea591fdbdeebec5db6de530dd4b09412e"}, + {file = "pyzmq-25.1.2-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a8c1d566344aee826b74e472e16edae0a02e2a044f14f7c24e123002dcff1c05"}, + {file = "pyzmq-25.1.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:759cfd391a0996345ba94b6a5110fca9c557ad4166d86a6e81ea526c376a01e8"}, + {file = "pyzmq-25.1.2-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c61e346ac34b74028ede1c6b4bcecf649d69b707b3ff9dc0fab453821b04d1e"}, + {file = "pyzmq-25.1.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cb8fc1f8d69b411b8ec0b5f1ffbcaf14c1db95b6bccea21d83610987435f1a4"}, + {file = "pyzmq-25.1.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3c00c9b7d1ca8165c610437ca0c92e7b5607b2f9076f4eb4b095c85d6e680a1d"}, + {file = "pyzmq-25.1.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:df0c7a16ebb94452d2909b9a7b3337940e9a87a824c4fc1c7c36bb4404cb0cde"}, + {file = "pyzmq-25.1.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:45999e7f7ed5c390f2e87ece7f6c56bf979fb213550229e711e45ecc7d42ccb8"}, + {file = "pyzmq-25.1.2-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ac170e9e048b40c605358667aca3d94e98f604a18c44bdb4c102e67070f3ac9b"}, + {file = "pyzmq-25.1.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1b604734bec94f05f81b360a272fc824334267426ae9905ff32dc2be433ab96"}, + {file = "pyzmq-25.1.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:a793ac733e3d895d96f865f1806f160696422554e46d30105807fdc9841b9f7d"}, + {file = "pyzmq-25.1.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0806175f2ae5ad4b835ecd87f5f85583316b69f17e97786f7443baaf54b9bb98"}, + {file = "pyzmq-25.1.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ef12e259e7bc317c7597d4f6ef59b97b913e162d83b421dd0db3d6410f17a244"}, + {file = "pyzmq-25.1.2-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea253b368eb41116011add00f8d5726762320b1bda892f744c91997b65754d73"}, + {file = "pyzmq-25.1.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b9b1f2ad6498445a941d9a4fee096d387fee436e45cc660e72e768d3d8ee611"}, + {file = "pyzmq-25.1.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:8b14c75979ce932c53b79976a395cb2a8cd3aaf14aef75e8c2cb55a330b9b49d"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:889370d5174a741a62566c003ee8ddba4b04c3f09a97b8000092b7ca83ec9c49"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a18fff090441a40ffda8a7f4f18f03dc56ae73f148f1832e109f9bffa85df15"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99a6b36f95c98839ad98f8c553d8507644c880cf1e0a57fe5e3a3f3969040882"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4345c9a27f4310afbb9c01750e9461ff33d6fb74cd2456b107525bbeebcb5be3"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3516e0b6224cf6e43e341d56da15fd33bdc37fa0c06af4f029f7d7dfceceabbc"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:146b9b1f29ead41255387fb07be56dc29639262c0f7344f570eecdcd8d683314"}, + {file = "pyzmq-25.1.2.tar.gz", hash = "sha256:93f1aa311e8bb912e34f004cf186407a4e90eec4f0ecc0efd26056bf7eda0226"}, +] + +[package.dependencies] +cffi = {version = "*", markers = "implementation_name == \"pypy\""} + +[[package]] +name = "radixtarget" +version = "1.0.0.15" +description = "Check whether an IP address belongs to a cloud provider" +optional = false +python-versions = "<4.0,>=3.9" +files = [ + {file = "radixtarget-1.0.0.15-py3-none-any.whl", hash = "sha256:4e3f0620bfbc0ef2ff3d71270dd281c0e8428906d260f737f82b573a7b636dd8"}, + {file = "radixtarget-1.0.0.15.tar.gz", hash = "sha256:c8294ebbb76e6d2826deaa8fe18d568308eddfd25f20644e166c492d2626a70c"}, +] + [[package]] name = "regex" -version = "2023.12.25" +version = "2024.4.16" description = "Alternative regular expression module, to replace re." 
optional = false
python-versions = ">=3.7"
files = [
- {file = "regex-2023.12.25-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0694219a1d54336fd0445ea382d49d36882415c0134ee1e8332afd1529f0baa5"},
- {file = "regex-2023.12.25-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b014333bd0217ad3d54c143de9d4b9a3ca1c5a29a6d0d554952ea071cff0f1f8"},
- {file = "regex-2023.12.25-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d865984b3f71f6d0af64d0d88f5733521698f6c16f445bb09ce746c92c97c586"},
- {file = "regex-2023.12.25-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e0eabac536b4cc7f57a5f3d095bfa557860ab912f25965e08fe1545e2ed8b4c"},
- {file = "regex-2023.12.25-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c25a8ad70e716f96e13a637802813f65d8a6760ef48672aa3502f4c24ea8b400"},
- {file = "regex-2023.12.25-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9b6d73353f777630626f403b0652055ebfe8ff142a44ec2cf18ae470395766e"},
- {file = "regex-2023.12.25-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9cc99d6946d750eb75827cb53c4371b8b0fe89c733a94b1573c9dd16ea6c9e4"},
- {file = "regex-2023.12.25-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88d1f7bef20c721359d8675f7d9f8e414ec5003d8f642fdfd8087777ff7f94b5"},
- {file = "regex-2023.12.25-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cb3fe77aec8f1995611f966d0c656fdce398317f850d0e6e7aebdfe61f40e1cd"},
- {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7aa47c2e9ea33a4a2a05f40fcd3ea36d73853a2aae7b4feab6fc85f8bf2c9704"},
- {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:df26481f0c7a3f8739fecb3e81bc9da3fcfae34d6c094563b9d4670b047312e1"},
- {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c40281f7d70baf6e0db0c2f7472b31609f5bc2748fe7275ea65a0b4601d9b392"},
- {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:d94a1db462d5690ebf6ae86d11c5e420042b9898af5dcf278bd97d6bda065423"},
- {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ba1b30765a55acf15dce3f364e4928b80858fa8f979ad41f862358939bdd1f2f"},
- {file = "regex-2023.12.25-cp310-cp310-win32.whl", hash = "sha256:150c39f5b964e4d7dba46a7962a088fbc91f06e606f023ce57bb347a3b2d4630"},
- {file = "regex-2023.12.25-cp310-cp310-win_amd64.whl", hash = "sha256:09da66917262d9481c719599116c7dc0c321ffcec4b1f510c4f8a066f8768105"},
- {file = "regex-2023.12.25-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1b9d811f72210fa9306aeb88385b8f8bcef0dfbf3873410413c00aa94c56c2b6"},
- {file = "regex-2023.12.25-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d902a43085a308cef32c0d3aea962524b725403fd9373dea18110904003bac97"},
- {file = "regex-2023.12.25-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d166eafc19f4718df38887b2bbe1467a4f74a9830e8605089ea7a30dd4da8887"},
- {file = "regex-2023.12.25-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7ad32824b7f02bb3c9f80306d405a1d9b7bb89362d68b3c5a9be53836caebdb"},
- {file = "regex-2023.12.25-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:636ba0a77de609d6510235b7f0e77ec494d2657108f777e8765efc060094c98c"},
- {file = "regex-2023.12.25-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fda75704357805eb953a3ee15a2b240694a9a514548cd49b3c5124b4e2ad01b"},
- {file = "regex-2023.12.25-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f72cbae7f6b01591f90814250e636065850c5926751af02bb48da94dfced7baa"},
- {file = "regex-2023.12.25-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:db2a0b1857f18b11e3b0e54ddfefc96af46b0896fb678c85f63fb8c37518b3e7"},
- {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7502534e55c7c36c0978c91ba6f61703faf7ce733715ca48f499d3dbbd7657e0"},
- {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e8c7e08bb566de4faaf11984af13f6bcf6a08f327b13631d41d62592681d24fe"},
- {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:283fc8eed679758de38fe493b7d7d84a198b558942b03f017b1f94dda8efae80"},
- {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:f44dd4d68697559d007462b0a3a1d9acd61d97072b71f6d1968daef26bc744bd"},
- {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:67d3ccfc590e5e7197750fcb3a2915b416a53e2de847a728cfa60141054123d4"},
- {file = "regex-2023.12.25-cp311-cp311-win32.whl", hash = "sha256:68191f80a9bad283432385961d9efe09d783bcd36ed35a60fb1ff3f1ec2efe87"},
- {file = "regex-2023.12.25-cp311-cp311-win_amd64.whl", hash = "sha256:7d2af3f6b8419661a0c421584cfe8aaec1c0e435ce7e47ee2a97e344b98f794f"},
- {file = "regex-2023.12.25-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8a0ccf52bb37d1a700375a6b395bff5dd15c50acb745f7db30415bae3c2b0715"},
- {file = "regex-2023.12.25-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c3c4a78615b7762740531c27cf46e2f388d8d727d0c0c739e72048beb26c8a9d"},
- {file = "regex-2023.12.25-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ad83e7545b4ab69216cef4cc47e344d19622e28aabec61574b20257c65466d6a"},
- {file = "regex-2023.12.25-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7a635871143661feccce3979e1727c4e094f2bdfd3ec4b90dfd4f16f571a87a"},
- {file = "regex-2023.12.25-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d498eea3f581fbe1b34b59c697512a8baef88212f92e4c7830fcc1499f5b45a5"},
- {file = "regex-2023.12.25-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:43f7cd5754d02a56ae4ebb91b33461dc67be8e3e0153f593c509e21d219c5060"},
- {file = "regex-2023.12.25-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51f4b32f793812714fd5307222a7f77e739b9bc566dc94a18126aba3b92b98a3"},
- {file = "regex-2023.12.25-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba99d8077424501b9616b43a2d208095746fb1284fc5ba490139651f971d39d9"},
- {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4bfc2b16e3ba8850e0e262467275dd4d62f0d045e0e9eda2bc65078c0110a11f"},
- {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8c2c19dae8a3eb0ea45a8448356ed561be843b13cbc34b840922ddf565498c1c"},
- {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:60080bb3d8617d96f0fb7e19796384cc2467447ef1c491694850ebd3670bc457"},
- {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b77e27b79448e34c2c51c09836033056a0547aa360c45eeeb67803da7b0eedaf"},
- {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:518440c991f514331f4850a63560321f833979d145d7d81186dbe2f19e27ae3d"},
- {file = "regex-2023.12.25-cp312-cp312-win32.whl", hash = "sha256:e2610e9406d3b0073636a3a2e80db05a02f0c3169b5632022b4e81c0364bcda5"},
- {file = "regex-2023.12.25-cp312-cp312-win_amd64.whl", hash = "sha256:cc37b9aeebab425f11f27e5e9e6cf580be7206c6582a64467a14dda211abc232"},
- {file = "regex-2023.12.25-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:da695d75ac97cb1cd725adac136d25ca687da4536154cdc2815f576e4da11c69"},
- {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d126361607b33c4eb7b36debc173bf25d7805847346dd4d99b5499e1fef52bc7"},
- {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4719bb05094d7d8563a450cf8738d2e1061420f79cfcc1fa7f0a44744c4d8f73"},
- {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5dd58946bce44b53b06d94aa95560d0b243eb2fe64227cba50017a8d8b3cd3e2"},
- {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22a86d9fff2009302c440b9d799ef2fe322416d2d58fc124b926aa89365ec482"},
- {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2aae8101919e8aa05ecfe6322b278f41ce2994c4a430303c4cd163fef746e04f"},
- {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e692296c4cc2873967771345a876bcfc1c547e8dd695c6b89342488b0ea55cd8"},
- {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:263ef5cc10979837f243950637fffb06e8daed7f1ac1e39d5910fd29929e489a"},
- {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:d6f7e255e5fa94642a0724e35406e6cb7001c09d476ab5fce002f652b36d0c39"},
- {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:88ad44e220e22b63b0f8f81f007e8abbb92874d8ced66f32571ef8beb0643b2b"},
- {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:3a17d3ede18f9cedcbe23d2daa8a2cd6f59fe2bf082c567e43083bba3fb00347"},
- {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d15b274f9e15b1a0b7a45d2ac86d1f634d983ca40d6b886721626c47a400bf39"},
- {file = "regex-2023.12.25-cp37-cp37m-win32.whl", hash = "sha256:ed19b3a05ae0c97dd8f75a5d8f21f7723a8c33bbc555da6bbe1f96c470139d3c"},
- {file = "regex-2023.12.25-cp37-cp37m-win_amd64.whl", hash = "sha256:a6d1047952c0b8104a1d371f88f4ab62e6275567d4458c1e26e9627ad489b445"},
- {file = "regex-2023.12.25-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b43523d7bc2abd757119dbfb38af91b5735eea45537ec6ec3a5ec3f9562a1c53"},
- {file = "regex-2023.12.25-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:efb2d82f33b2212898f1659fb1c2e9ac30493ac41e4d53123da374c3b5541e64"},
- {file = "regex-2023.12.25-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b7fca9205b59c1a3d5031f7e64ed627a1074730a51c2a80e97653e3e9fa0d415"},
- {file = "regex-2023.12.25-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086dd15e9435b393ae06f96ab69ab2d333f5d65cbe65ca5a3ef0ec9564dfe770"},
- {file = "regex-2023.12.25-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e81469f7d01efed9b53740aedd26085f20d49da65f9c1f41e822a33992cb1590"},
- {file = "regex-2023.12.25-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:34e4af5b27232f68042aa40a91c3b9bb4da0eeb31b7632e0091afc4310afe6cb"},
- {file = "regex-2023.12.25-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9852b76ab558e45b20bf1893b59af64a28bd3820b0c2efc80e0a70a4a3ea51c1"},
- {file = "regex-2023.12.25-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff100b203092af77d1a5a7abe085b3506b7eaaf9abf65b73b7d6905b6cb76988"},
- {file = "regex-2023.12.25-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cc038b2d8b1470364b1888a98fd22d616fba2b6309c5b5f181ad4483e0017861"},
- {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:094ba386bb5c01e54e14434d4caabf6583334090865b23ef58e0424a6286d3dc"},
- {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5cd05d0f57846d8ba4b71d9c00f6f37d6b97d5e5ef8b3c3840426a475c8f70f4"},
- {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:9aa1a67bbf0f957bbe096375887b2505f5d8ae16bf04488e8b0f334c36e31360"},
- {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:98a2636994f943b871786c9e82bfe7883ecdaba2ef5df54e1450fa9869d1f756"},
- {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:37f8e93a81fc5e5bd8db7e10e62dc64261bcd88f8d7e6640aaebe9bc180d9ce2"},
- {file = "regex-2023.12.25-cp38-cp38-win32.whl", hash = "sha256:d78bd484930c1da2b9679290a41cdb25cc127d783768a0369d6b449e72f88beb"},
- {file = "regex-2023.12.25-cp38-cp38-win_amd64.whl", hash = "sha256:b521dcecebc5b978b447f0f69b5b7f3840eac454862270406a39837ffae4e697"},
- {file = "regex-2023.12.25-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f7bc09bc9c29ebead055bcba136a67378f03d66bf359e87d0f7c759d6d4ffa31"},
- {file = "regex-2023.12.25-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e14b73607d6231f3cc4622809c196b540a6a44e903bcfad940779c80dffa7be7"},
- {file = "regex-2023.12.25-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9eda5f7a50141291beda3edd00abc2d4a5b16c29c92daf8d5bd76934150f3edc"},
- {file = "regex-2023.12.25-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc6bb9aa69aacf0f6032c307da718f61a40cf970849e471254e0e91c56ffca95"},
- {file = "regex-2023.12.25-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:298dc6354d414bc921581be85695d18912bea163a8b23cac9a2562bbcd5088b1"},
- {file = "regex-2023.12.25-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f4e475a80ecbd15896a976aa0b386c5525d0ed34d5c600b6d3ebac0a67c7ddf"},
- {file = "regex-2023.12.25-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:531ac6cf22b53e0696f8e1d56ce2396311254eb806111ddd3922c9d937151dae"},
- {file = "regex-2023.12.25-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22f3470f7524b6da61e2020672df2f3063676aff444db1daa283c2ea4ed259d6"},
- {file = "regex-2023.12.25-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:89723d2112697feaa320c9d351e5f5e7b841e83f8b143dba8e2d2b5f04e10923"},
- {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0ecf44ddf9171cd7566ef1768047f6e66975788258b1c6c6ca78098b95cf9a3d"},
- {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:905466ad1702ed4acfd67a902af50b8db1feeb9781436372261808df7a2a7bca"},
- {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:4558410b7a5607a645e9804a3e9dd509af12fb72b9825b13791a37cd417d73a5"},
- {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:7e316026cc1095f2a3e8cc012822c99f413b702eaa2ca5408a513609488cb62f"},
- {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3b1de218d5375cd6ac4b5493e0b9f3df2be331e86520f23382f216c137913d20"},
- {file = "regex-2023.12.25-cp39-cp39-win32.whl", hash = "sha256:11a963f8e25ab5c61348d090bf1b07f1953929c13bd2309a0662e9ff680763c9"},
- {file = "regex-2023.12.25-cp39-cp39-win_amd64.whl", hash = "sha256:e693e233ac92ba83a87024e1d32b5f9ab15ca55ddd916d878146f4e3406b5c91"},
- {file = "regex-2023.12.25.tar.gz", hash = "sha256:29171aa128da69afdf4bde412d5bedc335f2ca8fcfe4489038577d05f16181e5"},
+ {file = "regex-2024.4.16-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fb83cc090eac63c006871fd24db5e30a1f282faa46328572661c0a24a2323a08"},
+ {file = "regex-2024.4.16-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8c91e1763696c0eb66340c4df98623c2d4e77d0746b8f8f2bee2c6883fd1fe18"},
+ {file = "regex-2024.4.16-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:10188fe732dec829c7acca7422cdd1bf57d853c7199d5a9e96bb4d40db239c73"},
+ {file = "regex-2024.4.16-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:956b58d692f235cfbf5b4f3abd6d99bf102f161ccfe20d2fd0904f51c72c4c66"},
+ {file = "regex-2024.4.16-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a70b51f55fd954d1f194271695821dd62054d949efd6368d8be64edd37f55c86"},
+ {file = "regex-2024.4.16-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c02fcd2bf45162280613d2e4a1ca3ac558ff921ae4e308ecb307650d3a6ee51"},
+ {file = "regex-2024.4.16-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ed75ea6892a56896d78f11006161eea52c45a14994794bcfa1654430984b22"},
+ {file = "regex-2024.4.16-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd727ad276bb91928879f3aa6396c9a1d34e5e180dce40578421a691eeb77f47"},
+ {file = "regex-2024.4.16-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7cbc5d9e8a1781e7be17da67b92580d6ce4dcef5819c1b1b89f49d9678cc278c"},
+ {file = "regex-2024.4.16-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:78fddb22b9ef810b63ef341c9fcf6455232d97cfe03938cbc29e2672c436670e"},
+ {file = "regex-2024.4.16-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:445ca8d3c5a01309633a0c9db57150312a181146315693273e35d936472df912"},
+ {file = "regex-2024.4.16-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:95399831a206211d6bc40224af1c635cb8790ddd5c7493e0bd03b85711076a53"},
+ {file = "regex-2024.4.16-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:7731728b6568fc286d86745f27f07266de49603a6fdc4d19c87e8c247be452af"},
+ {file = "regex-2024.4.16-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4facc913e10bdba42ec0aee76d029aedda628161a7ce4116b16680a0413f658a"},
+ {file = "regex-2024.4.16-cp310-cp310-win32.whl", hash = "sha256:911742856ce98d879acbea33fcc03c1d8dc1106234c5e7d068932c945db209c0"},
+ {file = "regex-2024.4.16-cp310-cp310-win_amd64.whl", hash = "sha256:e0a2df336d1135a0b3a67f3bbf78a75f69562c1199ed9935372b82215cddd6e2"},
+ {file = "regex-2024.4.16-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1210365faba7c2150451eb78ec5687871c796b0f1fa701bfd2a4a25420482d26"},
+ {file = "regex-2024.4.16-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9ab40412f8cd6f615bfedea40c8bf0407d41bf83b96f6fc9ff34976d6b7037fd"},
+ {file = "regex-2024.4.16-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fd80d1280d473500d8086d104962a82d77bfbf2b118053824b7be28cd5a79ea5"},
+ {file = "regex-2024.4.16-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bb966fdd9217e53abf824f437a5a2d643a38d4fd5fd0ca711b9da683d452969"},
+ {file = "regex-2024.4.16-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:20b7a68444f536365af42a75ccecb7ab41a896a04acf58432db9e206f4e525d6"},
+ {file = "regex-2024.4.16-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b74586dd0b039c62416034f811d7ee62810174bb70dffcca6439f5236249eb09"},
+ {file = "regex-2024.4.16-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c8290b44d8b0af4e77048646c10c6e3aa583c1ca67f3b5ffb6e06cf0c6f0f89"},
+ {file = "regex-2024.4.16-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2d80a6749724b37853ece57988b39c4e79d2b5fe2869a86e8aeae3bbeef9eb0"},
+ {file = "regex-2024.4.16-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3a1018e97aeb24e4f939afcd88211ace472ba566efc5bdf53fd8fd7f41fa7170"},
+ {file = "regex-2024.4.16-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8d015604ee6204e76569d2f44e5a210728fa917115bef0d102f4107e622b08d5"},
+ {file = "regex-2024.4.16-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:3d5ac5234fb5053850d79dd8eb1015cb0d7d9ed951fa37aa9e6249a19aa4f336"},
+ {file = "regex-2024.4.16-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:0a38d151e2cdd66d16dab550c22f9521ba79761423b87c01dae0a6e9add79c0d"},
+ {file = "regex-2024.4.16-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:159dc4e59a159cb8e4e8f8961eb1fa5d58f93cb1acd1701d8aff38d45e1a84a6"},
+ {file = "regex-2024.4.16-cp311-cp311-win32.whl", hash = "sha256:ba2336d6548dee3117520545cfe44dc28a250aa091f8281d28804aa8d707d93d"},
+ {file = "regex-2024.4.16-cp311-cp311-win_amd64.whl", hash = "sha256:8f83b6fd3dc3ba94d2b22717f9c8b8512354fd95221ac661784df2769ea9bba9"},
+ {file = "regex-2024.4.16-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:80b696e8972b81edf0af2a259e1b2a4a661f818fae22e5fa4fa1a995fb4a40fd"},
+ {file = "regex-2024.4.16-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d61ae114d2a2311f61d90c2ef1358518e8f05eafda76eaf9c772a077e0b465ec"},
+ {file = "regex-2024.4.16-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8ba6745440b9a27336443b0c285d705ce73adb9ec90e2f2004c64d95ab5a7598"},
+ {file = "regex-2024.4.16-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6295004b2dd37b0835ea5c14a33e00e8cfa3c4add4d587b77287825f3418d310"},
+ {file = "regex-2024.4.16-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4aba818dcc7263852aabb172ec27b71d2abca02a593b95fa79351b2774eb1d2b"},
+ {file = "regex-2024.4.16-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0800631e565c47520aaa04ae38b96abc5196fe8b4aa9bd864445bd2b5848a7a"},
+ {file = "regex-2024.4.16-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08dea89f859c3df48a440dbdcd7b7155bc675f2fa2ec8c521d02dc69e877db70"},
+ {file = "regex-2024.4.16-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eeaa0b5328b785abc344acc6241cffde50dc394a0644a968add75fcefe15b9d4"},
+ {file = "regex-2024.4.16-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4e819a806420bc010489f4e741b3036071aba209f2e0989d4750b08b12a9343f"},
+ {file = "regex-2024.4.16-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:c2d0e7cbb6341e830adcbfa2479fdeebbfbb328f11edd6b5675674e7a1e37730"},
+ {file = "regex-2024.4.16-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:91797b98f5e34b6a49f54be33f72e2fb658018ae532be2f79f7c63b4ae225145"},
+ {file = "regex-2024.4.16-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:d2da13568eff02b30fd54fccd1e042a70fe920d816616fda4bf54ec705668d81"},
+ {file = "regex-2024.4.16-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:370c68dc5570b394cbaadff50e64d705f64debed30573e5c313c360689b6aadc"},
+ {file = "regex-2024.4.16-cp312-cp312-win32.whl", hash = "sha256:904c883cf10a975b02ab3478bce652f0f5346a2c28d0a8521d97bb23c323cc8b"},
+ {file = "regex-2024.4.16-cp312-cp312-win_amd64.whl", hash = "sha256:785c071c982dce54d44ea0b79cd6dfafddeccdd98cfa5f7b86ef69b381b457d9"},
+ {file = "regex-2024.4.16-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e2f142b45c6fed48166faeb4303b4b58c9fcd827da63f4cf0a123c3480ae11fb"},
+ {file = "regex-2024.4.16-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e87ab229332ceb127a165612d839ab87795972102cb9830e5f12b8c9a5c1b508"},
+ {file = "regex-2024.4.16-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:81500ed5af2090b4a9157a59dbc89873a25c33db1bb9a8cf123837dcc9765047"},
+ {file = "regex-2024.4.16-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b340cccad138ecb363324aa26893963dcabb02bb25e440ebdf42e30963f1a4e0"},
+ {file = "regex-2024.4.16-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c72608e70f053643437bd2be0608f7f1c46d4022e4104d76826f0839199347a"},
+ {file = "regex-2024.4.16-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a01fe2305e6232ef3e8f40bfc0f0f3a04def9aab514910fa4203bafbc0bb4682"},
+ {file = "regex-2024.4.16-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:03576e3a423d19dda13e55598f0fd507b5d660d42c51b02df4e0d97824fdcae3"},
+ {file = "regex-2024.4.16-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:549c3584993772e25f02d0656ac48abdda73169fe347263948cf2b1cead622f3"},
+ {file = "regex-2024.4.16-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:34422d5a69a60b7e9a07a690094e824b66f5ddc662a5fc600d65b7c174a05f04"},
+ {file = "regex-2024.4.16-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:5f580c651a72b75c39e311343fe6875d6f58cf51c471a97f15a938d9fe4e0d37"},
+ {file = "regex-2024.4.16-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:3399dd8a7495bbb2bacd59b84840eef9057826c664472e86c91d675d007137f5"},
+ {file = "regex-2024.4.16-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8d1f86f3f4e2388aa3310b50694ac44daefbd1681def26b4519bd050a398dc5a"},
+ {file = "regex-2024.4.16-cp37-cp37m-win32.whl", hash = "sha256:dd5acc0a7d38fdc7a3a6fd3ad14c880819008ecb3379626e56b163165162cc46"},
+ {file = "regex-2024.4.16-cp37-cp37m-win_amd64.whl", hash = "sha256:ba8122e3bb94ecda29a8de4cf889f600171424ea586847aa92c334772d200331"},
+ {file = "regex-2024.4.16-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:743deffdf3b3481da32e8a96887e2aa945ec6685af1cfe2bcc292638c9ba2f48"},
+ {file = "regex-2024.4.16-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7571f19f4a3fd00af9341c7801d1ad1967fc9c3f5e62402683047e7166b9f2b4"},
+ {file = "regex-2024.4.16-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:df79012ebf6f4efb8d307b1328226aef24ca446b3ff8d0e30202d7ebcb977a8c"},
+ {file = "regex-2024.4.16-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e757d475953269fbf4b441207bb7dbdd1c43180711b6208e129b637792ac0b93"},
+ {file = "regex-2024.4.16-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4313ab9bf6a81206c8ac28fdfcddc0435299dc88cad12cc6305fd0e78b81f9e4"},
+ {file = "regex-2024.4.16-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d83c2bc678453646f1a18f8db1e927a2d3f4935031b9ad8a76e56760461105dd"},
+ {file = "regex-2024.4.16-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9df1bfef97db938469ef0a7354b2d591a2d438bc497b2c489471bec0e6baf7c4"},
+ {file = "regex-2024.4.16-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62120ed0de69b3649cc68e2965376048793f466c5a6c4370fb27c16c1beac22d"},
+ {file = "regex-2024.4.16-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c2ef6f7990b6e8758fe48ad08f7e2f66c8f11dc66e24093304b87cae9037bb4a"},
+ {file = "regex-2024.4.16-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8fc6976a3395fe4d1fbeb984adaa8ec652a1e12f36b56ec8c236e5117b585427"},
+ {file = "regex-2024.4.16-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:03e68f44340528111067cecf12721c3df4811c67268b897fbe695c95f860ac42"},
+ {file = "regex-2024.4.16-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ec7e0043b91115f427998febaa2beb82c82df708168b35ece3accb610b91fac1"},
+ {file = "regex-2024.4.16-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:c21fc21a4c7480479d12fd8e679b699f744f76bb05f53a1d14182b31f55aac76"},
+ {file = "regex-2024.4.16-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:12f6a3f2f58bb7344751919a1876ee1b976fe08b9ffccb4bbea66f26af6017b9"},
+ {file = "regex-2024.4.16-cp38-cp38-win32.whl", hash = "sha256:479595a4fbe9ed8f8f72c59717e8cf222da2e4c07b6ae5b65411e6302af9708e"},
+ {file = "regex-2024.4.16-cp38-cp38-win_amd64.whl", hash = "sha256:0534b034fba6101611968fae8e856c1698da97ce2efb5c2b895fc8b9e23a5834"},
+ {file = "regex-2024.4.16-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a7ccdd1c4a3472a7533b0a7aa9ee34c9a2bef859ba86deec07aff2ad7e0c3b94"},
+ {file = "regex-2024.4.16-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6f2f017c5be19984fbbf55f8af6caba25e62c71293213f044da3ada7091a4455"},
+ {file = "regex-2024.4.16-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:803b8905b52de78b173d3c1e83df0efb929621e7b7c5766c0843704d5332682f"},
+ {file = "regex-2024.4.16-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:684008ec44ad275832a5a152f6e764bbe1914bea10968017b6feaecdad5736e0"},
+ {file = "regex-2024.4.16-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65436dce9fdc0aeeb0a0effe0839cb3d6a05f45aa45a4d9f9c60989beca78b9c"},
+ {file = "regex-2024.4.16-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea355eb43b11764cf799dda62c658c4d2fdb16af41f59bb1ccfec517b60bcb07"},
+ {file = "regex-2024.4.16-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98c1165f3809ce7774f05cb74e5408cd3aa93ee8573ae959a97a53db3ca3180d"},
+ {file = "regex-2024.4.16-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cccc79a9be9b64c881f18305a7c715ba199e471a3973faeb7ba84172abb3f317"},
+ {file = "regex-2024.4.16-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:00169caa125f35d1bca6045d65a662af0202704489fada95346cfa092ec23f39"},
+ {file = "regex-2024.4.16-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6cc38067209354e16c5609b66285af17a2863a47585bcf75285cab33d4c3b8df"},
+ {file = "regex-2024.4.16-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:23cff1b267038501b179ccbbd74a821ac4a7192a1852d1d558e562b507d46013"},
+ {file = "regex-2024.4.16-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:b9d320b3bf82a39f248769fc7f188e00f93526cc0fe739cfa197868633d44701"},
+ {file = "regex-2024.4.16-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:89ec7f2c08937421bbbb8b48c54096fa4f88347946d4747021ad85f1b3021b3c"},
+ {file = "regex-2024.4.16-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4918fd5f8b43aa7ec031e0fef1ee02deb80b6afd49c85f0790be1dc4ce34cb50"},
+ {file = "regex-2024.4.16-cp39-cp39-win32.whl", hash = "sha256:684e52023aec43bdf0250e843e1fdd6febbe831bd9d52da72333fa201aaa2335"},
+ {file = "regex-2024.4.16-cp39-cp39-win_amd64.whl", hash = "sha256:e697e1c0238133589e00c244a8b676bc2cfc3ab4961318d902040d099fec7483"},
+ {file = "regex-2024.4.16.tar.gz", hash = "sha256:fa454d26f2e87ad661c4f0c5a5fe4cf6aab1e307d1b94f16ffdfcb089ba685c0"},
]

[[package]]
@@ -2069,7 +2227,6 @@ optional = false
python-versions = "*"
files = [
 {file = "requests-file-2.0.0.tar.gz", hash = "sha256:20c5931629c558fda566cacc10cfe2cd502433e628f568c34c80d96a0cc95972"},
- {file = "requests_file-2.0.0-py2.py3-none-any.whl", hash = "sha256:3e493d390adb44aa102ebea827a48717336d5268968c370eaf19abaf5cae13bf"},
]

[package.dependencies]
@@ -2077,13 +2234,13 @@ requests = ">=1.0.0"

[[package]]
name = "resolvelib"
-version = "0.8.1"
+version = "1.0.1"
description = "Resolve abstract dependencies into concrete ones"
optional = false
python-versions = "*"
files = [
- {file = "resolvelib-0.8.1-py2.py3-none-any.whl", hash = "sha256:d9b7907f055c3b3a2cfc56c914ffd940122915826ff5fb5b1de0c99778f4de98"},
- {file = "resolvelib-0.8.1.tar.gz", hash = "sha256:c6ea56732e9fb6fca1b2acc2ccc68a0b6b8c566d8f3e78e0443310ede61dbd37"},
+ {file = "resolvelib-1.0.1-py2.py3-none-any.whl", hash = "sha256:d2da45d1a8dfee81bdd591647783e340ef3bcb104b54c383f70d422ef5cc7dbf"},
+ {file = "resolvelib-1.0.1.tar.gz", hash = "sha256:04ce76cbd63fded2078ce224785da6ecd42b9564b1390793f64ddecbe997b309"},
]

[package.extras]
@@ -2094,18 +2251,18 @@ test = ["commentjson", "packaging", "pytest"]

[[package]]
name = "setuptools"
-version = "69.1.1"
+version = "69.5.1"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "setuptools-69.1.1-py3-none-any.whl", hash = "sha256:02fa291a0471b3a18b2b2481ed902af520c69e8ae0919c13da936542754b4c56"},
- {file = "setuptools-69.1.1.tar.gz", hash = "sha256:5c0806c7d9af348e6dd3777b4f4dbb42c7ad85b190104837488eab9a7c945cf8"},
+ {file = "setuptools-69.5.1-py3-none-any.whl", hash = "sha256:c636ac361bc47580504644275c9ad802c50415c7522212252c033bd15f301f32"},
+ {file = "setuptools-69.5.1.tar.gz", hash = "sha256:6c1fccdac05a97e598fb0ae3bbed5904ccb317337a51139dcd51453611bbb987"},
]

[package.extras]
-docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
-testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] [[package]] @@ -2231,13 +2388,24 @@ files = [ [[package]] name = "typing-extensions" -version = "4.10.0" +version = "4.11.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"}, - {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"}, + {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, + {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, +] + +[[package]] +name = "unidecode" +version = "1.3.8" +description = "ASCII transliterations of Unicode text" +optional = false +python-versions = ">=3.5" +files = [ + {file = "Unidecode-1.3.8-py3-none-any.whl", hash = "sha256:d130a61ce6696f8148a3bd8fe779c99adeb4b870584eeb9526584e9aa091fd39"}, + {file = "Unidecode-1.3.8.tar.gz", hash = "sha256:cfdb349d46ed3873ece4586b96aa75258726e2fa8ec21d6f00a591d98806c2f4"}, ] [[package]] @@ -2259,13 +2427,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "virtualenv" -version = "20.25.1" +version = "20.25.3" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ - {file = "virtualenv-20.25.1-py3-none-any.whl", hash = "sha256:961c026ac520bac5f69acb8ea063e8a4f071bcc9457b9c1f28f6b085c511583a"}, - {file = "virtualenv-20.25.1.tar.gz", hash = "sha256:e08e13ecdca7a0bd53798f356d5831434afa5b07b93f0abdf0797b7a06ffe197"}, + {file = "virtualenv-20.25.3-py3-none-any.whl", hash = "sha256:8aac4332f2ea6ef519c648d0bc48a5b1d324994753519919bddbb1aff25a104e"}, + {file = "virtualenv-20.25.3.tar.gz", hash = "sha256:7bb554bbdfeaacc3349fa614ea5bff6ac300fc7c335e9facf3a3bcfc703f45be"}, ] [package.dependencies] @@ -2274,7 +2442,7 @@ filelock = ">=3.12.2,<4" platformdirs = ">=3.9.1,<5" 
[package.extras]
-docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"]
+docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"]
test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"]

[[package]]
@@ -2401,13 +2569,13 @@ files = [

[[package]]
name = "werkzeug"
-version = "3.0.1"
+version = "3.0.2"
description = "The comprehensive WSGI web application library."
optional = false
python-versions = ">=3.8"
files = [
- {file = "werkzeug-3.0.1-py3-none-any.whl", hash = "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10"},
- {file = "werkzeug-3.0.1.tar.gz", hash = "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc"},
+ {file = "werkzeug-3.0.2-py3-none-any.whl", hash = "sha256:3aac3f5da756f93030740bc235d3e09449efcf65f2f55e3602e1d851b8f48795"},
+ {file = "werkzeug-3.0.2.tar.gz", hash = "sha256:e39b645a6ac92822588e7b39a692e7828724ceae0b0d702ef96701f90e70128d"},
]

[package.dependencies]
@@ -2453,20 +2621,20 @@ xmltodict = ">=0.12.0,<0.13.0"

[[package]]
name = "zipp"
-version = "3.17.0"
+version = "3.18.1"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.8"
files = [
- {file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"},
- {file = "zipp-3.17.0.tar.gz", hash = "sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0"},
+ {file = "zipp-3.18.1-py3-none-any.whl", hash = "sha256:206f5a15f2af3dbaee80769fb7dc6f249695e940acca08dfb2a4769fe61e538b"},
+ {file = "zipp-3.18.1.tar.gz", hash = "sha256:2884ed22e7d8961de1c9a05142eb69a247f120291bc0206a00a7642f09b5b715"},
]

[package.extras]
-docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
-testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"]

[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "4a8c30d18c95c6087fba11cfacac5276adf08d43006b46a11c92d60b67bdbbdd"
+content-hash = "ed8bb07e4ff5a5f665402db33f9016409547bef1ccb6a8c2c626c44fde075abb"
diff --git a/pyproject.toml b/pyproject.toml
index c10d7c559..7ba00c488 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -30,7 +30,7 @@ omegaconf = "^2.3.0"
psutil = "^5.9.4"
wordninja = "^2.0.0"
ansible-runner = "^2.3.2"
-deepdiff = "^6.2.3"
+deepdiff = ">=6.2.3,<8.0.0"
xmltojson = "^2.0.2"
pycryptodome = "^3.17"
idna = "^3.4"
@@ -43,10 +43,15 @@ lxml = ">=4.9.2,<6.0.0"
dnspython = "^2.4.2"
pydantic = "^2.4.2"
httpx = "^0.26.0"
-cloudcheck = ">=2.1.0.181,<4.0.0.0"
tldextract = "^5.1.1"
cachetools = "^5.3.2"
socksio = "^1.0.0"
+jinja2 = "^3.1.3"
+pyzmq = "^25.1.2"
+regex = "^2024.4.16"
+unidecode = "^1.3.8"
+radixtarget = "^1.0.0.15"
+cloudcheck = "^5.0.0.350"

[tool.poetry.group.dev.dependencies]
flake8 = ">=6,<8"