diff --git a/README.md b/README.md
index 18aa347..d04fbaf 100644
--- a/README.md
+++ b/README.md
@@ -4,11 +4,11 @@
-## Introduction
+## Introduction:

 BountyDrive is a comprehensive tool designed for penetration testers and cybersecurity researchers. It integrates various modules for performing attacks, reporting, and managing VPN/proxy settings, making it an indispensable asset for any security professional.

-## Features
+## Features:

 - **Automation**: Automate the process of finding vulnerabilities.
 - **Dorking**: Automate Google, GitHub, and Shodan dorking to find vulnerabilities.
 - **Web Crawling**: Crawl web pages to collect data.
@@ -18,11 +18,53 @@ BountyDrive is a comprehensive tool designed for penetration testers and cyberse
 - **WAF Bypassing**: Techniques to bypass Web Application Firewalls.
 - **Reporting**: Generate detailed reports of findings.
 - **VPN/Proxies Management**: Seamlessly switch between different VPN services and proxies to anonymize your activities.
-- **pypy3 Support**: Use pypy3 to speed up the execution of the tool.

-## Installation
+## Python

-### Packages
+- **Python3** is natively supported:
+
+```bash
+# Dorking process time with 9 threads:
+
+
+# Crawling process time with 9 threads:
+
+
+# XSS process time with 9 threads:
+
+
+```
+
+- **pypy3 Support**: Use pypy3 to speed up the execution of the tool:
+
+```bash
+# Dorking process time with 9 threads:
+
+
+# Crawling process time with 9 threads:
+
+
+# XSS process time with 9 threads:
+
+```
+
+- **numba Support**: Use numba to speed up the execution of the tool:
+
+```bash
+# Dorking process time with 9 threads:
+
+
+# Crawling process time with 9 threads:
+
+
+# XSS process time with 9 threads:
+
+
+```
+
+## Installation:
+
+### Packages:

 ```bash
 # For reCAPTCHA
@@ -30,14 +72,15 @@ sudo apt-get install portaudio19-dev
 ```

-### Pre-Commit
+### Pre-Commit:

 ```bash
 python3 -m pip install pre-commit
 pre-commit installed at .git/hooks/pre-commit
+mypy bounty_drive/
 ```

-### Classical
+### Classical:

 ```bash
 sudo apt-get install python3 python3-dev python3-venv
@@ -56,7 +99,7 @@ Update `config.ini`
 Run with `python3 bounty_drive.py`

-### PyPy
+### PyPy:

 Not ready - SEGFAULT in some libs (urllib3, cryptography downgraded).

@@ -85,7 +128,7 @@ pdate `config.ini`
 Run with `pypy3 bounty_drive.py`

-## Usage
+## Usage:

 ```bash
 # update configs/config.ini
@@ -93,12 +136,12 @@ python3 bountry_drive.py [config_file]
 pypy3 bountry_drive.py [config_file]
 ```

-## VPN/Proxies Management
+## VPN/Proxies Management:

 * NordVPN: Switch between NordVPN servers.
 * Proxies: Use different proxy lists to route your traffic.

-## Contributing
+## Contributing:

 We welcome contributions from the community. To contribute:

@@ -113,9 +156,16 @@ We welcome contributions from the community. To contribute:
 * https://github.com/hahwul/dalfox
 * https://github.com/mandiant/PwnAuth

-## TODOs
-Also watch module for more specfic TODOs
-* add a vulnerable wordpress plugin and then dork to find vulnerable wordpress sites
-* use singletons for config !!!
-* create class for each attack -* change the color used +## TODOs: + +Also watch module for more specfic TODOs: + +* Implement API/SCAN/SQLi/SSTI +* https://python-hyperscan.readthedocs.io/en/latest/usage/ for regex +* Improving Selenium for WAF bypass and perform attack (check for edge driver seems better) +* Add a vulnerable wordpress plugin and then dork to find vulnerable wordpress sites +* Create class for each attack +* Change the color used +* Implement the login module in website to attacks with Cookie & co. +* Add similar page detector to avoid duplicate crawling +* implement asyncio \ No newline at end of file diff --git a/bounty_drive/attacks/crawl/crawling.py b/bounty_drive/attacks/crawl/crawling.py index 6bd74a2..f06909b 100644 --- a/bounty_drive/attacks/crawl/crawling.py +++ b/bounty_drive/attacks/crawl/crawling.py @@ -1,12 +1,15 @@ +import os import sys import threading import concurrent.futures +import time from urllib.parse import urlparse from termcolor import cprint -import tqdm +from tqdm import tqdm from attacks.xss.xss_striker import photon_crawler from reporting.results_manager import ( + get_links, get_processed_crawled, save_crawling_query, crawling_results, @@ -15,7 +18,20 @@ from scraping.web_scraper import scrape_links_from_url -def launch_crawling_attack(config, website_to_test): +def launch_crawling_attack(config): + start_time = time.time() + website_to_test = get_links(config) + cprint( + "\nCrawling/Webscraping for vulnerable website...\n", + "blue", + file=sys.stderr, + ) + if not website_to_test: + cprint( + "No websites found matching the dorks. Please adjust your search criteria.", + "red", + file=sys.stderr, + ) try: proxies, proxy_cycle = get_proxies_and_cycle(config) @@ -26,7 +42,7 @@ def launch_crawling_attack(config, website_to_test): lock = threading.Lock() # Now, append a proxy to each task - number_of_worker = len(proxies) + number_of_worker = 30 # min(len(proxies)*2, 30) search_tasks_with_proxy = [] for website in website_to_test: proxy = next(proxy_cycle) @@ -59,26 +75,22 @@ def launch_crawling_attack(config, website_to_test): website_to_test = list(set(website_to_test)) elif config["do_crawl"]: + + # TODO add the set outside the thread and add al lock lock = threading.Lock() - number_of_worker = len(proxies) + number_of_worker = 30 # min(len(proxies)*2, 30) search_tasks_with_proxy = [] for website in website_to_test: - cprint( - f"Testing {website} for crawling", color="yellow", file=sys.stderr - ) + cprint(f"Testing {website} for crawling", color="blue", file=sys.stderr) scheme = urlparse(website).scheme cprint( "Target scheme: {}".format(scheme), - color="yellow", + color="blue", file=sys.stderr, ) host = urlparse(website).netloc - - main_url = scheme + "://" + host - - cprint("Target host: {}".format(host), color="yellow", file=sys.stderr) - + cprint("Target host: {}".format(host), color="blue", file=sys.stderr) proxy = next(proxy_cycle) search_tasks_with_proxy.append({"website": website, "proxy": proxy}) @@ -99,58 +111,92 @@ def launch_crawling_attack(config, website_to_test): ): task for task in search_tasks_with_proxy } - for website in tqdm( - concurrent.futures.as_completed(future_to_search), - desc=f"Photon Crawling links DB for xss website", - unit="site", - total=len(future_to_search), - ): - with lock: - crawling_result = website.result() - seedUrl = website["website"] - - cprint( - f"Forms: {crawling_result[0]}", - color="green", - file=sys.stderr, - ) - cprint( - f"DOM URLs: {crawling_result[1]}", - color="green", - 
file=sys.stderr, - ) - forms_temps = list(set(crawling_result[0])) - - domURLs_temps = list(set(list(crawling_result[1]))) - - difference = abs(len(domURLs) - len(forms)) - - if len(domURLs_temps) > len(forms_temps): - for i in range(difference): - forms_temps.append(0) - elif len(forms_temps) > len(domURLs_temps): - for i in range(difference): - domURLs_temps.append(0) - - result = (seedUrl, forms_temps, domURLs_temps) - - crawling_results.append((result, config)) - - domURLs += domURLs_temps - forms += forms_temps - cprint( - f"Total domURLs links: {len(domURLs)}", - color="green", - file=sys.stderr, - ) - cprint( - f"Total forms links: {len(forms)}", - color="green", - file=sys.stderr, - ) - except KeyboardInterrupt: + try: + for website in tqdm( + concurrent.futures.as_completed(future_to_search), + desc=f"Photon Crawling links DB for xss website", + unit="site", + total=len(future_to_search), + ): + with lock: + crawling_result = website.result() + seedUrl = crawling_result[2] + cprint( + f"Photon crawling finish for {seedUrl}", + color="green", + file=sys.stderr, + ) + + cprint( + f"Forms: {crawling_result[0]}", + color="green", + file=sys.stderr, + ) + cprint( + f"DOM URLs: {crawling_result[1]}", + color="green", + file=sys.stderr, + ) + forms_temps = crawling_result[0] + + domURLs_temps = crawling_result[1] + + difference = abs(len(domURLs) - len(forms)) + + if len(domURLs_temps) > len(forms_temps): + for i in range(difference): + forms_temps.append(0) + elif len(forms_temps) > len(domURLs_temps): + for i in range(difference): + domURLs_temps.append(0) + + result = (seedUrl, forms_temps, domURLs_temps) + + crawling_results.append((result, config)) + + domURLs += domURLs_temps + forms += forms_temps + except KeyboardInterrupt: + cprint( + "Process interrupted by user during crawling attack phase ... Saving results", + "red", + file=sys.stderr, + ) + end_time = time.time() + cprint( + "Total time taken: " + str(end_time - start_time), + "green", + file=sys.stderr, + ) + executor._threads.clear() + concurrent.futures.thread._threads_queues.clear() + # https://stackoverflow.com/questions/49992329/the-workers-in-threadpoolexecutor-is-not-really-daemon + for result, config in crawling_results: + save_crawling_query(result, config) + # TODO with attacks + exit(1) + # except Exception as e: + # exc_type, exc_obj, exc_tb = sys.exc_info() + # fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] + # cprint(f"{exc_type}, {fname}, {exc_tb.tb_lineno}", "red", file=sys.stderr) + # cprint(f"Error: {e}", color="red", file=sys.stderr) + finally: + end_time = time.time() + cprint( + "Total time taken: " + str(end_time - start_time), "green", file=sys.stderr + ) cprint( - "Process interrupted by user during crawling attack phase ... Saving results", + f"Total domURLs links: {len(domURLs)}", + color="green", + file=sys.stderr, + ) + cprint( + f"Total forms links: {len(forms)}", + color="green", + file=sys.stderr, + ) + print( + "Ending crawling attack phase ... 
Saving results", "red", file=sys.stderr, ) @@ -159,6 +205,4 @@ def launch_crawling_attack(config, website_to_test): for result, config in crawling_results: save_crawling_query(result, config) # TODO with attacks - exit(1) - except Exception as e: - cprint(f"Error: {e}", color="red", file=sys.stderr) + # exit(1) diff --git a/bounty_drive/attacks/dorks/search_engine_dorking.py b/bounty_drive/attacks/dorks/search_engine_dorking.py index 55aeff9..65e1f6d 100644 --- a/bounty_drive/attacks/dorks/search_engine_dorking.py +++ b/bounty_drive/attacks/dorks/search_engine_dorking.py @@ -6,6 +6,7 @@ import glob import random import threading +import time import requests from tqdm import tqdm import sys @@ -185,7 +186,6 @@ def execute_search_with_retries( "Accept-Encoding": "gzip,deflate", "Connection": "close", "DNT": "1", - "accept-language": "en-US,en;q=0.9", "cache-control": "max-age=0", "Upgrade-Insecure-Requests": "1", } @@ -410,7 +410,9 @@ def launch_google_dorks_and_search_attack(config, categories): Returns: None """ + start_time = time.time() try: + proxies, proxy_cycle = get_proxies_and_cycle(config) search_tasks = {} @@ -444,7 +446,7 @@ def launch_google_dorks_and_search_attack(config, categories): thread = threading.Thread(target=change_vpn) thread.start() - number_of_worker = min(len(proxies), 30) + number_of_worker = 30 # min(len(proxies)*2, 30) cprint(f"Number of workers: {number_of_worker}", "yellow", file=sys.stderr) search_tasks_with_proxy = [] @@ -465,7 +467,7 @@ def launch_google_dorks_and_search_attack(config, categories): "yellow", file=sys.stderr, ) - + # TODO https://stackoverflow.com/questions/65832061/threadpoolexecutor-keyboardinterrupt with concurrent.futures.ThreadPoolExecutor( max_workers=number_of_worker ) as executor: @@ -481,32 +483,48 @@ def launch_google_dorks_and_search_attack(config, categories): ): task for task in search_tasks_with_proxy } - for future in tqdm( - concurrent.futures.as_completed(future_to_search), - total=len(future_to_search), - desc="Searching for vulnerable website", - unit="site", - ): - future.result() + try: + for future in tqdm( + concurrent.futures.as_completed(future_to_search), + total=len(future_to_search), + desc="Searching for vulnerable website", + unit="site", + ): + future.result() + except KeyboardInterrupt: + cprint( + "Process interrupted by user during google dorking phase ... Saving results", + "red", + file=sys.stderr, + ) + + end_time = time.time() + cprint( + "Total time taken: " + str(end_time - start_time), + "green", + file=sys.stderr, + ) + executor._threads.clear() + concurrent.futures.thread._threads_queues.clear() + # https://stackoverflow.com/questions/49992329/the-workers-in-threadpoolexecutor-is-not-really-daemon + for result, config in google_dorking_results: + save_dorking_query(result, config) + exit() + + end_time = time.time() + cprint( + "Total time taken: " + str(end_time - start_time), "green", file=sys.stderr + ) cprint( f"Saving dorks - Total number of dorks processed: {len(google_dorking_results)}", "green", file=sys.stderr, ) + + # TODO remove duplicate url and merge dorks for result, config in google_dorking_results: save_dorking_query(result, config) - except KeyboardInterrupt: - cprint( - "Process interrupted by user during google dorking phase ... 
Saving results", - "red", - file=sys.stderr, - ) - # concurrent.futures.thread._threads_queues.clear() - # https://stackoverflow.com/questions/49992329/the-workers-in-threadpoolexecutor-is-not-really-daemon - for result, config in google_dorking_results: - save_dorking_query(result, config) - exit() except Exception as e: cprint(f"Error searching for dorks: {e}", "red", file=sys.stderr) raise e diff --git a/bounty_drive/attacks/sqli/sqli.py b/bounty_drive/attacks/sqli/sqli.py index 9ebb3f3..38a9438 100644 --- a/bounty_drive/attacks/sqli/sqli.py +++ b/bounty_drive/attacks/sqli/sqli.py @@ -100,21 +100,22 @@ def launch_sqli_attack(proxies): """ Test a list of websites for SQL injection vulnerability using multithreading and proxies. """ - VULN_PATHS["sqli"][1] = [] - - # The code snippet provided is written in Python and performs the following tasks: - - proxy_cycle = round_robin_proxies(proxies) - - for website in tqdm( - POTENTIAL_PATHS["sqli"][1], desc="Testing for SQLi", unit="site" - ): - proxy = next(proxy_cycle) - url, result = test_sqli_with_proxy(website, proxy) - if result is True: - cprint(f"{url} ===> Vulnerable!", "green", file=sys.stderr) - VULN_PATHS["sqli"][1].append(url) - elif result is False: - cprint(f"{url} ===> Not Vulnerable", "red", file=sys.stderr) - else: - cprint(f"{url} ===> Can not be Determined", "blue", file=sys.stderr) + pass + # VULN_PATHS["sqli"][1] = [] + + # # The code snippet provided is written in Python and performs the following tasks: + + # proxy_cycle = round_robin_proxies(proxies) + + # for website in tqdm( + # POTENTIAL_PATHS["sqli"][1], desc="Testing for SQLi", unit="site" + # ): + # proxy = next(proxy_cycle) + # url, result = test_sqli_with_proxy(website, proxy) + # if result is True: + # cprint(f"{url} ===> Vulnerable!", "green", file=sys.stderr) + # VULN_PATHS["sqli"][1].append(url) + # elif result is False: + # cprint(f"{url} ===> Not Vulnerable", "red", file=sys.stderr) + # else: + # cprint(f"{url} ===> Can not be Determined", "blue", file=sys.stderr) diff --git a/bounty_drive/attacks/xss/xss.py b/bounty_drive/attacks/xss/xss.py index 4119b30..c63d683 100644 --- a/bounty_drive/attacks/xss/xss.py +++ b/bounty_drive/attacks/xss/xss.py @@ -14,7 +14,11 @@ from attacks.dorks.search_engine_dorking import get_proxies_and_cycle from attacks.xss.xss_striker import attacker_crawler -from reporting.results_manager import get_crawling_results, update_attack_result +from reporting.results_manager import ( + get_crawling_results, + get_xss_links, + update_attack_result, +) from vpn_proxies.proxies_manager import prepare_proxies from bypasser.waf_mitigation import waf_detector from utils.app_config import ( @@ -54,7 +58,6 @@ def test_xss_target(url, proxy, config, dork_id, link_id, attack_id): "Accept-Language": "en-US,en;q=0.5", "Accept-Encoding": "gzip,deflate", "Connection": "close", - "accept-language": "en-US,en;q=0.9", "cache-control": "max-age=0", "DNT": "1", "Upgrade-Insecure-Requests": "1", @@ -155,26 +158,44 @@ def interceptor(request): # time.sleep(delay) # Wait before retrying -def launch_xss_attack(config, website_to_test): +def launch_xss_attack(config): """ Test a list of websites for XSS vulnerability using multithreading and proxies. """ + website_to_test = get_xss_links(config) + cprint( + "\nTesting websites for XSS vulnerability...\n", + "blue", + file=sys.stderr, + ) + if not website_to_test: + cprint( + "No websites found matching the dorks. 
Please adjust your search criteria.", + "red", + file=sys.stderr, + ) if len(website_to_test) > 0: try: proxies, proxy_cycle = get_proxies_and_cycle(config) - number_of_worker = len(proxies) + number_of_worker = 30 # min(len(proxies)*2, 30) # TODO: use blind-xss-payload-list.txt # configure a domain for the attacks website = get_crawling_results(config) + cprint( + f"Creating {len(website)} targets for XSS", + color="yellow", + file=sys.stderr, + ) + search_tasks_with_proxy = [] for website, domUrls, forms in website: proxy = next(proxy_cycle) scheme = urlparse(website).scheme host = urlparse(website).netloc main_url = scheme + "://" + host - for form, domURL in zip(forms, domUrls): + for form, domURL in list(zip(forms, domUrls)): search_tasks_with_proxy.append( { "main_url": main_url, @@ -189,7 +210,9 @@ def launch_xss_attack(config, website_to_test): if config["fuzz_xss"]: raise NotImplementedError("Fuzzing is not implemented yet") else: - blindPayloads = "alert(1)" # TODO read from file + blindPayloads = [] + with open("attacks/xss/payloads/blind-xss-payload-list.txt", "r") as f: + blindPayloads = f.readlines() encoding = base64 if config["encode_xss"] else False with concurrent.futures.ThreadPoolExecutor( max_workers=number_of_worker @@ -282,7 +305,7 @@ def launch_xss_attack(config, website_to_test): file=sys.stderr, ) exit(1) - except Exception as e: - cprint(f"Error: {e}", color="red", file=sys.stderr) + # except Exception as e: + # cprint(f"Error: {e}", color="red", file=sys.stderr) else: cprint("No Potential XSS", color="red", file=sys.stderr) diff --git a/bounty_drive/attacks/xss/xss_cve.py b/bounty_drive/attacks/xss/xss_cve.py index e7adb3e..3c5b949 100644 --- a/bounty_drive/attacks/xss/xss_cve.py +++ b/bounty_drive/attacks/xss/xss_cve.py @@ -285,7 +285,6 @@ def retire_js(url, response, config, proxies): "Connection": "close", "DNT": "1", "Upgrade-Insecure-Requests": "1", - "accept-language": "en-US,en;q=0.9", "cache-control": "max-age=0", } cprint( diff --git a/bounty_drive/attacks/xss/xss_striker.py b/bounty_drive/attacks/xss/xss_striker.py index e0b7ee7..fc25ca0 100644 --- a/bounty_drive/attacks/xss/xss_striker.py +++ b/bounty_drive/attacks/xss/xss_striker.py @@ -5,8 +5,10 @@ import re import sys import glob +import threading from urllib.parse import unquote, urlparse import bs4 +import html_similarity from termcolor import cprint from fuzzywuzzy import fuzz from bypasser.waf_mitigation import waf_detector @@ -480,7 +482,12 @@ def d(string): return forms -def photon_crawler(seedUrl, config, proxy, processed_xss_photon_crawl): +processed = set() # urls that have been crawled +processed_content = {} +lock_processed = threading.Lock() + + +def photon_crawler(seedUrl, config, proxy, processed_xss_photon_crawl, threshold=0.9): """Crawls a website to find forms and links for XSS vulnerability testing. # TODO update to crawl also for sqli @@ -494,7 +501,6 @@ def photon_crawler(seedUrl, config, proxy, processed_xss_photon_crawl): """ forms = [] # web forms - processed = set() # urls that have been crawled storage = set() # urls that belong to the target i.e. in-scope schema = urlparse(seedUrl).scheme # extract the scheme e.g. http or https host = urlparse(seedUrl).netloc # extract the host e.g. 
example.com @@ -508,7 +514,8 @@ def recursive_crawl(target): Args: target (_type_): _description_ """ - processed.add(target) + with lock_processed: + processed.add(target) printableTarget = "/".join(target.split("/")[3:]) if len(printableTarget) > 40: printableTarget = printableTarget[-40:] @@ -530,7 +537,6 @@ def recursive_crawl(target): "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Accept-Language": "en-US,en;q=0.5", "Accept-Encoding": "gzip,deflate", - "accept-language": "en-US,en;q=0.9", "cache-control": "max-age=0", "Connection": "close", "DNT": "1", @@ -563,6 +569,21 @@ def recursive_crawl(target): else: response = "" + with lock_processed: + for p in processed_content: + if processed_content[p] and response: + similarity = html_similarity.structural_similarity( + processed_content[p], response + ) + if similarity > threshold: + cprint( + f"Skipping already processed URL: {target} - similarity ratio: {similarity} with {p}", + "blue", + file=sys.stderr, + ) + return + processed_content[target] = response + retire_js(url, response, config, proxies) if not config["skip_dom"]: @@ -577,8 +598,18 @@ def recursive_crawl(target): color="green", file=sys.stderr, ) - for line in highlighted: - cprint(line, color="green", file=sys.stderr) + with lock_processed: + with open( + os.path.join( + config["experiment_folder"], "xss_dom_vectors.txt" + ), + "a", + ) as file: + file.write("URL: " + url + "\n") + for line in highlighted: + cprint(line, color="green", file=sys.stderr) + file.write(line + "\n") + file.write("\n") forms.append(zetanize(response)) @@ -621,7 +652,8 @@ def recursive_crawl(target): try: for x in range(config["level"]): - urls = storage - processed + with lock_processed: + urls = storage - processed # urls to crawl = all urls - urls that have been crawled if seedUrl in processed_xss_photon_crawl: @@ -631,7 +663,7 @@ def recursive_crawl(target): "yellow", file=sys.stderr, ) - return [forms, processed] + return [forms, checkedDOMs, seedUrl] cprint( "Crawling %s urls for forms and links\r" % len(urls), @@ -657,8 +689,8 @@ def recursive_crawl(target): # for i in concurrent.futures.as_completed(futures): # pass except KeyboardInterrupt: - return [forms, processed] - return [forms, processed] + return [forms, checkedDOMs, seedUrl] + return [forms, checkedDOMs, seedUrl] def html_xss_parser(response, encoding): @@ -835,7 +867,6 @@ def checker(config, proxy, url, params, GET, payload, positions, encoding): "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Accept-Language": "en-US,en;q=0.5", "Accept-Encoding": "gzip,deflate", - "accept-language": "en-US,en;q=0.9", "cache-control": "max-age=0", "Connection": "close", "DNT": "1", @@ -1338,6 +1369,7 @@ def attacker_crawler( proxy (str): The proxy server to use for the attack. 
""" if form: + cprint(f"Attacking forms: {form}", "green", file=sys.stderr) for each in form.values(): url = each["action"] if url: @@ -1412,11 +1444,13 @@ def attacker_crawler( file=sys.stderr, ) if not occurences: - cprint("No reflection found", "yellow", file=sys.stderr) + cprint( + "No XSS reflection found", "yellow", file=sys.stderr + ) continue else: cprint( - "Reflections found: %i" % len(occurences), + "XSS reflections found: %i" % len(occurences), "green", file=sys.stderr, ) @@ -1440,12 +1474,13 @@ def attacker_crawler( cprint("Generating payloads:", "green", file=sys.stderr) vectors = generator(occurences, response.text) - write_xss_vectors( - vectors, - os.path.join( - config["experiment_folder"], "xss_vectors.txt" - ), - ) + with lock_processed: + write_xss_vectors( + vectors, + os.path.join( + config["experiment_folder"], "xss_vectors.txt" + ), + ) if vectors: for confidence, vects in vectors.items(): try: @@ -1490,6 +1525,11 @@ def attacker_crawler( headers=headers, GET=GET, ) + cprint( + "Response: %s" % response.text, + "green", + file=sys.stderr, + ) # def xss_attack( diff --git a/bounty_drive/bounty_drive.py b/bounty_drive/bounty_drive.py index fa046d5..72c50c3 100755 --- a/bounty_drive/bounty_drive.py +++ b/bounty_drive/bounty_drive.py @@ -160,8 +160,8 @@ def get_user_input(config_file="configs/config.ini"): setup_experiment_folder(config, categories) cprint( - f"-Extension: {config['extension']}\n-Total Output: {config['total_output']}\n-Page No: {config['page_no']}\n-Do Google Dorking: {config['do_dorking_google']}\n-Do Github Dorking {config['do_dorking_github']}\n-Do XSS: {config['do_xss']}\n-Do SQLi: {config['do_sqli']},\n Domain: {config['subdomain']}\n-Use Proxy: {config['use_proxy']}", - "green", + f"-Extension: {config['extension']}\n-Total Output: {config['total_output']}\n-Page No: {config['page_no']}\n-Do Google Dorking: {config['do_dorking_google']}\n-Do Github Dorking {config['do_dorking_github']}\n-Do XSS: {config['do_xss']}\n-Do SQLi: {config['do_sqli']},\n -Domain: {config['subdomain']}\n-Use Proxy: {config['use_proxy']}", + "blue", file=sys.stderr, ) @@ -193,8 +193,8 @@ def get_user_input(config_file="configs/config.ini"): config["total_output"] = current_total_output search_tasks_with_proxy = [] - number_of_worker = min(len(proxies), 30) - cprint(f"Number of workers: {number_of_worker}", "yellow", file=sys.stderr) + number_of_worker = 30 # min(len(proxies)*2, 30) + cprint(f"Number of workers: {number_of_worker}", "blue", file=sys.stderr) with open(config["target_file"], "r") as file: subdomain_list = file.read().splitlines() @@ -226,7 +226,7 @@ def get_user_input(config_file="configs/config.ini"): else: cprint( f"Already initialized Dorking search for based targets {domain} - {category}", - "yellow", + color="cyan", file=sys.stderr, ) @@ -271,7 +271,7 @@ def setup_experiment_folder(config, categories): ) config["experiment_folder"] = folder_name if not os.path.exists(folder_name): - cprint(f"Creating folder {folder_name}", "yellow", file=sys.stderr) + cprint(f"Creating folder {folder_name}", "blue", file=sys.stderr) os.makedirs(folder_name) setup_csv(config, categories, folder_name) @@ -401,85 +401,31 @@ def setup_csv(config, categories, folder_name): exit() if config["do_dorking_google"]: - cprint( - "\nStarting Google dorking scan phase...\n", "yellow", file=sys.stderr - ) + cprint("\nStarting Google dorking scan phase...\n", "blue", file=sys.stderr) launch_google_dorks_and_search_attack(config, categories) if config["do_dorking_github"]: - cprint( - 
"\nStarting Github dorking scan phase...\n", "yellow", file=sys.stderr - ) + cprint("\nStarting Github dorking scan phase...\n", "blue", file=sys.stderr) raise NotImplementedError("Github dorking scan phase not implemented yet") launch_github_dorks_and_search_attack(config, categories) if config["do_dorking_shodan"]: - cprint( - "\nStarting Shodan dorking scan phase...\n", "yellow", file=sys.stderr - ) + cprint("\nStarting Shodan dorking scan phase...\n", "blue", file=sys.stderr) raise NotImplementedError("Shodan dorking scan phase not implemented yet") launch_shodan_dorks_and_search_attack(config, categories) if config["do_crawl"]: - website_to_test = get_links(config) - cprint( - "\nTesting websites for XSS vulnerability...\n", - "yellow", - file=sys.stderr, - ) - if not website_to_test: - cprint( - "No websites found matching the dorks. Please adjust your search criteria.", - "red", - file=sys.stderr, - ) - launch_crawling_attack(config, website_to_test) + launch_crawling_attack(config) if config["do_xss"]: - website_to_test = get_xss_links(config) - cprint( - "\nTesting websites for XSS vulnerability...\n", - "yellow", - file=sys.stderr, - ) - if not website_to_test: - cprint( - "No websites found matching the dorks. Please adjust your search criteria.", - "red", - file=sys.stderr, - ) - launch_xss_attack(config, website_to_test) + launch_xss_attack(config) if config["do_sqli"]: raise NotImplementedError("SQLi phase not implemented yet") - website_to_test = POTENTIAL_PATHS["sqli"][1] - cprint( - "\nTesting websites for SQL injection vulnerability...\n", - "yellow", - file=sys.stderr, - ) - if not website_to_test: - cprint( - "No websites found matching the dorks. Please adjust your search criteria.", - "red", - file=sys.stderr, - ) launch_sqli_attack(config) if config["do_api"]: raise NotImplementedError("API phase not implemented yet") - website_to_test = POTENTIAL_PATHS["sqli"][1] - cprint( - "\nTesting websites for SQL injection vulnerability...\n", - "yellow", - file=sys.stderr, - ) - if not website_to_test: - cprint( - "No websites found matching the dorks. 
Please adjust your search criteria.", - "red", - file=sys.stderr, - ) launch_api_attack(config) cprint(banner_terminal_res, "green", file=sys.stderr) @@ -498,8 +444,11 @@ def setup_csv(config, categories, folder_name): # ) # for target in VULN_PATHS["xss"][1]: # cprint(target, "red", file=sys.stderr) - except Exception as e: - cprint(f"Error: {e}", "red", file=sys.stderr) + # except Exception as e: + # exc_type, exc_obj, exc_tb = sys.exc_info() + # fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] + # cprint(f"{exc_type}, {fname}, {exc_tb.tb_lineno}", "red", file=sys.stderr) + # cprint(f"Error: {e}", "red", file=sys.stderr) except KeyboardInterrupt: cprint("Exiting...", "red", file=sys.stderr) # TODO save progress diff --git a/bounty_drive/bypasser/waf_mitigation.py b/bounty_drive/bypasser/waf_mitigation.py index 1024b72..0f52c1e 100644 --- a/bounty_drive/bypasser/waf_mitigation.py +++ b/bounty_drive/bypasser/waf_mitigation.py @@ -45,7 +45,6 @@ def waf_detector(proxies, url, config, mode="xss"): "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Accept-Language": "en-US,en;q=0.5", "Accept-Encoding": "gzip,deflate", - "accept-language": "en-US,en;q=0.9", "cache-control": "max-age=0", "Connection": "close", "DNT": "1", diff --git a/bounty_drive/configs/config.ini b/bounty_drive/configs/config.ini index 57a6c08..43ee746 100644 --- a/bounty_drive/configs/config.ini +++ b/bounty_drive/configs/config.ini @@ -2,7 +2,7 @@ extension = subdomain = true do_web_scap = true -target_file = configs/target_pornbox.txt +target_file = configs/target_toolsforhumanity.txt exclusion_file = configs/exclusion_pornbox.txt target_login = [] logging=DEBUG @@ -52,8 +52,8 @@ do_api = false [Proxy] use_proxy = true use_free_proxy_file = false -use_free_proxy = true -use_nordvpn_proxy = false +use_free_proxy = false +use_nordvpn_proxy = true proxies = [None] proxy_mean_delay = 10 proxy_factor = 1 diff --git a/bounty_drive/configs/target_pornbox.txt b/bounty_drive/configs/target_pornbox.txt index 3aba199..4813ab7 100644 --- a/bounty_drive/configs/target_pornbox.txt +++ b/bounty_drive/configs/target_pornbox.txt @@ -1,4 +1,4 @@ -www.pornbox.com -www.analvids.com -www.ddfcontent.com -www.pornworld.com \ No newline at end of file +pornbox.com +analvids.com +ddfcontent.com +pornworld.com \ No newline at end of file diff --git a/bounty_drive/configs/target_toolsforhumanity.txt b/bounty_drive/configs/target_toolsforhumanity.txt index 016c763..016291a 100644 --- a/bounty_drive/configs/target_toolsforhumanity.txt +++ b/bounty_drive/configs/target_toolsforhumanity.txt @@ -5,11 +5,6 @@ getworldcoin.com *.worldcoin-distributors.com bioid-management.app *.worldcoin.dev -https://apps.apple.com/no/app/world-app-worldcoin-wallet/id1560859847 -https://play.google.com/store/apps/details?id=com.worldcoin -https://github.com/worldcoin -https://github.com/worldcoin/world-id-contracts -https://github.com/worldcoin/world-id-state-bridge worldcoin.org *.toolsforhumanity.com support.worldcoin.com \ No newline at end of file diff --git a/bounty_drive/reporting/results_manager.py b/bounty_drive/reporting/results_manager.py index 878d05b..7bd0339 100644 --- a/bounty_drive/reporting/results_manager.py +++ b/bounty_drive/reporting/results_manager.py @@ -1,4 +1,5 @@ import csv +import json import os import sys @@ -261,7 +262,9 @@ def get_crawling_results(settings): with open(settings["crawl_csv"], mode="r", newline="") as file: reader = csv.DictReader(file) for row in reader: - 
crawling_results.append((row["seedUrl"], row["domURLs"], row["forms"])) + crawling_results.append( + (row["seedUrl"], json.loads(row["doms"]), json.loads(row["forms"])) + ) return crawling_results diff --git a/bounty_drive/scraping/web_scraper.py b/bounty_drive/scraping/web_scraper.py index 7624cd9..6138130 100644 --- a/bounty_drive/scraping/web_scraper.py +++ b/bounty_drive/scraping/web_scraper.py @@ -5,6 +5,7 @@ import sys import tempfile import time +import unicodedata from urllib.parse import urljoin import uuid import requests @@ -168,6 +169,43 @@ def by_pass_captcha(driver): pass +def slugify(value, allow_unicode=False): + """ + Taken from https://github.com/django/django/blob/master/django/utils/text.py + Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated + dashes to single dashes. Remove characters that aren't alphanumerics, + underscores, or hyphens. Convert to lowercase. Also strip leading and + trailing whitespace, dashes, and underscores. + """ + value = str(value) + if allow_unicode: + value = unicodedata.normalize("NFKC", value) + else: + value = ( + unicodedata.normalize("NFKD", value) + .encode("ascii", "ignore") + .decode("ascii") + ) + value = re.sub(r"[^\w\s-]", "", value.lower()) + return re.sub(r"[-\s]+", "-", value).strip("-_") + + +def parse_headers(headers): + headers = headers.replace("\\n", "\n") + sorted_headers = {} + matches = re.findall(r"(.*):\s(.*)", headers) + for match in matches: + header = match[0] + value = match[1] + try: + if value[-1] == ",": + value = value[:-1] + sorted_headers[header] = value + except IndexError: + pass + return sorted_headers + + def bypass_captcha_audio_phase(driver): recognizer = speech_recognition.Recognizer() @@ -286,9 +324,12 @@ def parse_google_search_results(proxies, advanced, full_query, response): file=sys.stderr, ) try: - with open( - f"outputs/html_google_todo/'google_search_{full_query}'.html", "w" - ) as f: + filename = ( + "outputs/html_google_todo/" + + slugify(f"google_search_{full_query}") + + ".html" + ) + with open(filename, "w") as f: f.write(response) except Exception as e: cprint( diff --git a/bounty_drive/vpn_proxies/nordvpn_switcher/nordvpn_switch.py b/bounty_drive/vpn_proxies/nordvpn_switcher/nordvpn_switch.py index e6643ec..fa7aa26 100644 --- a/bounty_drive/vpn_proxies/nordvpn_switcher/nordvpn_switch.py +++ b/bounty_drive/vpn_proxies/nordvpn_switcher/nordvpn_switch.py @@ -91,7 +91,6 @@ def set_headers(user_agent_rotator): "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.3", "Accept-Encoding": "none", - "accept-language": "en-US,en;q=0.9", "cache-control": "max-age=0", "Accept-Language": "en-US,en;q=0.8", "Connection": "keep-alive", diff --git a/requirements.txt b/requirements.txt index a96057b..5a87618 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,4 +12,9 @@ SpeechRecognition==3.8.1 pydub bs4 psutil -random_user_agent \ No newline at end of file +random_user_agent +numba +tbb +types-tqdm +types-psutil +types-beautifulsoup4 \ No newline at end of file diff --git a/requirements_pypy.txt b/requirements_pypy.txt index 304ad4f..d417301 100644 --- a/requirements_pypy.txt +++ b/requirements_pypy.txt @@ -1,6 +1,7 @@ terminal_banner termcolor tqdm +types-tqdm selenium urllib3==1.17 cryptography==38.0.4 @@ -14,4 +15,6 @@ SpeechRecognition==3.8.1 pydub bs4 psutil -random_user_agent \ No newline at end of file +types-beautifulsoup4 +random_user_agent +types-psutil
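The dorking and crawling hunks above wrap their `concurrent.futures.as_completed` loops in `try/except KeyboardInterrupt`, then clear the executor's private `_threads` set and `concurrent.futures.thread._threads_queues` (per the Stack Overflow link cited in the comments) so partial results can still be saved before exiting. The sketch below isolates that pattern; `do_task`, `save_result`, and `run_pool` are illustrative placeholders rather than BountyDrive functions, and clearing private attributes is a workaround, not a supported API.

```python
# Minimal sketch (not from the repository) of the interrupt-tolerant
# ThreadPoolExecutor pattern used in search_engine_dorking.py and crawling.py.
import concurrent.futures
import concurrent.futures.thread  # needed to reach the private _threads_queues
import sys
import time


def do_task(item):
    """Stand-in for one dork/crawl request."""
    time.sleep(0.1)
    return item


def save_result(result):
    """Stand-in for save_dorking_query / save_crawling_query."""
    print(f"saved: {result}", file=sys.stderr)


def run_pool(items, workers=30):
    results = []
    start = time.time()
    with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
        futures = {executor.submit(do_task, item): item for item in items}
        try:
            for future in concurrent.futures.as_completed(futures):
                results.append(future.result())
        except KeyboardInterrupt:
            # Workers are non-daemon threads, so a plain Ctrl+C would block in
            # executor.shutdown(). Dropping the internal bookkeeping (as the
            # patch does) lets the pool exit early with partial results intact.
            executor._threads.clear()
            concurrent.futures.thread._threads_queues.clear()
    for result in results:
        save_result(result)
    print(f"Total time taken: {time.time() - start:.1f}s", file=sys.stderr)
    return results


if __name__ == "__main__":
    run_pool(range(20))
```

On interrupt, the partial `results` list is flushed the same way the patch flushes `google_dorking_results` and `crawling_results` before calling `exit()`.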
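The photon_crawler changes also add a near-duplicate guard: each fetched page is compared against previously crawled pages with `html_similarity.structural_similarity` and skipped above a threshold (0.9 by default in the new signature). A standalone sketch of that guard follows, assuming the `html-similarity` package is installed; `seen_pages`, `fetch`, and `should_crawl` are hypothetical names used only for illustration.

```python
# Minimal sketch (assumed names) of the duplicate-page filter added to photon_crawler.
import sys
import threading

import html_similarity
import requests

lock = threading.Lock()
seen_pages = {}  # url -> HTML of pages already crawled


def fetch(url, timeout=10):
    """Placeholder fetcher; the real crawler goes through proxies and custom headers."""
    return requests.get(url, timeout=timeout).text


def should_crawl(url, html, threshold=0.9):
    """Return False when `html` is structurally similar to an already-crawled page."""
    with lock:
        for seen_url, seen_html in seen_pages.items():
            if seen_html and html:
                score = html_similarity.structural_similarity(seen_html, html)
                if score > threshold:
                    print(
                        f"Skipping {url}: {score:.2f} structural similarity with {seen_url}",
                        file=sys.stderr,
                    )
                    return False
        seen_pages[url] = html
    return True


if __name__ == "__main__":
    for target in ["https://example.com/", "https://example.com/index.html"]:
        page = fetch(target)
        if should_crawl(target, page):
            print(f"Crawling {target}", file=sys.stderr)
```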