diff --git a/.gitignore b/.gitignore
index 3a53240..34db760 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,8 +5,10 @@ potential*.txt
 __pycache__/
 nordvpn_login.csv
 nordvpn_login_token.csv
-bounty_drive/outputs/reports/*.csv
-bounty_drive/outputs/reports/*.zip
+bounty_drive/outputs/reports/*
 bounty_drive/outputs/html_google_todo/*.html
 pypy3-venv/*
-python3-venv/*
\ No newline at end of file
+python3-venv/*
+.VSCodeCounter/*
+.mypy_cache/*-
+.mypy_cache
\ No newline at end of file
diff --git a/Makefile b/Makefile
deleted file mode 100644
index e18adaa..0000000
--- a/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-# Makefile
-
-# Variables
-VENV := venv
-PYTHON := $(VENV)/bin/python3
-PIP := $(VENV)/bin/pip3
-
-# Default target
-all: setup
-
-# Setup the virtual environment and install requirements
-setup: $(VENV)/bin/activate
-	$(PYTHON) -m pip install wheel
-	$(PYTHON) -m pip install -r requirements.txt
-
-# Create virtual environment
-$(VENV)/bin/activate:
-	python3 -m venv $(VENV)
-
-# Clean up the environment
-clean:
-	rm -rf $(VENV)
-
-# Note: Use 'source venv/bin/activate' to activate the virtual environment manually.
diff --git a/README.md b/README.md
index d04fbaf..8e074d8 100644
--- a/README.md
+++ b/README.md
@@ -168,4 +168,9 @@ Also watch module for more specfic TODOs:
 * Change the color used
 * Implement the login module in website to attacks with Cookie & co.
 * Add similar page detector to avoid duplicate crawling
-* implement asyncio
\ No newline at end of file
+* implement asyncio
+* robot.txt: | http-robots.txt: 69 disallowed entries (15 shown)
+| /_admrus/ /_admrusr/ /publicite/www/delivery/
+| /signaler-contenu-illicite.html* /desabo/ /optins/preferences/ /php/ /php/ajax/*
+| /feuilletables/ /iframe/ /newsletters/
+|_/jeu-nouveau-rustica-bien-etre/ /concours/ /popunder/ /popup/
\ No newline at end of file
diff --git a/bounty_drive/attacks/crawl/crawling.py b/bounty_drive/attacks/crawl/crawling.py
index f06909b..f4ffe9f 100644
--- a/bounty_drive/attacks/crawl/crawling.py
+++ b/bounty_drive/attacks/crawl/crawling.py
@@ -175,11 +175,22 @@ def launch_crawling_attack(config):
                save_crawling_query(result, config)
            # TODO with attacks
            exit(1)
-    # except Exception as e:
-    #     exc_type, exc_obj, exc_tb = sys.exc_info()
-    #     fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
-    #     cprint(f"{exc_type}, {fname}, {exc_tb.tb_lineno}", "red", file=sys.stderr)
-    #     cprint(f"Error: {e}", color="red", file=sys.stderr)
+    except Exception as e:
+        exc_type, exc_obj, exc_tb = sys.exc_info()
+        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
+        cprint(f"{exc_type}, {fname}, {exc_tb.tb_lineno}", "red", file=sys.stderr)
+        cprint(f"Error: {e}", color="red", file=sys.stderr)
+        end_time = time.time()
+        cprint(
+            "Total time taken: " + str(end_time - start_time),
+            "green",
+            file=sys.stderr,
+        )
+        executor._threads.clear()
+        concurrent.futures.thread._threads_queues.clear()
+        # https://stackoverflow.com/questions/49992329/the-workers-in-threadpoolexecutor-is-not-really-daemon
+        for result, config in crawling_results:
+            save_crawling_query(result, config)
    finally:
        end_time = time.time()
        cprint(
diff --git a/bounty_drive/attacks/cve/regresshion-cve-2024-6387.py b/bounty_drive/attacks/cve/regresshion-cve-2024-6387.py
new file mode 100644
index 0000000..e69de29
diff --git a/bounty_drive/attacks/xss/xss.py b/bounty_drive/attacks/xss/xss.py
index c63d683..130248d 100644
--- a/bounty_drive/attacks/xss/xss.py
+++ b/bounty_drive/attacks/xss/xss.py
@@ -13,7 +13,7 @@
 from tqdm import tqdm
 
 from attacks.dorks.search_engine_dorking import get_proxies_and_cycle
-from attacks.xss.xss_striker import attacker_crawler
+from attacks.xss.xss_striker import attacker_crawler, base64_encoder
 from reporting.results_manager import (
     get_crawling_results,
     get_xss_links,
@@ -195,6 +195,10 @@ def launch_xss_attack(config):
            scheme = urlparse(website).scheme
            host = urlparse(website).netloc
            main_url = scheme + "://" + host
+            print(f"Main URL: {main_url}")
+            print(f"Forms: {forms}")
+            print(f"DOM URLS: {domUrls}")
+            print(f"zip(forms, domUrls): {list(zip(forms, domUrls))}")
            for form, domURL in list(zip(forms, domUrls)):
                search_tasks_with_proxy.append(
                    {
@@ -207,13 +211,20 @@
                }
            )
 
+    cprint(
+        f"Total XSS Targets: {len(search_tasks_with_proxy)}",
+        color="yellow",
+        file=sys.stderr,
+    )
+
    if config["fuzz_xss"]:
        raise NotImplementedError("Fuzzing is not implemented yet")
    else:
        blindPayloads = []
        with open("attacks/xss/payloads/blind-xss-payload-list.txt", "r") as f:
            blindPayloads = f.readlines()
-        encoding = base64 if config["encode_xss"] else False
+
+        encoding = base64_encoder if config["encode_xss"] else False
        with concurrent.futures.ThreadPoolExecutor(
            max_workers=number_of_worker
        ) as executor:
diff --git a/bounty_drive/attacks/xss/xss_striker.py b/bounty_drive/attacks/xss/xss_striker.py
index fc25ca0..a40a99c 100644
--- a/bounty_drive/attacks/xss/xss_striker.py
+++ b/bounty_drive/attacks/xss/xss_striker.py
@@ -1,3 +1,4 @@
+import base64
 import concurrent.futures
 import copy
 import os
@@ -328,9 +329,9 @@ def generate_xss_urls(url):
 
 def base64_encoder(string):
     if re.match(r"^[A-Za-z0-9+\/=]+$", string) and (len(string) % 4) == 0:
-        return bs4.b64decode(string.encode("utf-8")).decode("utf-8")
+        return base64.b64decode(string.encode("utf-8")).decode("utf-8")
     else:
-        return bs4.b64encode(string.encode("utf-8")).decode("utf-8")
+        return base64.b64encode(string.encode("utf-8")).decode("utf-8")
 
 
 def dom(response):
@@ -527,8 +528,9 @@ def recursive_crawl(target):
    params = get_params(target, "", True)
    if "=" in target:  # if there's a = in the url, there should be GET parameters
        inps = []
-        for name, value in params.items():
-            inps.append({"name": name, "value": value})
+        if params:
+            for name, value in params.items():
+                inps.append({"name": name, "value": value})
        forms.append({0: {"action": url, "method": "get", "inputs": inps}})
 
    headers = {
@@ -572,9 +574,17 @@
    with lock_processed:
        for p in processed_content:
            if processed_content[p] and response:
-                similarity = html_similarity.structural_similarity(
-                    processed_content[p], response
-                )
+                try:
+                    similarity = html_similarity.structural_similarity(
+                        processed_content[p], response
+                    )
+                except Exception as e:
+                    cprint(
+                        f"Error while comparing HTML content similarity: {e}",
+                        "red",
+                        file=sys.stderr,
+                    )
+                    similarity = 0
                if similarity > threshold:
                    cprint(
                        f"Skipping already processed URL: {target} - similarity ratio: {similarity} with {p}",
@@ -723,7 +733,10 @@
        }
    """
    rawResponse = response  # raw response returned by requests
-    response = response.text  # response content
+    if response:
+        response = response.text  # response content
+    else:
+        response = ""
    if encoding:  # if the user has specified an encoding, encode the probe in that
        response = response.replace(encoding(xsschecker), xsschecker)
    reflections = response.count(xsschecker)
@@ -874,18 +887,30 @@ def checker(config, proxy, url, params, GET, payload, positions, encoding):
    }
    response = start_request(
        proxies=proxies,
+        config=config,
        base_url=url,
        params=replace_value(params, xsschecker, checkString, copy.deepcopy),
        headers=headers,
        GET=GET,
    )
 
-    if hasattr(response, "text"):
-        response = response.text.lower()
+    if response:
+        if hasattr(response, "text"):
+            response = response.text.lower()
+        else:
+            response = response.read().decode("utf-8")
    reflectedPositions = []
-    for match in re.finditer("st4r7s", response):
-        reflectedPositions.append(match.start())
+    try:
+        for match in re.finditer("st4r7s", response):
+            reflectedPositions.append(match.start())
+    except Exception as e:
+        cprint(
+            "An error occurred while processing the st4r7s, response - {e}",
+            color="red",
+            file=sys.stderr,
+        )
+        pass
 
    filledPositions = fill_holes(positions, reflectedPositions)
    # Itretating over the reflections
    num = 0
@@ -1399,13 +1424,13 @@ def attacker_crawler(
    if is_waffed:
        cprint(
            "WAF detected: %s%s%s" % (green, is_waffed, end),
-            "red",
+            color="red",
            file=sys.stderr,
        )
    else:
        cprint(
            "WAF Status: %sOffline%s" % (green, end),
-            "green",
+            color="green",
            file=sys.stderr,
        )
 
@@ -1423,7 +1448,7 @@
                # TODO add session
                proxies = prepare_proxies(proxy, config)
                cprint(
-                    f"Testing attack for GET - Session (n° 0): {url} \n\t - parameters {paramsCopy} \n\t - headers {headers} \n\t - xss - with proxy {proxies} ...",
+                    f"Testing attack for {'GET' if GET else 'POST'} - Session (n° 0): {url} \n\t - parameters {paramsCopy} \n\t - headers {headers} \n\t - xss - with proxy {proxies} ...",
                    "yellow",
                    file=sys.stderr,
                )
@@ -1431,6 +1456,7 @@
                response = start_request(
                    config=config,
                    proxies=proxies,
+                    data=[paramsCopy],
                    base_url=url,
                    params=paramsCopy,
                    headers=headers,
@@ -1512,7 +1538,7 @@
                    for blindPayload in blindPayloads:
                        paramsCopy[paramName] = blindPayload
                        cprint(
-                            f"Testing attack for GET with blind payload - Session (n° 0): {url} \n\t - parameters {paramsCopy} \n\t - headers {headers} \n\t - xss - with proxy {proxies} ...",
+                            f"Testing blind XSS attack for {'GET' if GET else 'POST'} with blind payload - Session (n° 0): {url} \n\t - parameters {paramsCopy} \n\t - headers {headers} \n\t - xss - with proxy {proxies} ...",
                            "yellow",
                            file=sys.stderr,
                        )
@@ -1525,11 +1551,18 @@
                            headers=headers,
                            GET=GET,
                        )
-                        cprint(
-                            "Response: %s" % response.text,
-                            "green",
-                            file=sys.stderr,
-                        )
+                        if response:
+                            cprint(
+                                "Response blind XSS: %s" % response.text,
+                                "green",
+                                file=sys.stderr,
+                            )
+                        else:
+                            cprint(
+                                "Response blind XSS: %s" % response,
+                                "green",
+                                file=sys.stderr,
+                            )
 
 
 # def xss_attack(
diff --git a/bounty_drive/bounty_drive.py b/bounty_drive/bounty_drive.py
index 72c50c3..18f1908 100755
--- a/bounty_drive/bounty_drive.py
+++ b/bounty_drive/bounty_drive.py
@@ -47,6 +47,7 @@
 import configparser
 
 os.system("clear")
 
+csv.field_size_limit(100000000)
 #########################################################################################
 # Main function
diff --git a/bounty_drive/bypasser/waf_mitigation.py b/bounty_drive/bypasser/waf_mitigation.py
index 0f52c1e..cfda71b 100644
--- a/bounty_drive/bypasser/waf_mitigation.py
+++ b/bounty_drive/bypasser/waf_mitigation.py
@@ -64,11 +64,19 @@ def waf_detector(proxies, url, config, mode="xss"):
        config=config,
        bypassed_403=True,
    )
-    page = response.text
+    if response:
+        if hasattr(response, "text"):
+            page = response.text
+        else:
+            page = response.read().decode("utf-8")
+    else:
+        cprint(f"Waf Detector: No response {response}", "blue", file=sys.stderr)
+        return None
+
    code = str(response.status_code)
    headers = str(response.headers)
    cprint("Waf Detector code: {}".format(code), "blue", file=sys.stderr)
-    cprint("Waf Detector headers:", response.headers, "blue", file=sys.stderr)
+    cprint(f"Waf Detector headers: {response.headers}", "blue", file=sys.stderr)
 
    waf_signatures_files = glob.glob("bypasser/waf_signature/*.json", recursive=True)
    bestMatch = [0, None]
diff --git a/bounty_drive/configs/config.ini b/bounty_drive/configs/config.ini
index 43ee746..645c591 100644
--- a/bounty_drive/configs/config.ini
+++ b/bounty_drive/configs/config.ini
@@ -2,7 +2,7 @@
 extension =
 subdomain = true
 do_web_scap = true
-target_file = configs/target_toolsforhumanity.txt
+target_file = configs/target_att.txt
 exclusion_file = configs/exclusion_pornbox.txt
 target_login = []
 logging=DEBUG
diff --git a/bounty_drive/configs/target_rustica.txt b/bounty_drive/configs/target_rustica.txt
new file mode 100644
index 0000000..5bf7d3a
--- /dev/null
+++ b/bounty_drive/configs/target_rustica.txt
@@ -0,0 +1 @@
+rustica.fr
\ No newline at end of file
diff --git a/bounty_drive/reporting/results_manager.py b/bounty_drive/reporting/results_manager.py
index 7bd0339..b72462f 100644
--- a/bounty_drive/reporting/results_manager.py
+++ b/bounty_drive/reporting/results_manager.py
@@ -1,6 +1,7 @@
 import csv
 import json
 import os
+import re
 import sys
 
 from termcolor import cprint
@@ -258,12 +259,47 @@
    """
    crawling_results = []
 
+    color_codes = [
+        "\033[97m",  # white
+        "\033[92m",  # green
+        "\033[91m",  # red
+        "\033[93m",  # yellow
+        "\033[0m",  # end
+        "\033[7;91m",  # back
+        "\033[93m[!]\033[0m",  # info
+        "\033[94m[?]\033[0m",  # que
+        "\033[91m[-]\033[0m",  # bad
+        "\033[92m[+]\033[0m",  # good
+        "\033[97m[~]\033[0m",  # run
+    ]
+
    if os.path.exists(settings["crawl_csv"]):
        with open(settings["crawl_csv"], mode="r", newline="") as file:
            reader = csv.DictReader(file)
            for row in reader:
+                cprint(
+                    f"Getting {row} to experiment list...",
+                    "blue",
+                    file=sys.stderr,
+                )
+                dom_parsed = re.sub(r"\\u001b\[93m|\\u001b\[0m", "", str(row["doms"]))
+                dom_parsed = dom_parsed.replace("\\\\\\\\", "\\\\")
+
+                for color in color_codes:
+                    dom_parsed = dom_parsed.replace(color, "")
+
+                forms_parsed = str(row["forms"]).strip("'<>() ").replace("'", '"')
+                cprint(
+                    f"Getting {dom_parsed} & {forms_parsed} to experiment list under category crawl DOM",
+                    "blue",
+                    file=sys.stderr,
+                )
                crawling_results.append(
-                    (row["seedUrl"], json.loads(row["doms"]), json.loads(row["forms"]))
+                    (
+                        row["seedUrl"],
+                        [] if dom_parsed == "no" else json.loads(dom_parsed),
+                        [] if forms_parsed == "no" else json.loads(forms_parsed),
+                    )
                )
 
    return crawling_results
@@ -289,8 +325,8 @@
            crawl_id,
            seedUrl,
            "yes",
-            domURLs_temps,
-            forms_temps,
+            json.dumps(domURLs_temps),
+            json.dumps(forms_temps),
        ]  # Success and payload columns are initially empty
        writer.writerow(row)
        cprint(
diff --git a/bounty_drive/requester/request_manager.py b/bounty_drive/requester/request_manager.py
index f332718..90b4a2f 100644
--- a/bounty_drive/requester/request_manager.py
+++ b/bounty_drive/requester/request_manager.py
@@ -43,7 +43,7 @@ def headers_403_bypass():
        "X-Forwarded-Port": "80",
        "X-Forwarded-Port": "8080",
        "X-Forwarded-Port": "8443",
-        "X-Forwarded-Scheme": ["http", "https"],
+        "X-Forwarded-Scheme": random.choice(["http", "https"]),
        "X-Forwarded-Server": "127.0.0.1",
        "X-Forwarded": "127.0.0.1",
        "X-Forwarder-For": "127.0.0.1",
@@ -356,7 +356,7 @@ def start_request(
        # Parse Google response
        if response.status_code != 200:
            cprint(
-                f"Error in request ... - status code = {response.status_code}",
+                f"Error in {'GET' if GET else 'POST'} request {base_url} with params - {params}/data - {data} ... - status code = {response.status_code}",
                color="red",
                file=sys.stderr,
            )
@@ -401,7 +401,7 @@
                time.sleep(config["waf_delay"])
            else:
                cprint(
-                    f"Error in request ... - status code = {response.status_code}",
+                    f"Error in {'GET' if GET else 'POST'} request {base_url} with params - {params}/data - {data} ... - status code = {response.status_code}",
                    color="red",
                    file=sys.stderr,
                )
diff --git a/test.html b/test.html
deleted file mode 100644
index 0bd0121..0000000
--- a/test.html
+++ /dev/null
@@ -1,318 +0,0 @@
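
Note on the base64_encoder fix in bounty_drive/attacks/xss/xss_striker.py above: the previous code called bs4.b64encode/bs4.b64decode, but bs4 (BeautifulSoup) exposes no base64 helpers, so both branches failed at runtime; the standard-library base64 module is the correct target, as this patch imports. A minimal standalone sketch of the resulting round-trip behavior (payload strings here are illustrative, not from the patch):

import base64
import re


def base64_encoder(string):
    # Strings that already look like valid base64 are decoded; everything
    # else is encoded. Caveat inherited from the original heuristic: any
    # alphanumeric string whose length is a multiple of 4 is treated as
    # base64, so the decode branch can misfire (e.g. UnicodeDecodeError).
    if re.match(r"^[A-Za-z0-9+\/=]+$", string) and (len(string) % 4) == 0:
        return base64.b64decode(string.encode("utf-8")).decode("utf-8")
    else:
        return base64.b64encode(string.encode("utf-8")).decode("utf-8")


print(base64_encoder("<svg onload=alert(1)>"))  # contains '<' and spaces, so it is encoded
print(base64_encoder("cGF5bG9hZA=="))  # valid base64, decodes back to "payload"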
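The get_crawling_results change above strips a hard-coded list of termcolor/ANSI sequences out of the stored "doms" field before handing it to json.loads. If that fixed list ever misses a code, a general CSI regex is a common alternative; this is a sketch of that option, not part of the diff, and the names are illustrative:

import re

# Matches ANSI CSI color/formatting sequences such as \033[92m, \033[7;91m
# and the reset \033[0m, regardless of the specific parameters used.
ANSI_CSI_RE = re.compile(r"\x1b\[[0-9;]*m")


def strip_ansi(text: str) -> str:
    """Remove ANSI color codes so the remainder can be parsed as JSON."""
    return ANSI_CSI_RE.sub("", text)


assert strip_ansi("\033[92m[+]\033[0m done") == "[+] done"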
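The csv.field_size_limit(100000000) call added to bounty_drive.py likely pairs with the results_manager change: save_crawling_query now writes whole json.dumps()-serialized DOM/form lists into single CSV fields, which can exceed Python's default field limit and break reading the file back. A short illustration, with the limit value mirroring the diff:

import csv

print(csv.field_size_limit())  # default limit: 131072 characters (128 KiB)

# Serialized DOM lists can exceed that default, causing
# "_csv.Error: field larger than field limit (131072)" on read,
# so the limit is raised before any reader touches the crawl CSV.
csv.field_size_limit(100000000)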