
Commit

fix bugs in xss module
ElNiak committed Jul 7, 2024
1 parent 1a02f51 commit 8981eb8
Showing 14 changed files with 149 additions and 383 deletions.
8 changes: 5 additions & 3 deletions .gitignore
@@ -5,8 +5,10 @@ potential*.txt
__pycache__/
nordvpn_login.csv
nordvpn_login_token.csv
bounty_drive/outputs/reports/*.csv
bounty_drive/outputs/reports/*.zip
bounty_drive/outputs/reports/*
bounty_drive/outputs/html_google_todo/*.html
pypy3-venv/*
python3-venv/*
python3-venv/*
.VSCodeCounter/*
.mypy_cache/*
.mypy_cache
24 changes: 0 additions & 24 deletions Makefile

This file was deleted.

7 changes: 6 additions & 1 deletion README.md
@@ -168,4 +168,9 @@ Also watch module for more specific TODOs:
* Change the color used
* Implement the login module in website to attacks with Cookie & co.
* Add similar page detector to avoid duplicate crawling
* implement asyncio
* implement asyncio
* robots.txt: | http-robots.txt: 69 disallowed entries (15 shown)
| /_admrus/ /_admrusr/ /publicite/www/delivery/
| /signaler-contenu-illicite.html* /desabo/ /optins/preferences/ /php/ /php/ajax/*
| /feuilletables/ /iframe/ /newsletters/
|_/jeu-nouveau-rustica-bien-etre/ /concours/ /popunder/ /popup/
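The block above looks like pasted nmap http-robots.txt script output for the new rustica.fr target: the paths the site's robots.txt disallows, which a crawler would normally want to consult before scheduling URLs. A rough sketch of pulling those entries directly (assumptions: requests is available, and robots_disallowed is a hypothetical helper, not part of this commit):

import requests

def robots_disallowed(base_url, timeout=10):
    # Hypothetical helper: fetch /robots.txt and return its Disallow paths.
    resp = requests.get(base_url.rstrip("/") + "/robots.txt", timeout=timeout)
    entries = []
    for line in resp.text.splitlines():
        if line.lower().startswith("disallow:"):
            path = line.split(":", 1)[1].strip()
            if path:
                entries.append(path)
    return entries

# Example (hypothetical): robots_disallowed("https://rustica.fr")
# -> ["/_admrus/", "/desabo/", "/popunder/", ...]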
21 changes: 16 additions & 5 deletions bounty_drive/attacks/crawl/crawling.py
@@ -175,11 +175,22 @@ def launch_crawling_attack(config):
save_crawling_query(result, config)
# TODO with attacks
exit(1)
# except Exception as e:
# exc_type, exc_obj, exc_tb = sys.exc_info()
# fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
# cprint(f"{exc_type}, {fname}, {exc_tb.tb_lineno}", "red", file=sys.stderr)
# cprint(f"Error: {e}", color="red", file=sys.stderr)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
cprint(f"{exc_type}, {fname}, {exc_tb.tb_lineno}", "red", file=sys.stderr)
cprint(f"Error: {e}", color="red", file=sys.stderr)
end_time = time.time()
cprint(
"Total time taken: " + str(end_time - start_time),
"green",
file=sys.stderr,
)
executor._threads.clear()
concurrent.futures.thread._threads_queues.clear()
# https://stackoverflow.com/questions/49992329/the-workers-in-threadpoolexecutor-is-not-really-daemon
for result, config in crawling_results:
save_crawling_query(result, config)
finally:
end_time = time.time()
cprint(
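The executor._threads.clear() and concurrent.futures.thread._threads_queues.clear() calls above lean on CPython internals (per the linked Stack Overflow thread) so that queued work does not keep the process alive after a failure. On Python 3.9+ the same early-exit behaviour can usually be had without touching private attributes; a minimal sketch under that assumption, not what the commit does:

import concurrent.futures

def run_crawl_tasks(tasks, worker, max_workers=8):
    # Sketch: submit everything, then bail out without draining the queue.
    executor = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
    futures = [executor.submit(worker, task) for task in tasks]
    results = []
    try:
        for future in concurrent.futures.as_completed(futures):
            results.append(future.result())
    finally:
        # Python 3.9+: cancel_futures=True drops queued (not yet started) work
        # and returns immediately, instead of clearing executor._threads by hand.
        executor.shutdown(wait=False, cancel_futures=True)
    return results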
Empty file.
15 changes: 13 additions & 2 deletions bounty_drive/attacks/xss/xss.py
@@ -13,7 +13,7 @@
from tqdm import tqdm

from attacks.dorks.search_engine_dorking import get_proxies_and_cycle
from attacks.xss.xss_striker import attacker_crawler
from attacks.xss.xss_striker import attacker_crawler, base64_encoder
from reporting.results_manager import (
get_crawling_results,
get_xss_links,
@@ -195,6 +195,10 @@ def launch_xss_attack(config):
scheme = urlparse(website).scheme
host = urlparse(website).netloc
main_url = scheme + "://" + host
print(f"Main URL: {main_url}")
print(f"Forms: {forms}")
print(f"DOM URLS: {domUrls}")
print(f"zip(forms, domUrls): {list(zip(forms, domUrls))}")
for form, domURL in list(zip(forms, domUrls)):
search_tasks_with_proxy.append(
{
@@ -207,13 +211,20 @@
}
)

cprint(
f"Total XSS Targets: {len(search_tasks_with_proxy)}",
color="yellow",
file=sys.stderr,
)

if config["fuzz_xss"]:
raise NotImplementedError("Fuzzing is not implemented yet")
else:
blindPayloads = []
with open("attacks/xss/payloads/blind-xss-payload-list.txt", "r") as f:
blindPayloads = f.readlines()
encoding = base64 if config["encode_xss"] else False

encoding = base64_encoder if config["encode_xss"] else False
with concurrent.futures.ThreadPoolExecutor(
max_workers=number_of_worker
) as executor:
75 changes: 54 additions & 21 deletions bounty_drive/attacks/xss/xss_striker.py
@@ -1,3 +1,4 @@
import base64
import concurrent.futures
import copy
import os
@@ -328,9 +329,9 @@ def generate_xss_urls(url):

def base64_encoder(string):
if re.match(r"^[A-Za-z0-9+\/=]+$", string) and (len(string) % 4) == 0:
return bs4.b64decode(string.encode("utf-8")).decode("utf-8")
return base64.b64decode(string.encode("utf-8")).decode("utf-8")
else:
return bs4.b64encode(string.encode("utf-8")).decode("utf-8")
return base64.b64encode(string.encode("utf-8")).decode("utf-8")


def dom(response):
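The two-line change above is one of the actual bug fixes in this commit: bs4 (BeautifulSoup) has no b64decode/b64encode, so any run with XSS payload encoding enabled raised AttributeError; the helper now uses the stdlib base64 module imported at the top of the file, and xss.py passes the base64_encoder function instead of the bare base64 module. In effect it decodes input that already looks like base64 and encodes everything else. An illustrative round trip with the fixed helper above (values verified by hand):

# "v3dm0s" has length 6, not a multiple of 4, so the first call encodes;
# the 8-character result then decodes back on the second call.
assert base64_encoder("v3dm0s") == "djNkbTBz"
assert base64_encoder("djNkbTBz") == "v3dm0s"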
@@ -527,8 +528,9 @@ def recursive_crawl(target):
params = get_params(target, "", True)
if "=" in target: # if there's a = in the url, there should be GET parameters
inps = []
for name, value in params.items():
inps.append({"name": name, "value": value})
if params:
for name, value in params.items():
inps.append({"name": name, "value": value})
forms.append({0: {"action": url, "method": "get", "inputs": inps}})

headers = {
@@ -572,9 +574,17 @@ def recursive_crawl(target):
with lock_processed:
for p in processed_content:
if processed_content[p] and response:
similarity = html_similarity.structural_similarity(
processed_content[p], response
)
try:
similarity = html_similarity.structural_similarity(
processed_content[p], response
)
except Exception as e:
cprint(
f"Error while comparing HTML content similarity: {e}",
"red",
file=sys.stderr,
)
similarity = 0
if similarity > threshold:
cprint(
f"Skipping already processed URL: {target} - similarity ratio: {similarity} with {p}",
@@ -723,7 +733,10 @@ def html_xss_parser(response, encoding):
}
"""
rawResponse = response # raw response returned by requests
response = response.text # response content
if response:
response = response.text # response content
else:
response = ""
if encoding: # if the user has specified an encoding, encode the probe in that
response = response.replace(encoding(xsschecker), xsschecker)
reflections = response.count(xsschecker)
@@ -874,18 +887,30 @@ def checker(config, proxy, url, params, GET, payload, positions, encoding):
}
response = start_request(
proxies=proxies,
config=config,
base_url=url,
params=replace_value(params, xsschecker, checkString, copy.deepcopy),
headers=headers,
GET=GET,
)

if hasattr(response, "text"):
response = response.text.lower()
if response:
if hasattr(response, "text"):
response = response.text.lower()
else:
response = response.read().decode("utf-8")

reflectedPositions = []
for match in re.finditer("st4r7s", response):
reflectedPositions.append(match.start())
try:
for match in re.finditer("st4r7s", response):
reflectedPositions.append(match.start())
except Exception as e:
cprint(
"An error occurred while processing the st4r7s, response - {e}",
color="red",
file=sys.stderr,
)
pass
filledPositions = fill_holes(positions, reflectedPositions)
# Iterating over the reflections
num = 0
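For context on the guarded loop above: the probe skeletons used by this XSStrike-derived checker embed the marker string st4r7s, so scanning the lowered response for it yields the offsets where the injected skeleton actually survived filtering; fill_holes() then reconciles those offsets with the positions the payload was injected at. A minimal sketch of that scan (everything except the st4r7s marker is illustrative):

import re

def reflected_positions(response_text, marker="st4r7s"):
    # Offsets in the response where the probe's start marker survived filtering.
    return [match.start() for match in re.finditer(marker, response_text)]

# reflected_positions('<input value="st4r7s">') -> [14]
# reflected_positions('sanitised output')       -> []  (nothing reflected)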
@@ -1399,13 +1424,13 @@ def attacker_crawler(
if is_waffed:
cprint(
"WAF detected: %s%s%s" % (green, is_waffed, end),
"red",
color="red",
file=sys.stderr,
)
else:
cprint(
"WAF Status: %sOffline%s" % (green, end),
"green",
color="green",
file=sys.stderr,
)

@@ -1423,14 +1448,15 @@
# TODO add session
proxies = prepare_proxies(proxy, config)
cprint(
f"Testing attack for GET - Session (n° 0): {url} \n\t - parameters {paramsCopy} \n\t - headers {headers} \n\t - xss - with proxy {proxies} ...",
f"Testing attack for {'GET' if GET else 'POST'} - Session (n° 0): {url} \n\t - parameters {paramsCopy} \n\t - headers {headers} \n\t - xss - with proxy {proxies} ...",
"yellow",
file=sys.stderr,
)

response = start_request(
config=config,
proxies=proxies,
data=[paramsCopy],
base_url=url,
params=paramsCopy,
headers=headers,
@@ -1512,7 +1538,7 @@ def attacker_crawler(
for blindPayload in blindPayloads:
paramsCopy[paramName] = blindPayload
cprint(
f"Testing attack for GET with blind payload - Session (n° 0): {url} \n\t - parameters {paramsCopy} \n\t - headers {headers} \n\t - xss - with proxy {proxies} ...",
f"Testing blind XSS attack for {'GET' if GET else 'POST'} with blind payload - Session (n° 0): {url} \n\t - parameters {paramsCopy} \n\t - headers {headers} \n\t - xss - with proxy {proxies} ...",
"yellow",
file=sys.stderr,
)
@@ -1525,11 +1551,18 @@
headers=headers,
GET=GET,
)
cprint(
"Response: %s" % response.text,
"green",
file=sys.stderr,
)
if response:
cprint(
"Response blind XSS: %s" % response.text,
"green",
file=sys.stderr,
)
else:
cprint(
"Response blind XSS: %s" % response,
"green",
file=sys.stderr,
)


# def xss_attack(
1 change: 1 addition & 0 deletions bounty_drive/bounty_drive.py
@@ -47,6 +47,7 @@
import configparser

os.system("clear")
csv.field_size_limit(100000000)

#########################################################################################
# Main function
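csv.field_size_limit(100000000) raises Python's default per-field cap of 131,072 characters; the likely reason is that the crawl CSV now stores whole json.dumps()-serialised DOM/form lists per row (see results_manager.py below), which csv.reader would otherwise refuse to parse. A small illustration of the failure mode this avoids, assuming a similarly oversized field:

import csv
import io

big_field = "x" * 200_000  # stands in for a json.dumps()'d list of DOM URLs
buffer = io.StringIO("seedUrl,doms\nhttp://example.com," + big_field + "\n")

csv.field_size_limit(100_000_000)  # without this, DictReader raises
                                   # _csv.Error: field larger than field limit (131072)
rows = list(csv.DictReader(buffer))
assert len(rows[0]["doms"]) == 200_000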
12 changes: 10 additions & 2 deletions bounty_drive/bypasser/waf_mitigation.py
@@ -64,11 +64,19 @@ def waf_detector(proxies, url, config, mode="xss"):
config=config,
bypassed_403=True,
)
page = response.text
if response:
if hasattr(response, "text"):
page = response.text
else:
page = response.read().decode("utf-8")
else:
cprint(f"Waf Detector: No response {response}", "blue", file=sys.stderr)
return None

code = str(response.status_code)
headers = str(response.headers)
cprint("Waf Detector code: {}".format(code), "blue", file=sys.stderr)
cprint("Waf Detector headers:", response.headers, "blue", file=sys.stderr)
cprint(f"Waf Detector headers: {response.headers}", "blue", file=sys.stderr)

waf_signatures_files = glob.glob("bypasser/waf_signature/*.json", recursive=True)
bestMatch = [0, None]
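The same .text / .read().decode() duck-typing appears here and in checker() above, which suggests start_request() can hand back either a requests-style response, a urllib-style object, or None on failure. If that assumption holds, the normalisation could live in one small helper instead of being repeated; a sketch, not part of the commit:

def response_body(response):
    # Normalise whatever start_request() returned (assumed to be a requests-style
    # response with .text, a urllib-style object with .read(), or None on failure).
    if response is None:
        return ""
    if hasattr(response, "text"):
        return response.text
    return response.read().decode("utf-8", errors="replace")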
2 changes: 1 addition & 1 deletion bounty_drive/configs/config.ini
@@ -2,7 +2,7 @@
extension =
subdomain = true
do_web_scap = true
target_file = configs/target_toolsforhumanity.txt
target_file = configs/target_att.txt
exclusion_file = configs/exclusion_pornbox.txt
target_login = []
logging=DEBUG
1 change: 1 addition & 0 deletions bounty_drive/configs/target_rustica.txt
@@ -0,0 +1 @@
rustica.fr
42 changes: 39 additions & 3 deletions bounty_drive/reporting/results_manager.py
@@ -1,6 +1,7 @@
import csv
import json
import os
import re
import sys

from termcolor import cprint
@@ -258,12 +259,47 @@ def get_crawling_results(settings):
"""
crawling_results = []

color_codes = [
"\033[97m", # white
"\033[92m", # green
"\033[91m", # red
"\033[93m", # yellow
"\033[0m", # end
"\033[7;91m", # back
"\033[93m[!]\033[0m", # info
"\033[94m[?]\033[0m", # que
"\033[91m[-]\033[0m", # bad
"\033[92m[+]\033[0m", # good
"\033[97m[~]\033[0m", # run
]

if os.path.exists(settings["crawl_csv"]):
with open(settings["crawl_csv"], mode="r", newline="") as file:
reader = csv.DictReader(file)
for row in reader:
cprint(
f"Getting {row} to experiment list...",
"blue",
file=sys.stderr,
)
dom_parsed = re.sub(r"\\u001b\[93m|\\u001b\[0m", "", str(row["doms"]))
dom_parsed = dom_parsed.replace("\\\\\\\\", "\\\\")

for color in color_codes:
dom_parsed = dom_parsed.replace(color, "")

forms_parsed = str(row["forms"]).strip("'<>() ").replace("'", '"')
cprint(
f"Getting {dom_parsed} & {forms_parsed} to experiment list under category crawl DOM",
"blue",
file=sys.stderr,
)
crawling_results.append(
(row["seedUrl"], json.loads(row["doms"]), json.loads(row["forms"]))
(
row["seedUrl"],
[] if dom_parsed == "no" else json.loads(dom_parsed),
[] if forms_parsed == "no" else json.loads(forms_parsed),
)
)

return crawling_results
Expand All @@ -289,8 +325,8 @@ def save_crawling_query(result, settings):
crawl_id,
seedUrl,
"yes",
domURLs_temps,
forms_temps,
json.dumps(domURLs_temps),
json.dumps(forms_temps),
] # Success and payload columns are initially empty
writer.writerow(row)
cprint(
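The read and write sides now match: save_crawling_query() stores json.dumps(domURLs_temps) / json.dumps(forms_temps), and get_crawling_results() can json.loads() them back once the termcolor/ANSI escape sequences that leaked into older rows are stripped, which is what the color_codes list and the \u001b regex handle. A more compact way to scrub such sequences before parsing would be a single regex; a sketch, not what the commit does (it would still leave the literal [!]-style prefixes for a list like the one above to remove):

import json
import re

ANSI_RE = re.compile(r"(?:\\u001b|\x1b)\[[0-9;]*m")  # escaped and raw ESC colour sequences

def load_json_field(raw_cell):
    # Strip colour codes, then parse the JSON list stored in the CSV cell;
    # the reader above treats the literal string "no" as an empty result.
    cleaned = ANSI_RE.sub("", raw_cell)
    return [] if cleaned in ("", "no") else json.loads(cleaned)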
