log.debug(f"Error parsing beautifulsoup: {e}")returnFalse
-    user_keywords = [re.compile(r, re.I) for r in ["user", "login", "email"]]
-    pass_keywords = [re.compile(r, re.I) for r in ["pass"]]
-
-    def is_login_page(self, html):
-"""
- TODO: convert this into an excavate YARA rule
-
- Determines if the provided HTML content contains a login page.
-
- This function parses the HTML to search for forms with input fields typically used for
- authentication. If it identifies password fields or a combination of username and password
- fields, it returns True.
-
- Args:
- html (str): The HTML content to analyze.
-
- Returns:
- bool: True if the HTML contains a login page, otherwise False.
-
- Examples:
- >>> is_login_page('<form><input type="text" name="username"><input type="password" name="password"></form>')
- True
-
- >>> is_login_page('<form><input type="text" name="search"></form>')
- False
- """
-        try:
-            soup = BeautifulSoup(html, "html.parser")
-        except Exception as e:
-            log.debug(f"Error parsing html: {e}")
-            return False
-
-        forms = soup.find_all("form")
-
-        # first, check for obvious password fields
-        for form in forms:
-            if form.find_all("input", {"type": "password"}):
-                return True
-
-        # next, check for forms that have both a user-like and password-like field
-        for form in forms:
-            user_fields = sum(bool(form.find_all("input", {"name": r})) for r in self.user_keywords)
-            pass_fields = sum(bool(form.find_all("input", {"name": r})) for r in self.pass_keywords)
-            if user_fields and pass_fields:
-                return True
-        return False
-
    def response_to_json(self, response):
        """
        Convert web response to JSON object, similar to the output of `httpx -irr -json`
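A minimal usage sketch for this helper (hypothetical module context; the exact keys of the returned object are assumed to follow the `httpx -irr -json` format named above):

```python
# Sketch only: assumes `self` is a BBOT module and that the returned dict
# mirrors `httpx -irr -json` fields such as "status_code" (an assumption).
response = await self.helpers.request("https://www.evilcorp.com")
if response is not None:
    j = self.helpers.response_to_json(response)
    self.info(f"Status: {j.get('status_code')}")
```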
@@ -1821,7 +1727,8 @@
    async def curl(self, *args, **kwargs):
        """
        An asynchronous function that runs a cURL command with specified arguments and options.
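A hedged example of invoking the helper (a sketch: the `url` keyword and the text return value are assumptions, since only `*args, **kwargs` appears in the signature here):

```python
# Sketch only: assumes the helper accepts a `url` kwarg and returns the
# command's output as text; both are assumptions from this excerpt alone.
output = await self.helpers.curl(url="https://www.evilcorp.com")
self.debug(f"cURL output: {output[:100]}")
```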
@@ -2399,7 +2305,8 @@
Source code in bbot/core/helpers/web/web.py
-186
+185
+186
+187
+188
+189
@@ -2447,8 +2354,7 @@
231 232 233
-234
-235
+234
async def download(self, url, **kwargs):
    """
    Asynchronous function for downloading files from a given URL. Supports caching with an optional
    time period in hours via the "cache_hrs" keyword argument. In case of successful download,
@@ -2503,136 +2409,6 @@
-is_login_page
-
-is_login_page(html)
-
-TODO: convert this into an excavate YARA rule
-
-Determines if the provided HTML content contains a login page.
-
-This function parses the HTML to search for forms with input fields typically used for
-authentication. If it identifies password fields or a combination of username and password
-fields, it returns True.
-
-Parameters:
-
-    html (str) – The HTML content to analyze.
-
-Returns:
-
-    bool – True if the HTML contains a login page, otherwise False.
-    def is_login_page(self, html):
-"""
- TODO: convert this into an excavate YARA rule
-
- Determines if the provided HTML content contains a login page.
-
- This function parses the HTML to search for forms with input fields typically used for
- authentication. If it identifies password fields or a combination of username and password
- fields, it returns True.
-
- Args:
- html (str): The HTML content to analyze.
-
- Returns:
- bool: True if the HTML contains a login page, otherwise False.
-
- Examples:
- >>> is_login_page('<form><input type="text" name="username"><input type="password" name="password"></form>')
- True
-
- >>> is_login_page('<form><input type="text" name="search"></form>')
- False
- """
-        try:
-            soup = BeautifulSoup(html, "html.parser")
-        except Exception as e:
-            log.debug(f"Error parsing html: {e}")
-            return False
-
-        forms = soup.find_all("form")
-
-        # first, check for obvious password fields
-        for form in forms:
-            if form.find_all("input", {"type": "password"}):
-                return True
-
-        # next, check for forms that have both a user-like and password-like field
-        for form in forms:
-            user_fields = sum(bool(form.find_all("input", {"name": r})) for r in self.user_keywords)
-            pass_fields = sum(bool(form.find_all("input", {"name": r})) for r in self.pass_keywords)
-            if user_fields and pass_fields:
-                return True
-        return False
-
request
@@ -2823,7 +2599,8 @@
Source code in bbot/core/helpers/web/web.py
-79
+78
+79
+80
+81
+82
@@ -2875,8 +2652,7 @@
128 129 130
-131
-132
+131
async def request(self, *args, **kwargs):
    """
    Asynchronous function for making HTTP requests, intended to be the most basic web request
    function used widely across BBOT and within this helper class. Handles various exceptions and timeouts
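A basic usage sketch (assuming an httpx-style response object, or `None` on failure, consistent with the docstring's mention of exception handling):

```python
# Sketch: assumes an httpx-style response object, or None when the request
# fails (consistent with the docstring's exception handling).
response = await self.helpers.request("https://www.evilcorp.com")
if response is not None:
    self.info(f"Got status {response.status_code}")
```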
@@ -2977,7 +2753,8 @@
async def request_batch(self, urls, *args, **kwargs):
    """
    Given a list of URLs, request them in parallel and yield responses as they come in.
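A sketch of the parallel-request pattern described above (assuming the generator yields `(url, response)` pairs):

```python
# Sketch: assumes each yielded item is a (url, response) pair, with response
# possibly None for failed requests.
urls = [f"https://www.evilcorp.com/page/{i}" for i in range(10)]
async for url, response in self.helpers.request_batch(urls):
    if response is not None and response.status_code == 200:
        self.info(f"{url} is live")
```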
@@ -3060,7 +2836,8 @@
async def request_custom_batch(self, urls_and_kwargs):
    """
    Make web requests in parallel with custom options for each request. Yield responses as they come in.
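A sketch of per-request options (the shape of each `urls_and_kwargs` entry as a `(url, kwargs)` pair is an assumption; only the parameter name comes from the signature):

```python
# Sketch: the (url, kwargs) tuple shape is an assumption based on the
# parameter name and is not confirmed by this excerpt.
urls_and_kwargs = [
    ("https://www.evilcorp.com", {"method": "HEAD"}),
    ("https://www.evilcorp.com/login", {"follow_redirects": False}),
]
async for result in self.helpers.request_custom_batch(urls_and_kwargs):
    self.debug(result)
```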
@@ -3131,60 +2907,60 @@
async def extract_in_scope_hostnames(self, s):
    """
    Given a string, uses yara to extract hostnames matching scan targets
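A usage sketch (the iterable-of-hostname-strings return type is an assumption):

```python
# Sketch: the iterable-of-strings return type is an assumption.
text = "redirect detected: https://www.evilcorp.com/login"
hostnames = await self.helpers.extract_in_scope_hostnames(text)
for hostname in hostnames:
    self.debug(f"in-scope hostname: {hostname}")
```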
@@ -4208,9 +4212,7 @@
Source code in bbot/scanner/scanner.py
-745
-746
-747
+747
+748
+749
+750
@@ -4233,7 +4235,9 @@
767 768 769
-770
+770
+771
+772
async def finish(self):
    """Finalizes the scan by invoking the `finished()` method on all active modules if new activity is detected.
    The method is idempotent and will return False if no new activity has been recorded since the last invocation.
@@ -4335,9 +4339,7 @@
Source code in bbot/scanner/scanner.py
-503
-504
-505
+505
+506
+507
+508
@@ -4424,7 +4426,9 @@
589 590 591
-592
+592
+593
+594
async def load_modules(self):
    """Asynchronously import and instantiate all scan modules, including internal and output modules.

    This method is automatically invoked by `setup_modules()`. It performs several key tasks in the following sequence:
@@ -4570,9 +4574,7 @@
async def setup_modules(self, remove_failed=True):
    """Asynchronously initializes all loaded modules by invoking their `setup()` methods.

    Args:
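A lifecycle sketch tying this to `load_modules()` above (the effect of `remove_failed` is inferred from its name, not confirmed by this excerpt):

```python
import asyncio
from bbot.scanner import Scanner

async def main():
    # The target string follows BBOT's public Scanner API; the remove_failed
    # semantics (dropping modules whose setup() failed) are an assumption
    # based on the parameter name.
    scan = Scanner("evilcorp.com")
    # Per the docstring above, setup_modules() invokes load_modules() internally.
    await scan.setup_modules(remove_failed=True)

asyncio.run(main())
```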
@@ -4677,9 +4681,7 @@