diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..e71410b --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,39 @@ +## Issue + +### Description + +Please provide a detailed description of the issue or feature request. Include any relevant information, such as the context in which the issue occurs or the feature is needed. + +### Steps to Reproduce (for bug reports) + +1. Go to '...' +2. Click on '...' +3. Scroll down to '...' +4. See error + +### Expected Behavior + +A clear and concise description of what you expected to happen. + +### Screenshots + +If applicable, add screenshots to help explain your problem. + +### Environment + +- OS: [e.g., Windows, macOS, Linux] +- Burp Suite Version: [e.g., 2023.1] +- Jython Version: [e.g., 2.7.4] +- Other relevant environment details + +### Additional Context + +Add any other context about the problem here. + +### Feature Request + +If you are requesting a new feature, please describe the feature in detail and provide any relevant examples or use cases. + +### Contribution + +We welcome any forks and contributions, especially those that increase the number of supported "configs" through additional inference providers. Please ensure that your contributions follow the project's guidelines and include relevant tests and documentation. \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..69c314b --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,5 @@ +🏴‍☠️ Burpference + +## Ahoy, Mateys! + +Ahoy, ye scurvy dogs, and welcome aboard Burpference! For non-forks, leave that there pull request description blank, and let [rigging](https://github.com/dreadnode/rigging) work its sorcery like a true sea wizard. Arrr! \ No newline at end of file diff --git a/.github/scripts/rigging_pr_decorator.py b/.github/scripts/rigging_pr_decorator.py new file mode 100644 index 0000000..feb4bd6 --- /dev/null +++ b/.github/scripts/rigging_pr_decorator.py @@ -0,0 +1,142 @@ +import asyncio +import base64 +import os +import typing as t + +from pydantic import ConfigDict, StringConstraints + +import rigging as rg +from rigging import logger +from rigging.generator import GenerateParams, Generator, register_generator + +logger.enable("rigging") + +MAX_TOKENS = 8000 +TRUNCATION_WARNING = "\n\n**Note**: Due to the large size of this diff, some content has been truncated." 
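+# CI contract (see main() below): OPENAI_API_KEY must be set, GIT_DIFF carries
+# the diff to summarize (optionally base64-encoded), and the generated
+# description is appended to the file named by GITHUB_OUTPUT as a multiline
+# step output named "content".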
+str_strip = t.Annotated[str, StringConstraints(strip_whitespace=True)] + + +class PRDiffData(rg.Model): + """XML model for PR diff data""" + + content: str_strip = rg.element() + + @classmethod + def xml_example(cls) -> str: + return """<pr-diff-data><content>example diff content</content></pr-diff-data>""" + + +class PRDecorator(Generator): + """Generator for creating PR descriptions""" + + model_config = ConfigDict(arbitrary_types_allowed=True, validate_assignment=True) + + api_key: str = "" + max_tokens: int = MAX_TOKENS + + def __init__(self, model: str, params: rg.GenerateParams) -> None: + api_key = params.extra.get("api_key") + if not api_key: + raise ValueError("api_key is required in params.extra") + + super().__init__(model=model, params=params, api_key=api_key) + self.api_key = api_key + self.max_tokens = params.max_tokens or MAX_TOKENS + + async def generate_messages( + self, + messages: t.Sequence[t.Sequence[rg.Message]], + params: t.Sequence[GenerateParams], + ) -> t.Sequence[rg.GeneratedMessage]: + responses = [] + for message_seq, p in zip(messages, params): + base_generator = rg.get_generator(self.model, params=p) + llm_response = await base_generator.generate_messages([message_seq], [p]) + responses.extend(llm_response) + return responses + + +register_generator("pr_decorator", PRDecorator) + + +async def generate_pr_description(diff_text: str) -> str: + """Generate a PR description from the diff text""" + # Rough heuristic: assume ~4 characters per token when budgeting the diff + diff_tokens = len(diff_text) // 4 + if diff_tokens >= MAX_TOKENS: + char_limit = (MAX_TOKENS * 4) - len(TRUNCATION_WARNING) + diff_text = diff_text[:char_limit] + TRUNCATION_WARNING + + diff_data = PRDiffData(content=diff_text) + params = rg.GenerateParams( + extra={ + "api_key": os.environ["OPENAI_API_KEY"], + "diff_text": diff_text, + }, + temperature=0.1, + max_tokens=500, + ) + + generator = rg.get_generator("pr_decorator!gpt-4-turbo-preview", params=params) + prompt = f"""You are a helpful AI that generates clear and concise PR descriptions with some pirate tongue. + Analyze the provided git diff and create a summary, specifically focusing on the elements of the code that + have changed, high-severity functions, etc., using exactly this format: + + ### PR Summary + + #### Overview of Changes + <high-level overview of the changes> + + #### Key Modifications + 1. 
**<component>**: <description of the change> + (continue as needed) + + #### Potential Impact + - <impact> + (continue as needed) + + Here is the PR diff to analyze: + {diff_data.to_xml()}""" + + chat = await generator.chat(prompt).run() + return chat.last.content.strip() + + +async def main(): + """Main function for CI environment""" + if not os.environ.get("OPENAI_API_KEY"): + raise ValueError("OPENAI_API_KEY environment variable must be set") + + try: + diff_text = os.environ.get("GIT_DIFF", "") + if not diff_text: + raise ValueError("No diff found in GIT_DIFF environment variable") + + try: + diff_text = base64.b64decode(diff_text).decode("utf-8") + except Exception: + padding = 4 - (len(diff_text) % 4) + if padding != 4: + diff_text += "=" * padding + diff_text = base64.b64decode(diff_text).decode("utf-8") + + logger.debug(f"Processing diff of length: {len(diff_text)}") + description = await generate_pr_description(diff_text) + + with open(os.environ["GITHUB_OUTPUT"], "a") as f: + f.write("content<<EOF\n") + f.write(description) + f.write("\nEOF\n") + except Exception as e: + logger.error(f"Error generating PR description: {e}") + raise + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/.github/workflows/rigging_pr_decorator.yml b/.github/workflows/rigging_pr_decorator.yml new file mode 100644 --- /dev/null +++ b/.github/workflows/rigging_pr_decorator.yml +name: Rigging PR Decorator +on: + pull_request: +jobs: + pr-description: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + # Capture the PR diff as a base64-encoded step output + - name: Get PR diff + id: diff + run: | + echo "diff=$(git diff origin/${{ github.base_ref }}...HEAD | base64 -w 0)" >> $GITHUB_OUTPUT + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b #v5.0.3 + with: + python-version: "3.11" + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip cache purge + pip install pydantic + pip install rigging[all] + # Generate the description using the diff + - name: Generate PR Description + id: description + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + PR_NUMBER: ${{ github.event.pull_request.number }} + GIT_DIFF: ${{ steps.diff.outputs.diff }} + run: | + python .github/scripts/rigging_pr_decorator.py + # Update the PR description + - name: Update PR Description + uses: nefrob/pr-description@4dcc9f3ad5ec06b2a197c5f8f93db5e69d2fdca7 #v1.2.0 + with: + content: | + ## AI-Generated Summary + ${{ steps.description.outputs.content }} + --- + This summary was generated with ❤️ by [rigging](https://rigging.dreadnode.io/) + regex: ".*" + regexFlags: s + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..a0dd9e8 --- /dev/null +++ b/.gitignore @@ -0,0 +1,20 @@ +.DS_Store +logs/ +.idea/workspace.xml +.vscode/ +.env +archive/autogpt/.gradle/* +archive/autogpt/.gradle/buildOutputCleanup/cache.properties +.lock + +# Ignore Gradle project-specific cache directory +.gradle + +# Ignore Gradle build output directory +build + +# Ignore $py.class files (generated when running burp) + +.*$py.*class +burpference/api_adapters$py.class +burpference/consts$py.class diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..3bf7003 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,67 @@ +repos: + # Standard pre-commit hooks + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: cef0300fd0fc4d2a87a85fa2093c6b283ea36f4b #v5.0.0 + hooks: + - id: check-added-large-files + args: [--maxkb=36000] + - id: check-executables-have-shebangs + - id: check-shebang-scripts-are-executable + - id: check-json + - id: check-yaml + - id: trailing-whitespace + + # Github actions + - repo: https://github.com/rhysd/actionlint + rev: 5db9d9cde2f3deb5035dea3e45f0a9fff2f29448 #v1.7.4 + hooks: + - id: actionlint + name: Check Github Actions + + # Secrets detection + - repo: https://github.com/Yelp/detect-secrets + rev: 01886c8a910c64595c47f186ca1ffc0b77fa5458 #v1.5.0 + hooks: + - id: detect-secrets + name: Detect secrets + args: + - '--baseline' + - '.secrets.baseline' + - '--exclude-files' + - 'components/api/migrations/*' + - '--exclude-files' + - 
'components/api/app/assets/*' + - '--exclude-files' + - '\.sops\.yaml$' + - '--exclude-files' + - 'secrets\.enc\.yaml$' + - '--exclude-files' + - 'components/strikes/*' + + # Python linting + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: 8b76f04e7e5a9cd259e9d1db7799599355f97cdf # v0.8.2 + hooks: + # Run the linter. + - id: ruff + # Run the formatter. + - id: ruff-format + + # Python code security + - repo: https://github.com/PyCQA/bandit + rev: 8fd258abbac759d62863779f946d6a88e8eabb0f #1.8.0 + hooks: + - id: bandit + name: Code security checks + args: ["-c", "pyproject.toml"] + additional_dependencies: ["bandit[toml]"] + + - repo: local + hooks: + # Ensure our GH actions are pinned to a specific hash + - id: check-github-actions + name: Check GitHub Actions for Pinned Dependencies + entry: python .scripts/check_pinned_hash_dependencies.py + language: python + files: \.github/.*\.yml$ \ No newline at end of file diff --git a/.scripts/check_pinned_hash_dependencies.py b/.scripts/check_pinned_hash_dependencies.py new file mode 100644 index 0000000..f662169 --- /dev/null +++ b/.scripts/check_pinned_hash_dependencies.py @@ -0,0 +1,123 @@ +import re +import sys +from pathlib import Path +from typing import List, Tuple + + +class GitHubActionChecker: + def __init__(self): + # Pattern for actions with SHA-1 hashes (pinned) + self.pinned_pattern = re.compile(r"uses:\s+([^@\s]+)@([a-f0-9]{40})") + + # Pattern for actions with version tags (unpinned) + self.unpinned_pattern = re.compile( + r"uses:\s+([^@\s]+)@(v\d+(?:\.\d+)*(?:-[a-zA-Z0-9]+(?:\.\d+)*)?)" + ) + + # Pattern for all uses statements + self.all_uses_pattern = re.compile(r"uses:\s+([^@\s]+)@([^\s\n]+)") + + def get_line_numbers( + self, content: str, pattern: re.Pattern + ) -> List[Tuple[str, int]]: + """Find matches with their line numbers.""" + matches = [] + for i, line in enumerate(content.splitlines(), 1): + for match in pattern.finditer(line): + matches.append((match.group(0), i)) + return matches + + def check_file(self, file_path: str) -> bool: + """Check a single file for unpinned dependencies.""" + try: + content = Path(file_path).read_text() + except Exception as e: + print(f"\033[91mError reading file {file_path}: {e}\033[0m") + return False + + # Get matches with line numbers + pinned_matches = self.get_line_numbers(content, self.pinned_pattern) + unpinned_matches = self.get_line_numbers(content, self.unpinned_pattern) + all_matches = self.get_line_numbers(content, self.all_uses_pattern) + + print(f"\n\033[1m[=] Checking file: {file_path}\033[0m") + + # Print pinned dependencies + if pinned_matches: + print("\033[92m[+] Pinned:\033[0m") + for match, line_num in pinned_matches: + print(f" |- {match} \033[90m({file_path}:{line_num})\033[0m") + + # Track all found actions for validation + found_actions = set() + for match, _ in pinned_matches + unpinned_matches: + action_name = self.pinned_pattern.match( + match + ) or self.unpinned_pattern.match(match) + if action_name: + found_actions.add(action_name.group(1)) + + has_errors = False + + # Check for unpinned dependencies + if unpinned_matches: + has_errors = True + print("\033[93m[!] 
Unpinned (using version tags):\033[0m") + for match, line_num in unpinned_matches: + print(f" |- {match} \033[90m({file_path}:{line_num})\033[0m") + + # Check for completely unpinned dependencies (no SHA or version) + unpinned_without_hash = [ + (match, line_num) + for match, line_num in all_matches + if not any(match in pinned[0] for pinned in pinned_matches) + and not any(match in unpinned[0] for unpinned in unpinned_matches) + ] + + if unpinned_without_hash: + has_errors = True + print("\033[91m[!] Completely unpinned (no SHA or version):\033[0m") + for match, line_num in unpinned_without_hash: + print( + f" |- {match} \033[90m({file_path}:{line_num})\033[0m" + ) + + # Print summary + total_actions = ( + len(pinned_matches) + len(unpinned_matches) + len(unpinned_without_hash) + ) + if total_actions == 0: + print("\033[93m[!] No GitHub Actions found in this file\033[0m") + else: + print("\n\033[1mSummary:\033[0m") + print(f"Total actions: {total_actions}") + print(f"Pinned: {len(pinned_matches)}") + print(f"Unpinned with version: {len(unpinned_matches)}") + print(f"Completely unpinned: {len(unpinned_without_hash)}") + + return not has_errors + + +def main(): + checker = GitHubActionChecker() + files_to_check = sys.argv[1:] + + if not files_to_check: + print("\033[91mError: No files provided to check\033[0m") + print("Usage: python script.py <file1> <file2> ...") + sys.exit(1) + + results = {file: checker.check_file(file) for file in files_to_check} + + # Print final summary + print("\n\033[1mFinal Results:\033[0m") + for file, passed in results.items(): + status = "\033[92m✓ Passed\033[0m" if passed else "\033[91m✗ Failed\033[0m" + print(f"{status} {file}") + + if not all(results.values()): + sys.exit(1) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000..033d561 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1 @@ +* @GangGreenTemperTatum \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..d579497 --- /dev/null +++ b/README.md @@ -0,0 +1,152 @@ +# burpference + +Experimenting with yarrr' Burp Proxy tab going brrrrrrrrrrrrr. + +- [burpference](#burpference) + - [Prerequisites](#prerequisites) + - [Setup Guide](#setup-guide) + - [1. Download and Install Burp Suite](#1-download-and-install-burp-suite) + - [2. Download and import Jython standalone JAR file](#2-download-and-import-jython-standalone-jar-file) + - [Steps to Download and Set Up Jython:](#steps-to-download-and-set-up-jython) + - [3. Add the Burpference Extension to Burp Suite](#3-add-the-burpference-extension-to-burp-suite) + - [Steps to Add the Extension:](#steps-to-add-the-extension) + - [4. Setup your configs](#4-setup-your-configs) + - [Checkout the config docs:](#checkout-the-config-docs) + - [5. Additional options:](#5-additional-options) + - [Development and known bugs:](#development-and-known-bugs) + - [Support the Project and Contributing](#support-the-project-and-contributing) + +"_burpference_" started as a research idea exploring offensive agent capabilities and is a fun take on Burp Suite and running inference. The extension is open-source and designed to capture in-scope HTTP requests and responses from Burp's proxy history and ship them to a remote LLM API in JSON format. 
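Concretely, each in-scope request/response pair is packaged as a single JSON object before being shipped to the configured endpoint. A sketch of the shape, following the `http_pair` structure built in `burpference/burpference.py` (values are illustrative):

```json
{
  "request": {
    "method": "POST",
    "url": "https://target.example/api/login",
    "headers": {"Host": "target.example", "Content-Type": "application/json"},
    "body": "{\"username\": \"admin\", \"password\": \"...\"}"
  },
  "response": {
    "status_code": 200,
    "headers": {"Content-Type": "application/json"},
    "body": "{\"token\": \"...\"}"
  }
}
```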
+ +It takes a flexible approach: you can configure custom system prompts, store API keys, and select remote hosts from numerous model providers, or create your own API configuration entirely. The idea is for an LLM to act as an agent in an offensive web application engagement, augmenting your skills and surfacing findings and lingering vulnerabilities. Creating your own configuration and model provider also lets you host models locally via Ollama, avoiding potentially high inference costs, network delays, and rate limits. + +Some key features: + +- **Automated Response Capture**: Burp Suite acts as your client monitor, automatically capturing responses that fall within your defined scope. This extension listens for, captures, and processes these details with an offensive-focused agent. +- **API Integration**: Once requests and response streams are captured, they are packaged and forwarded to your configured API endpoint in JSON format, including any necessary system-level prompts or authentication tokens. + - Only in-scope items are sent, optimizing resource usage and avoiding unnecessary API calls. + - By default, [certain MIME types are excluded](https://github.com/dreadnode/burpference/blob/779d42fc31c5414b64ed857ed0e2770d5c278e4b/burpference/burpference.py#L606). + - Color-coded tabs display `critical/high/medium/low/informational` findings from your model for easy visualization. +- **Comprehensive Logging**: A logging system allows you to review intercepted responses, API requests sent, and replies received—all clearly displayed for analysis. + - A clean table interface displaying all logs, intercepted responses, API calls, and status codes for comprehensive engagement tracking. + - Stores inference logs in both the "_Inference Logger_" tab as a live preview and a timestamped file in the /logs directory. +- **Flexible Configuration**: Customize system prompts, API keys, or remote hosts as needed. Use your own configuration files for seamless integration with your workflow. + - Supports custom configurations, allowing you to load and switch between system prompts, API keys, and remote hosts. + - [Several examples](configs/README.md) are provided in the repository, and contributions for additional provider plugins are welcome. + +So grab yer compass, hoist the mainsail, and let **burpference** be yer guide as ye plunder the seven seas of HTTP traffic! Yarrr'! + +--- + +## Prerequisites + +Before using **Burpference**, ensure you have the following: + +1. Due to its awesomeness, burpference may require higher system resources to run optimally, especially if using local models. Trust the process and make the machines go brrrrrrrrrrrrr! +2. Installed Burp Suite (Community or Professional edition). +3. Downloaded and set up the Jython standalone `.jar` file (a Python interpreter compatible with Java) to run Python-based extensions in Burp Suite. + 1. You do not need a Python 2.x runtime in your environment for this to work. +4. The [`registerExtenderCallbacks`](https://github.com/dreadnode/burpference/blob/779d42fc31c5414b64ed857ed0e2770d5c278e4b/burpference/burpference.py#L47) reads a configuration file specific to the remote endpoint's input requirements. Ensure this exists in your environment and Burp has the necessary permissions to access its location on the filesystem. + 1. 
**Important**: as Burp Suite cannot read from a filesystem's `os` environment, you will need to explicitly include API key values in the configuration `.json` files per-provider. + 2. If you intend to fork or contribute to burpference, ensure that you have excluded the files from git tracking via `.gitignore`. + 3. There's also a pre-commit hook in the repo as an additional safety net. Install pre-commit hooks [here](https://pre-commit.com/#install). +5. Setup relevant directory permissions for burpference to create log files: + +`chmod -R 755 logs configs` + +**In some cases you may experience directory write-permission issues when loading the extension; if so, it's recommended to restart Burp Suite after running the command above.** + +6. Ollama installed locally if using this provider plugin ([example config](configs/ollama_mistral-small.json)), with the model running locally - i.e. `ollama run mistral-small` ([model docs](https://ollama.com/library/mistral-small)). + 1. Ollama is [now](https://huggingface.co/docs/hub/en/ollama) compatible with any HuggingFace GGUF model, which expands the capabilities for using this provider plugin. + 1. There's a template config [here](configs/README.md#ollama-gguf) for you to clone and add your own models. + +--- + +## Setup Guide + +### 1. Download and Install Burp Suite + +If Burp Suite is not already installed, download it from: +[Burp Suite Community/Professional](https://portswigger.net/burp/communitydownload) + +--- + +### 2. Download and import Jython standalone JAR file + +Jython enables Burp Suite to run Python-based extensions. You will need to download and configure it within Burp Suite. + +#### Steps to Download and Set Up Jython: + +1. Go to the [Jython Downloads Page](https://www.jython.org/download). +2. Download the standalone Jython `.jar` file (e.g., `jython-standalone-2.7.4.jar`). +3. Open Burp Suite. +4. Go to the `Extensions` tab in Burp Suite. +5. Under the `Options` tab, scroll down to the **Python Environment** section. +6. Click **Select File**, and choose the `jython-standalone-2.7.4.jar` file you just downloaded. +7. Click **Apply** to load the Jython environment into Burp Suite. + +--- + +### 3. Add the Burpference Extension to Burp Suite + +#### Steps to Add the Extension: + +Download the latest supported [release](https://github.com/dreadnode/burpference/releases) from the repo, unzip it and add it as a Python-based extension in Burp Suite. **It's recommended to save this in a `~/git` directory based on the current code and how the logs and configs are structured.** + +1. Open Burp Suite. +2. Navigate to the Extensions tab. +3. Click on Add to install a new extension. +4. In the dialog box: + 1. Extension Type: Choose Python and the `burpference/burpference.py` file; this will instruct Burp Suite to initialize the extension by invoking the `registerExtenderCallbacks` method. + Click Next and the extension will be loaded. 🚀 + +If you prefer to build from source, clone the repo and follow the steps above: + +1. Download or clone the **Burpference** project from GitHub: + + ```bash + git clone https://github.com/dreadnode/burpference.git + ``` + +### 4. Setup your configs + +#### Checkout the config docs: + +Head over to the [configuration docs](./configs/README.md)! + +### 5. Additional options: + +We also recommend setting up a custom [hotkey](https://portswigger.net/burp/documentation/desktop/settings/ui/hotkeys) in Burp to save clicks. 
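If you're using an Ollama provider plugin, it can also save debugging time to confirm the local endpoint answers before loading its config. A minimal smoke test, assuming the default Ollama port and the `mistral-small` model from the example config:

```bash
# List locally available models, then confirm the chat endpoint responds
ollama list
curl -s http://localhost:11434/api/chat -d '{
  "model": "mistral-small",
  "stream": false,
  "messages": [{"role": "user", "content": "ping"}]
}'
```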
+ +--- + +## Development and known bugs: + +The longer-term roadmap is a potential Kotlin-based successor (mainly due to the limitations of Jython with the [Extender API](https://portswigger.net/burp/extender/api/)), or additionally something to complement burpference. + +The bullets below are ideas for a later stage of the repo or areas still under active development. + +- **Scanner** + - An additional custom one-click "scanner" tab which scans an API target/schema with a selected model and reports findings/payloads and PoCs. +- **Conversations** + - Enhanced conversation turns with the model to reflect turns for both HTTP requests and responses to build context. +- **Prompt Tuning**: + - Modularize a centralized source of prompts sent to all models. + - Grounding and context: Equip the model with context, providing links to OpenAPI schemas and developer documentation. +- **Offensive Agents and Tool Use** + - Equip agents with burpference results detail and tool use for the weaponization and exploitation phases. +- **Optimization**: + - Extend functionality of selecting multiple configurations and sending results across multiple endpoints for optimal results. + - Introduce judge reward systems for findings. + +Known issues reported so far are tracked against issues in the repo. + +--- + +## Support the Project and Contributing + +We welcome any issues or contributions to the project - share the treasure! If you like our project, please feel free to drop us some love <3 + +[![GitHub stars](https://img.shields.io/github/stars/dreadnode/burpference?style=social)](https://github.com/dreadnode/burpference/stargazers) + +By watching the repo, you can also be notified of any upcoming releases. + + diff --git a/assets/squid_ascii.txt b/assets/squid_ascii.txt new file mode 100644 index 0000000..89b22bb --- /dev/null +++ b/assets/squid_ascii.txt @@ -0,0 +1,33 @@ + :=+*#%@@%%#*=-. + -*%@@@@@@@@@@@@@@@#=. + -%@@@@#+-:. ..:=*%@@@@*. + .#@@@%%*. -#@@@@= + :%@@@*. :+*=. -%@@@+ + %@@@= :+*- .%@@@- + -@@@# .. -**:.=+*=: -@@@% + *@@@= *@@@%- #@@@@@@+ %@@@. + *@@@- .@@@@@# #@@@@@@% %@@@: + *@@@- :+**=. .*%@@@#*+:%@@@: + *@@@- .. .=%@@@: + *@@@- %@@@: + *@@@- %@@@: + *@@@*: =%@@@: + -#@@@@#= -*%@@@%*. + . :+%@@@%*- :+%@@@@#= . + =@%+. -*@@@@%+: .*@@@@%=. =#@* + %@@@: :: .=#@@@@#=. :++: : %@@@- + %@@@= .=#@@#= .+%@@@@#- :*%@%+: :%@@@: + :%@@@@%@@@@%*: -#@@@@%+. .+%@@@@%%@@@@= + .=#%@@@%*- -=: .=#@@@@#=. :+%@@@%#+. + :+%@@@#: :+%@@@@*- + :#=. =#@@@@%+. :: -#@@@@%+. .+%= + %@@%: :*%@@@%*: #@+ .=%@@@@#- .%@@@: + .@@@% .+%@@@@#- =#: #@* := :*%@@@@*: #@@@- + *@@@%**#@@@@#=. @@: #@* -@@ -*@@@@%**%@@@# + -#@@@@@@%+: .+% @@: ... -@@ +: .=#@@@@@@#- + .--=-: =@@ @@: +*= -@@ @@- .:--:. 
+ =@@ ##: --: -## @@- + =@@ @@- + =%% @@: -@@ %%- + + =@@ @@- \ No newline at end of file diff --git a/burpference/api_adapters.py b/burpference/api_adapters.py new file mode 100644 index 0000000..5be8ff5 --- /dev/null +++ b/burpference/api_adapters.py @@ -0,0 +1,243 @@ +import json +from abc import ABCMeta, abstractmethod +import urllib2 + +# Base class API adapter + + +class BaseAPIAdapter(object): + __metaclass__ = ABCMeta + + def __init__(self, config): + self.config = config + + @abstractmethod + def prepare_request(self, user_content, system_content=None): + pass + + @abstractmethod + def process_response(self, response_data): + pass + +# Ollama /generate API adapter class + + +class OllamaGenerateAPIAdapter(BaseAPIAdapter): + def prepare_request(self, system_content, user_content): + prompt = "{0}\n\nUser request:\n{1}".format( + system_content, user_content) + return { + "model": self.config.get("model", "llama3.2"), + "prompt": prompt, + "format": self.config.get("format", "json"), + "stream": self.config.get("stream", False) + } + + def process_response(self, response_data): + return json.loads(response_data) + + +# Ollama /chat API adapter class + +class OllamaChatAPIAdapter(BaseAPIAdapter): + def prepare_request(self, system_content, user_content): + total_input_size = len(system_content) + len(user_content) + max_tokens = self.config.get("max_input_size", 32000) # Default to 32k if not specified + + if total_input_size > max_tokens: + raise ValueError("Input size (%d chars) exceeds maximum allowed (%d)" % (total_input_size, max_tokens)) + + model = self.config.get("model", "llama3.2") + quantization = self.config.get("quantization") + if model.startswith("hf.co/") or model.startswith("huggingface.co/"): + if quantization: + model = "{0}:{1}".format(model, quantization) + + try: + system_content = system_content.encode('utf-8', errors='replace').decode('utf-8') + user_content = user_content.encode('utf-8', errors='replace').decode('utf-8') + except Exception as e: + raise ValueError("Error encoding content: %s" % str(e)) + + return { + "model": model, + "messages": [ + {"role": "system", "content": system_content}, + {"role": "user", "content": user_content} + ], + "stream": self.config.get("stream", False) + } + + def process_response(self, response_data): + return json.loads(response_data) + +# OpenAI /v1/chat/completions API adapter class + + +class OpenAIChatAPIAdapter(BaseAPIAdapter): + def prepare_request(self, user_content, system_content=None): + return { + "model": self.config.get("model", "gpt-4o-mini"), + "messages": [ + {"role": "system", "content": system_content}, + {"role": "user", "content": user_content} + ] + } + + def process_response(self, response_data): + response = json.loads(response_data) + if 'choices' in response and len(response['choices']) > 0: + if 'message' in response['choices'][0]: + return response['choices'][0]['message']['content'] + else: + raise ValueError("Unexpected response format: %s" % response) + else: + raise ValueError("No choices in response: %s" % response) + + def send_request(self, request_payload): + headers = { + "Authorization": "Bearer {0}".format(self.config.get("api_key", "")), + "Content-Type": "application/json" + } + req = urllib2.Request(self.config.get( + "host"), json.dumps(request_payload), headers=headers) + req.get_method = lambda: 'POST' + response = urllib2.urlopen(req) + return response.read() + + +# Anthropic /v1/messages API adapter class + +class AnthropicAPIAdapter(BaseAPIAdapter): + def prepare_request(self, user_content, 
system_content=None): + return { + "model": self.config.get("model", "claude-3-5-sonnet-20241022"), + "max_tokens": int(self.config.get("max_tokens", 1020)), + "system": system_content, + "messages": [ + {"role": "user", "content": user_content} + ] + } + + def send_request(self, request_payload): + headers = { + "x-api-key": self.config.get("headers", {}).get("x-api-key", ""), + "content-type": "application/json", + "anthropic-version": self.config.get("headers", {}).get("anthropic-version", "2023-06-01") + } + req = urllib2.Request(self.config.get("host"), + data=json.dumps(request_payload).encode('utf-8'), + headers=headers) + req.get_method = lambda: 'POST' + try: + response = urllib2.urlopen(req) + return response.read() + except urllib2.HTTPError as e: + error_message = e.read().decode('utf-8') + raise ValueError("HTTP Error %d: %s" % (e.code, error_message)) + except Exception as e: + raise ValueError("Error sending request: %s" % str(e)) + + def process_response(self, response_data): + response = json.loads(response_data) + if 'message' in response: + return response['message']['content'] + elif 'content' in response: + return response['content'] + else: + raise ValueError("Unexpected response format: %s" % response) + +# Groq openai/v1/chat/completions + + +class GroqOpenAIChatAPIAdapter(BaseAPIAdapter): + def prepare_request(self, user_content, system_content=None): + return { + "model": self.config.get("model", "mixtral-8x7b-32768"), + "max_tokens": int(self.config.get("max_tokens", 1020)), + "messages": [ + {"role": "system", "content": system_content}, + {"role": "user", "content": user_content} + ] + } + + def process_response(self, response_data): + response = json.loads(response_data) + return response['choices'][0]['message']['content'] + + def send_request(self, request_payload): + headers = { + "x-api-key": "{0}".format(self.config.get("api_key", "")), + "Content-Type": "application/json" + } + req = urllib2.Request(self.config.get( + "host"), json.dumps(request_payload), headers=headers) + req.get_method = lambda: 'POST' + response = urllib2.urlopen(req) + return response.read() + + +class GroqOpenAIChatAPIStreamAdapter(BaseAPIAdapter): + def prepare_request(self, system_content, user_content): + return { + "model": self.config.get("model", "llama3-8b-8192"), + "max_tokens": int(self.config.get("max_tokens", 1020)), + "messages": [ + {"role": "system", "content": system_content}, + {"role": "user", "content": user_content} + ] + } + + def process_response(self, response_data): + response = json.loads(response_data) + return response['choices'][0]['message']['content'] + + def send_request(self, request_payload): + headers = { + "x-api-key": "{0}".format(self.config.get("api_key", "")), + "Content-Type": "application/json" + } + req = urllib2.Request(self.config.get( + "host"), json.dumps(request_payload), headers=headers) + req.get_method = lambda: 'POST' + response = urllib2.urlopen(req) + return response.read() + +# Generic other API base adapter + + +class OtherAPIAdapter(BaseAPIAdapter): + def prepare_request(self, system_content, user_content): + # Implement for other API types + pass + + def process_response(self, response_data): + # Implement for other API types + pass + + +# Function to define and load the API adapter + +def get_api_adapter(config): + api_type = config.get("api_type", "").lower() + endpoint = config.get("host", "").lower() + + if api_type == "ollama": + if "/generate" in endpoint: + return OllamaGenerateAPIAdapter(config) + elif "/chat" in endpoint: + return 
OllamaChatAPIAdapter(config) + else: + raise ValueError("Unsupported Ollama endpoint: %s" % endpoint) + elif api_type == "openai": + return OpenAIChatAPIAdapter(config) + elif api_type == "anthropic": + return AnthropicAPIAdapter(config) + elif api_type == "groq-openai": + return GroqOpenAIChatAPIAdapter(config) + elif api_type == "groq-openai-stream": + return GroqOpenAIChatAPIStreamAdapter(config) + elif api_type == "other": + return OtherAPIAdapter(config) + else: + raise ValueError("Unsupported API type: %s" % api_type) diff --git a/burpference/burpference.py b/burpference/burpference.py new file mode 100644 index 0000000..94be978 --- /dev/null +++ b/burpference/burpference.py @@ -0,0 +1,779 @@ +# -*- coding: utf-8 -*- +# type: ignore[import] +from burp import IBurpExtender, ITab, IHttpListener +from java.awt import BorderLayout, GridBagLayout, GridBagConstraints, Font +from javax.swing import ( + JPanel, JTextArea, JScrollPane, + BorderFactory, JSplitPane, JButton, JComboBox, + JTable, table, ListSelectionModel, JOptionPane, JTextField, JTabbedPane) +from javax.swing.table import DefaultTableCellRenderer, TableRowSorter +from javax.swing.border import TitledBorder +from java.util import Comparator +import json +import urllib2 +import os +from datetime import datetime +from consts import * +from api_adapters import get_api_adapter + + +def load_ascii_art(file_path): + try: + with open(file_path, 'r') as file: + return file.read() + except IOError: + return "Failed to load ASCII art" + + +SQUID_ASCII = load_ascii_art(SQUID_ASCII_FILE) + + +class BurpExtender(IBurpExtender, ITab, IHttpListener): + + def __init__(self): + self.popupShown = False + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + try: + if not os.path.exists(LOG_DIR): + os.makedirs(LOG_DIR, 0755) + except OSError as e: + print("Failed to create log directory: %s" % str(e)) + + self.log_file_path = os.path.join( + LOG_DIR, + "burpference_log_{}.txt".format(timestamp) + ) + self.config = None + self.api_adapter = None + self.is_running = True + self.logArea = None + self.temp_log_messages = [] + self.request_counter = 0 + self.log_message("Extension initialized and running.") + + def registerExtenderCallbacks(self, callbacks): + self._callbacks = callbacks + self._helpers = callbacks.getHelpers() + callbacks.setExtensionName("burpference") + + # Create main panel + self._panel = JPanel(BorderLayout()) + self._panel.setBackground(DARK_BACKGROUND) + + # Create input panel + inputPanel = JPanel(GridBagLayout()) + inputPanel.setBackground(DARK_BACKGROUND) + + outerBorder = BorderFactory.createTitledBorder( + BorderFactory.createLineBorder(DREADNODE_ORANGE), + "burpference, made with <3 by @dreadnode" + ) + outerBorder.setTitleColor(DREADNODE_PURPLE) + outerBorder.setTitleFont(Font(Font.SANS_SERIF, Font.BOLD, 12)) + + inputPanel.setBorder(outerBorder) + + c = GridBagConstraints() + c.fill = GridBagConstraints.HORIZONTAL + c.weightx = 1 + c.gridx = 0 + c.gridy = 0 + + # Load configuration files dynamically from directory + self.configFiles = self.loadConfigFiles() + self.configSelector = JComboBox(self.configFiles) + self.configSelector.setBackground(LIGHTER_BACKGROUND) + self.configSelector.setForeground(DREADNODE_GREY) + self.configSelector.addActionListener(self.loadConfiguration) + c.gridy += 1 + inputPanel.add(self.configSelector, c) + + # stopButton + c.gridy += 1 + self.stopButton = JButton("Stop Extension") + self.stopButton.setBackground(DREADNODE_ORANGE) + self.stopButton.setForeground(DREADNODE_GREY) + 
self.stopButton.addActionListener(self.stopExtension) + inputPanel.add(self.stopButton, c) + + # Log area + self.logArea = JTextArea(10, 30) + self.logArea.setEditable(False) + self.logArea.setBackground(LIGHTER_BACKGROUND) + self.logArea.setForeground(DREADNODE_GREY) + self.logArea.setCaretColor(DREADNODE_GREY) + logScrollPane = JScrollPane(self.logArea) + border = BorderFactory.createTitledBorder( + BorderFactory.createLineBorder(DREADNODE_ORANGE), + "Extension Log Output" + ) + border.setTitleColor(DREADNODE_ORANGE) + boldFont = border.getTitleFont().deriveFont(Font.BOLD, 14) + border.setTitleFont(boldFont) + logScrollPane.setBorder(border) + + # Add any temporary log messages + for log_entry in self.temp_log_messages: + self.logArea.append(log_entry) + self.temp_log_messages = [] + + # Create a split pane for input and log + splitPane = JSplitPane(JSplitPane.VERTICAL_SPLIT, + inputPanel, logScrollPane) + splitPane.setBackground(DARK_BACKGROUND) + splitPane.setDividerSize(0) + splitPane.setEnabled(False) + splitPane.setResizeWeight(0.1) + + # Create HTTP history table + self.historyTable = JTable() + self.historyTable.setBackground(LIGHTER_BACKGROUND) + self.historyTable.setForeground(DREADNODE_GREY) + self.historyTable.setSelectionBackground(DREADNODE_ORANGE) + self.historyTable.setSelectionForeground(DREADNODE_GREY) + self.historyTable.setGridColor(DREADNODE_GREY) + self.historyTableModel = table.DefaultTableModel( + ["#", "Timestamp", "Host", "URL", "Request", "Response"], 0) + self.historyTable.setModel(self.historyTableModel) + + class NumericComparator(Comparator): + def compare(self, s1, s2): + # Convert strings to integers for comparison + try: + n1 = int(s1) + n2 = int(s2) + return n1 - n2 + except: + return 0 # Return 0 if conversion fails + + # Add sorting capability + sorter = TableRowSorter(self.historyTableModel) + self.historyTable.setRowSorter(sorter) + + # Set the numeric comparator for the ID column + sorter.setComparator(0, NumericComparator()) + + # Set selection mode and listener + self.historyTable.setSelectionMode(ListSelectionModel.SINGLE_SELECTION) + self.historyTable.getSelectionModel().addListSelectionListener( + self.historyTableSelectionChanged) + historyScrollPane = JScrollPane(self.historyTable) + border = BorderFactory.createTitledBorder( + BorderFactory.createLineBorder(DREADNODE_ORANGE), + "HTTP History" + ) + border.setTitleColor(DREADNODE_ORANGE) + boldFont = border.getTitleFont().deriveFont(Font.BOLD, 14) + border.setTitleFont(boldFont) + historyScrollPane.setBorder(border) + + # Create request and response areas + self.requestArea = JTextArea(10, 30) + self.requestArea.setEditable(False) + self.requestArea.setLineWrap(True) + self.requestArea.setWrapStyleWord(True) + self.requestArea.setBackground(LIGHTER_BACKGROUND) + self.requestArea.setForeground(DREADNODE_ORANGE) + self.requestArea.setCaretColor(DREADNODE_GREY) + requestScrollPane = JScrollPane(self.requestArea) + border = BorderFactory.createTitledBorder( + BorderFactory.createLineBorder(DREADNODE_ORANGE), + "Inference Request - Live View" + ) + border.setTitleColor(DREADNODE_ORANGE) + boldFont = border.getTitleFont().deriveFont(Font.BOLD, 14) + border.setTitleFont(boldFont) + requestScrollPane.setBorder(border) + + self.responseArea = JTextArea(10, 30) + self.responseArea.setEditable(False) + self.responseArea.setLineWrap(True) + self.responseArea.setWrapStyleWord(True) + self.responseArea.setBackground(LIGHTER_BACKGROUND) + self.responseArea.setForeground(DREADNODE_ORANGE) + 
self.responseArea.setCaretColor(DREADNODE_GREY) + responseScrollPane = JScrollPane(self.responseArea) + border = BorderFactory.createTitledBorder( + BorderFactory.createLineBorder(DREADNODE_ORANGE), + "Inference Response - Live View" + ) + border.setTitleColor(DREADNODE_ORANGE) + boldFont = border.getTitleFont().deriveFont(Font.BOLD, 14) + border.setTitleFont(boldFont) + responseScrollPane.setBorder(border) + + # Create split panes + diffSplitPane = JSplitPane( + JSplitPane.HORIZONTAL_SPLIT, requestScrollPane, responseScrollPane) + diffSplitPane.setBackground(DARK_BACKGROUND) + diffSplitPane.setDividerSize(2) + diffSplitPane.setResizeWeight(0.5) + + # Selected request/response areas + self.selectedRequestArea = JTextArea(10, 30) + self.selectedRequestArea.setEditable(False) + self.selectedRequestArea.setLineWrap(True) + self.selectedRequestArea.setWrapStyleWord(True) + self.selectedRequestArea.setBackground(LIGHTER_BACKGROUND) + self.selectedRequestArea.setForeground(DREADNODE_ORANGE) + self.selectedRequestArea.setCaretColor(DREADNODE_GREY) + selectedRequestScrollPane = JScrollPane(self.selectedRequestArea) + border = BorderFactory.createTitledBorder( + BorderFactory.createLineBorder(DREADNODE_ORANGE), + "Selected HTTP Request & Response" + ) + border.setTitleColor(DREADNODE_ORANGE) + boldFont = border.getTitleFont().deriveFont(Font.BOLD, 14) + border.setTitleFont(boldFont) + selectedRequestScrollPane.setBorder(border) + + self.selectedResponseArea = JTextArea(10, 30) + self.selectedResponseArea.setEditable(False) + self.selectedResponseArea.setLineWrap(True) + self.selectedResponseArea.setWrapStyleWord(True) + self.selectedResponseArea.setBackground(LIGHTER_BACKGROUND) + self.selectedResponseArea.setForeground(DREADNODE_ORANGE) + self.selectedResponseArea.setCaretColor(DREADNODE_GREY) + selectedResponseScrollPane = JScrollPane(self.selectedResponseArea) + border = BorderFactory.createTitledBorder( + BorderFactory.createLineBorder(DREADNODE_ORANGE), + "Selected Inference Response" + ) + border.setTitleColor(DREADNODE_ORANGE) + boldFont = border.getTitleFont().deriveFont(Font.BOLD, 14) + border.setTitleFont(boldFont) + selectedResponseScrollPane.setBorder(border) + + selectedDiffSplitPane = JSplitPane( + JSplitPane.HORIZONTAL_SPLIT, selectedRequestScrollPane, selectedResponseScrollPane) + selectedDiffSplitPane.setBackground(DARK_BACKGROUND) + selectedDiffSplitPane.setDividerSize(2) + selectedDiffSplitPane.setResizeWeight(0.5) + + # Main split pane for history and selected entry + mainSplitPane = JSplitPane( + JSplitPane.VERTICAL_SPLIT, historyScrollPane, selectedDiffSplitPane) + mainSplitPane.setBackground(DARK_BACKGROUND) + mainSplitPane.setDividerSize(2) + mainSplitPane.setResizeWeight(0.5) + + # Add components to main panel + self._panel.add(mainSplitPane, BorderLayout.CENTER) + self._panel.add(splitPane, BorderLayout.SOUTH) + self._panel.add(diffSplitPane, BorderLayout.NORTH) + + self.inference_tab = self.create_inference_logger_tab() + self.tabbedPane = JTabbedPane() + self.tabbedPane.setBackground(DARK_BACKGROUND) + self.tabbedPane.setForeground(DREADNODE_GREY) + self.tabbedPane.addTab("burpference", self._panel) + self.tabbedPane.addTab("Inference Logger", self.inference_tab) + + for i in range(self.tabbedPane.getTabCount()): + self.tabbedPane.setBackgroundAt(i, DREADNODE_GREY) + self.tabbedPane.setForegroundAt(i, DREADNODE_ORANGE) + + # Register with Burp + callbacks.customizeUiComponent(self.tabbedPane) + callbacks.addSuiteTab(self) + callbacks.registerHttpListener(self) + + # 
Initialize + self.is_running = True + self.updateUIState() + self.log_message("Extension initialized and running.") + + callbacks.printOutput(SQUID_ASCII + "\n\n") + callbacks.printOutput( + "Yer configs be stowed and ready from " + CONFIG_DIR) + callbacks.printOutput( + "\nNow ye be speakin' the pirate's tongue, savvy?") + + self.promptForConfiguration() + self.applyDarkTheme(self.tabbedPane) + + def getTabCaption(self): + return "burpference" + + def getUiComponent(self): + return self.tabbedPane + + def stopExtension(self, event): + if self.is_running: + self.is_running = False + self._callbacks.removeHttpListener( + self) + self.log_message( + "Extension stopped. No further traffic will be processed.") + self.updateUIState() + + def updateUIState(self): + if self.is_running: + self.stopButton.setText("Stop Extension") + for listener in self.stopButton.getActionListeners(): + self.stopButton.removeActionListener(listener) + self.stopButton.addActionListener(self.stopExtension) + else: + self.stopButton.setText( + "Extension Stopped - Unload and reload if required, doing so will remove displayed logs and state") + for listener in self.stopButton.getActionListeners(): + self.stopButton.removeActionListener(listener) + + def loadConfigFiles(self): + if not os.path.exists(CONFIG_DIR): + self.log_message("Config directory not found: %s" % CONFIG_DIR) + return [] + return [f for f in os.listdir(CONFIG_DIR) if f.endswith('.json')] + + def loadConfiguration(self, event): + selected_config = self.configSelector.getSelectedItem() + config_path = os.path.join(CONFIG_DIR, selected_config) + if os.path.exists(config_path): + try: + with open(config_path, 'r') as config_file: + self.config = json.load(config_file) + self.log_message("Loaded configuration: %s" % + json.dumps(self.config, indent=2)) + try: + self.api_adapter = get_api_adapter(self.config) + self.log_message("API adapter initialized successfully") + except ValueError as e: + self.log_message("Error initializing API adapter: %s" % str(e)) + self.api_adapter = None + except Exception as e: + self.log_message( + "Unexpected error initializing API adapter: %s" % str(e)) + self.api_adapter = None + except ValueError as e: + self.log_message( + "Error parsing JSON in configuration file: %s" % str(e)) + self.config = None + self.api_adapter = None + except Exception as e: + self.log_message( + "Unexpected error loading configuration: %s" % str(e)) + self.config = None + self.api_adapter = None + else: + self.log_message( + "Configuration file %s not found." 
% selected_config) + self.config = None + self.api_adapter = None + + def create_inference_logger_tab(self): + panel = JPanel(BorderLayout()) + panel.setBackground(DARK_BACKGROUND) + + self.inferenceLogTable = JTable() + self.inferenceLogTable.setBackground(LIGHTER_BACKGROUND) + self.inferenceLogTable.setForeground(DREADNODE_GREY) + self.inferenceLogTable.setSelectionBackground(DREADNODE_PURPLE) + self.inferenceLogTable.setSelectionForeground(DREADNODE_GREY) + self.inferenceLogTable.setGridColor(DREADNODE_GREY) + + self.inferenceLogTableModel = table.DefaultTableModel( + ["Timestamp", "API Endpoint", "Proxy Request", "Model Response", "Status"], 0) + self.inferenceLogTable.setModel(self.inferenceLogTableModel) + self.inferenceLogTable.setSelectionMode( + ListSelectionModel.SINGLE_SELECTION) + + self.inferenceLogTable.getSelectionModel().addListSelectionListener( + self.inferenceLogSelectionChanged) + + inferenceLogScrollPane = JScrollPane(self.inferenceLogTable) + border = BorderFactory.createTitledBorder( + BorderFactory.createLineBorder(DREADNODE_ORANGE), + "API Requests Log" + ) + border.setTitleColor(DREADNODE_ORANGE) + border.setTitleFont(border.getTitleFont().deriveFont(Font.BOLD, 14)) + inferenceLogScrollPane.setBorder(border) + + self.inferenceRequestDetail = JTextArea(10, 30) + self.inferenceRequestDetail.setEditable(False) + self.inferenceRequestDetail.setLineWrap(True) + self.inferenceRequestDetail.setWrapStyleWord(True) + self.inferenceRequestDetail.setBackground(LIGHTER_BACKGROUND) + self.inferenceRequestDetail.setForeground(DREADNODE_ORANGE) + requestDetailPane = JScrollPane(self.inferenceRequestDetail) + requestDetailPane.setBorder(BorderFactory.createTitledBorder( + BorderFactory.createLineBorder(DREADNODE_ORANGE), + "Inference Request Detail" + )) + + self.inferenceResponseDetail = JTextArea(10, 30) + self.inferenceResponseDetail.setEditable(False) + self.inferenceResponseDetail.setLineWrap(True) + self.inferenceResponseDetail.setWrapStyleWord(True) + self.inferenceResponseDetail.setBackground(LIGHTER_BACKGROUND) + self.inferenceResponseDetail.setForeground(DREADNODE_ORANGE) + responseDetailPane = JScrollPane(self.inferenceResponseDetail) + responseDetailPane.setBorder(BorderFactory.createTitledBorder( + BorderFactory.createLineBorder(DREADNODE_ORANGE), + "Inference Response Detail" + )) + + # Create split pane for details + detailsSplitPane = JSplitPane( + JSplitPane.HORIZONTAL_SPLIT, + requestDetailPane, + responseDetailPane + ) + detailsSplitPane.setResizeWeight(0.5) + + # Create main split pane + mainSplitPane = JSplitPane( + JSplitPane.VERTICAL_SPLIT, + inferenceLogScrollPane, + detailsSplitPane + ) + mainSplitPane.setResizeWeight(0.5) + + panel.add(mainSplitPane, BorderLayout.CENTER) + + return panel + + def inferenceLogSelectionChanged(self, event): + selectedRow = self.inferenceLogTable.getSelectedRow() + if selectedRow != -1: + try: + # Get the request and response data from columns + request = self.inferenceLogTableModel.getValueAt(selectedRow, 2) # Proxy Request column + response = self.inferenceLogTableModel.getValueAt(selectedRow, 3) # Model Response column + + # Format the request JSON + try: + if isinstance(request, dict): + formatted_request = json.dumps(request, indent=2) + else: + formatted_request = json.dumps(json.loads(request), indent=2) + except (ValueError, TypeError): + formatted_request = str(request) + + # For response, try to extract the message content if it's a model response + try: + # Handle case where response is already a dict + if 
isinstance(response, dict): + response_obj = response + else: + response_obj = json.loads(response) + + if isinstance(response_obj, dict) and 'message' in response_obj and 'content' in response_obj['message']: + # Get just the content string and handle unicode + content = response_obj['message']['content'] + # Handle unicode strings and newlines + formatted_response = content.replace('\\n', '\n') + else: + formatted_response = json.dumps(response_obj, indent=2) + except (ValueError, AttributeError, TypeError): + formatted_response = str(response) + + self.inferenceRequestDetail.setText(formatted_request) + self.inferenceResponseDetail.setText(formatted_response) + + self.inferenceRequestDetail.setCaretPosition(0) + self.inferenceResponseDetail.setCaretPosition(0) + + except Exception as e: + self.log_message("Error updating inference details: %s" % str(e)) + + def historyTableSelectionChanged(self, event): + selectedRow = self.historyTable.getSelectedRow() + if selectedRow != -1: + try: + # Get the stored HTTP pair and model analysis + http_pair_json = self.historyTableModel.getValueAt(selectedRow, 4) + model_analysis = self.historyTableModel.getValueAt(selectedRow, 5) + + try: + # Parse and show the HTTP pair + http_pair = json.loads(http_pair_json) + self.selectedRequestArea.setText(json.dumps(http_pair, indent=2)) + + model_text = model_analysis.strip('"').decode('unicode_escape') + model_text = model_text.replace('\\n', '\n') + self.selectedResponseArea.setText(model_text) + + self.selectedRequestArea.setCaretPosition(0) + self.selectedResponseArea.setCaretPosition(0) + except Exception as e: + self.log_message("Error parsing stored data: %s" % str(e)) + self.selectedRequestArea.setText(http_pair_json) + self.selectedResponseArea.setText(model_analysis) + + except Exception as e: + self.log_message("Error updating history details: %s" % str(e)) + + def colorizeHistoryTable(self): + renderer = self.SeverityCellRenderer() + for column in range(self.historyTable.getColumnCount()): + self.historyTable.getColumnModel().getColumn(column).setCellRenderer(renderer) + + class SeverityCellRenderer(DefaultTableCellRenderer): + def getTableCellRendererComponent(self, table, value, isSelected, hasFocus, row, column): + component = super(BurpExtender.SeverityCellRenderer, self).getTableCellRendererComponent( + table, value, isSelected, hasFocus, row, column) + + component.setForeground(DREADNODE_GREY) + + try: + response = str(table.getModel().getValueAt(row, 5)) + + if "**CRITICAL**" in response: + component.setBackground(CRITICAL_COLOR) + component.setForeground(Color.WHITE) + elif "**HIGH**" in response: + component.setBackground(HIGH_COLOR) + component.setForeground(Color.WHITE) + elif "**MEDIUM**" in response: + component.setBackground(MEDIUM_COLOR) + component.setForeground(Color.BLACK) + elif "**LOW**" in response: + component.setBackground(LOW_COLOR) + component.setForeground(Color.BLACK) + elif "**INFORMATIONAL**" in response: + component.setBackground(INFORMATIONAL_COLOR) + component.setForeground(Color.BLACK) + else: + component.setBackground(LIGHTER_BACKGROUND) + component.setForeground(DREADNODE_GREY) + + if isSelected: + component.setBackground(DREADNODE_ORANGE) + component.setForeground(DREADNODE_GREY) + + except Exception as e: + component.setBackground(LIGHTER_BACKGROUND) + component.setForeground(DREADNODE_GREY) + + component.setOpaque(True) + return component + + def log_message(self, message): + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + log_entry = "[{0}] 
{1}\n".format(timestamp, message) # Python2 format strings + + if self.logArea is None: + self.temp_log_messages.append(log_entry) + else: + self.logArea.append(log_entry) + self.logArea.setCaretPosition( + self.logArea.getDocument().getLength()) + + try: + # Try to create/write to log file with explicit permissions + log_dir = os.path.dirname(self.log_file_path) + if not os.path.exists(log_dir): + os.makedirs(log_dir, 0755) # Python2 octal notation + + # Open with explicit write permissions + with open(self.log_file_path, 'a+') as log_file: + log_file.write(log_entry) + except (IOError, OSError) as e: + print("Warning: Could not write to log file: %s" % str(e)) + + def applyDarkTheme(self, component): + if isinstance(component, JPanel) or isinstance(component, JScrollPane): + component.setBackground(DARK_BACKGROUND) + border = component.getBorder() + if isinstance(border, TitledBorder): + border.setTitleColor(DREADNODE_ORANGE) + boldFont = Font(Font.SANS_SERIF, Font.BOLD, 14) + border.setTitleFont(boldFont) + + component.setForeground(DREADNODE_GREY) + + if isinstance(component, JTextArea) or isinstance(component, JTextField): + component.setCaretColor(DREADNODE_GREY) + component.setBackground(LIGHTER_BACKGROUND) + component.setForeground(DREADNODE_ORANGE) + + if isinstance(component, JSplitPane): + component.setBackground(DARK_BACKGROUND) + component.setForeground(DREADNODE_GREY) + component.setDividerSize(2) + component.setDividerLocation(0.5) + + if hasattr(component, 'getComponents'): + for child in component.getComponents(): + self.applyDarkTheme(child) + + def processHttpMessage(self, toolFlag, messageIsRequest, messageInfo): + if not self.is_running: + return + if not self.api_adapter: + if not hasattr(self, '_no_adapter_logged'): + self.log_message("No API adapter configured. 
Please select a configuration file.") + self._no_adapter_logged = True + return + if messageIsRequest: + # Store the request for later use + self.current_request = messageInfo + else: + request = self.current_request + response = messageInfo + + # Filter MIME content types to reduce noise and exceeding tokens + responseInfo = self._helpers.analyzeResponse( + response.getResponse()) + contentType = responseInfo.getStatedMimeType().lower() + if contentType in ['css', 'image', 'script', 'video', 'audio', 'font']: + return + + # Filter request size + if len(request.getRequest()) > MAX_REQUEST_SIZE: + return + try: + analyzed_request = self._helpers.analyzeRequest(request) + analyzed_response = self._helpers.analyzeResponse(response.getResponse()) + + # Get the body bytes + request_body = request.getRequest()[analyzed_request.getBodyOffset():] + response_body = response.getResponse()[analyzed_response.getBodyOffset():] + + # Try to safely decode bodies, fall back to a byte-count placeholder for binary data + try: + request_body_str = request_body.tostring().decode('utf-8') + except UnicodeDecodeError: + request_body_str = "<binary request body: %d bytes>" % len(request_body) + + try: + response_body_str = response_body.tostring().decode('utf-8') + except UnicodeDecodeError: + response_body_str = "<binary response body: %d bytes>" % len(response_body) + + self.request_counter += 1 + + # Table display data + table_metadata = { + "id": str(self.request_counter), + "host": request.getHttpService().getHost(), + "url": str(request.getUrl()), + } + + # Data for model analysis + request_data = { + "method": analyzed_request.getMethod(), + "url": str(request.getUrl()), + "headers": dict(header.split(': ', 1) for header in analyzed_request.getHeaders()[1:] if ': ' in header), + "body": request_body_str + } + + # Package the original HTTP response + response_data = { + "status_code": analyzed_response.getStatusCode(), + "headers": dict(header.split(': ', 1) for header in analyzed_response.getHeaders()[1:] if ': ' in header), + "body": response_body_str + } + + # Create the HTTP pair to send to the model + http_pair = { + "request": request_data, + "response": response_data + } + + # Load prompt template for system role + if os.path.exists(PROXY_PROMPT): + with open(PROXY_PROMPT, 'r') as prompt_file: + system_content = prompt_file.read().strip() + # Only log if there's an issue or if it's different from last time + if not hasattr(self, '_last_system_content') or system_content != self._last_system_content: + self.log_message("Custom prompt loaded from " + PROXY_PROMPT) + self._last_system_content = system_content + else: + if not hasattr(self, '_prompt_missing_logged'): + self.log_message("No prompt file found. 
Using default prompt.") + self._prompt_missing_logged = True + system_content = "Examine this request and response pair for any security issues:" + + # Prepare the request using the adapter + remote_request = self.api_adapter.prepare_request( + user_content=json.dumps(http_pair, indent=2), + system_content=system_content + ) + + # Send request to model + req = urllib2.Request(self.config.get("host", "")) + for header, value in self.config.get("headers", {}).items(): + req.add_header(header, value) + + try: + response = urllib2.urlopen(req, json.dumps(remote_request)) + response_data = response.read() + processed_response = self.api_adapter.process_response(response_data) + status = "Success" + except urllib2.HTTPError as e: + error_message = e.read().decode('utf-8') + error_details = { + "status_code": e.code, + "headers": dict(e.headers), + "body": error_message + } + processed_response = json.dumps(error_details, indent=2) + self.log_message("API Error Response:\nStatus Code: %d\nHeaders: %s\nBody: %s" % + (e.code, dict(e.headers), error_message)) + status = "Failed (%d)" % e.code + except Exception as e: + processed_response = "Error: %s" % str(e) + self.log_message("General Error: %s" % str(e)) + status = "Failed" + + # Always log to inference logger + if self.is_running: + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + self.inferenceLogTableModel.addRow([ + timestamp, + self.config.get("host", ""), + json.dumps(remote_request), + processed_response, + status + ]) + + # Only update the main UI if we got a successful response + if status == "Success" and self.is_running: + # Add to history table with metadata + self.historyTableModel.addRow([ + table_metadata.get("id", ""), + timestamp, + table_metadata.get("host", ""), + table_metadata.get("url", ""), + json.dumps(http_pair, indent=2), # Store the HTTP request/response pair + json.dumps(processed_response, indent=2) # Store the model's analysis + ]) + self.colorizeHistoryTable() + + self.requestArea.append("\n\n=== Request #" + str(self.request_counter) + " ===\n") + try: + # Format the request nicely + formatted_request = json.dumps(http_pair, indent=2) + formatted_request = formatted_request.replace('\\n', '\n') + formatted_request = formatted_request.replace('\\"', '"') + self.requestArea.append(formatted_request) + except Exception as e: + self.requestArea.append(str(http_pair)) + self.requestArea.setCaretPosition(self.requestArea.getDocument().getLength()) + + self.responseArea.append("\n\n=== Response #" + str(self.request_counter) + " ===\n") + try: + # Format the response nicely + if isinstance(processed_response, dict) and 'message' in processed_response and 'content' in processed_response['message']: + formatted_response = processed_response['message']['content'] + else: + formatted_response = json.dumps(processed_response, indent=2) + formatted_response = formatted_response.replace('\\n', '\n') + formatted_response = formatted_response.replace('\\"', '"') + self.responseArea.append(formatted_response) + except Exception as e: + self.responseArea.append(str(processed_response)) + self.responseArea.setCaretPosition(self.responseArea.getDocument().getLength()) + + except Exception as e: + self.log_message("Error processing request: %s" % str(e)) + + def promptForConfiguration(self): + JOptionPane.showMessageDialog( + self._panel, + "Select a configuration file to load in the burpference extension" + " tab and go brrr", + "burpference Configuration Required", + JOptionPane.INFORMATION_MESSAGE) \ No newline at end of 
file diff --git a/burpference/consts.py b/burpference/consts.py new file mode 100644 index 0000000..6d403ee --- /dev/null +++ b/burpference/consts.py @@ -0,0 +1,31 @@ +import os +from java.awt import Color + +# Get the directory containing burpference.py +EXTENSION_DIR = os.path.dirname(os.path.abspath(__file__)) +ROOT_DIR = os.path.dirname(EXTENSION_DIR) # Parent directory of burpference folder +# Define paths relative to ROOT_DIR +CONFIG_DIR = os.path.join(ROOT_DIR, "configs") +LOG_DIR = os.path.join(ROOT_DIR, "logs") +PROXY_PROMPT = os.path.join(ROOT_DIR, "prompts", "proxy_prompt.txt") +SQUID_ASCII_FILE = os.path.join(ROOT_DIR, "assets", "squid_ascii.txt") + +# Color constants +DREADNODE_ORANGE = Color(255, 140, 0) +DREADNODE_PURPLE = Color(128, 0, 128) +DREADNODE_GREY = Color(245, 245, 245) +DREADNODE_RED = Color(239, 86, 47) +DARK_BACKGROUND = Color(40, 44, 52) +LIGHTER_BACKGROUND = Color(50, 55, 65) +TEXT_COLOR = Color(171, 178, 191) +HIGHLIGHT_COLOR = Color(97, 175, 239) + +# Severity colors rendered in the 'burpference' tab from the model responses +CRITICAL_COLOR = Color(255, 0, 0) +HIGH_COLOR = Color(255, 69, 0) +MEDIUM_COLOR = Color(255, 165, 0) +LOW_COLOR = Color(255, 255, 0) +INFORMATIONAL_COLOR = Color(200, 200, 200) + +# Recommended: tune this value according to your remote endpoint to avoid exceeding token limits and maxing out local inference resources +MAX_REQUEST_SIZE = 1048576 # 1MB in bytes diff --git a/configs/.gitkeep b/configs/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/configs/README.md b/configs/README.md new file mode 100644 index 0000000..90ff1d1 --- /dev/null +++ b/configs/README.md @@ -0,0 +1,104 @@ +## Model Configs + +This directory provides example configurations for the currently supported provider plugins in the burpference tool. + +**Important**: as Burp Suite cannot read environment variables from the filesystem's `os` environment, you will need to define API key values explicitly in the per-provider configuration `.json` files (e.g., [here](https://github.com/dreadnode/burpference/blob/aafd5ec63af2d658cac2235c5d61ef6238fa6501/configs/anthropic_claude_3_sonnet_20240229.json#L4)). Placeholders that mimic environment variables are set for illustration only. + +If you intend to fork or contribute to burpference, ensure these configuration files are excluded from git tracking via `.gitignore`. There's also a pre-commit hook in the repo as an additional safety net. Install pre-commit hooks [here](https://pre-commit.com/#install). + +- [Model Configs](#model-configs) + - [Ollama GGUF](#ollama-gguf) + - [Example Ollama `/chat` GGUF model:](#example-ollama-chat-gguf-model) + - [Ollama Inference](#ollama-inference) + - [Example Ollama `/generate`/`/chat` inference model:](#example-ollama-generatechat-inference-model) + - [Anthropic Inference](#anthropic-inference) + - [Example Anthropic `/messages` inference with `claude-3-5-sonnet-20241022`:](#example-anthropic-messages-inference-with-claude-3-5-sonnet-20241022) + - [OpenAI Inference](#openai-inference) + - [Example OpenAI `/completions` inference with `gpt-4o-mini`:](#example-openai-completions-inference-with-gpt-4o-mini) +- [Model System Prompts](#model-system-prompts) + +--- + +### Ollama GGUF + +#### Example Ollama `/chat` GGUF model: + +To serve inference as part of burpference, the model must be running on the API endpoint (your local host), e.g. `ollama run hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M`; a quick way to sanity-check the endpoint is shown in the sketch below.
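Before loading the config in Burp, it can help to confirm the model is actually being served. A minimal standalone sketch, assuming the default Ollama `/api/chat` host and the example GGUF model above (written Python 2 / Jython-style to match the extension code; adjust the names to your setup):

```python
import json
import urllib2

# Assumed values, taken from the example config below -- adjust to your setup.
HOST = "http://localhost:11434/api/chat"
MODEL = "hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M"

# Send a trivial chat request; a successful response confirms the model is served.
payload = json.dumps({
    "model": MODEL,
    "stream": False,
    "messages": [{"role": "user", "content": "ping"}],
})
req = urllib2.Request(HOST, payload, {"Content-Type": "application/json"})
try:
    body = json.loads(urllib2.urlopen(req).read())
    print("Model is up: %s" % body.get("model", MODEL))
except urllib2.HTTPError as e:
    print("Endpoint reachable but returned %d -- is the model pulled?" % e.code)
except Exception as e:
    print("Could not reach Ollama at %s: %s" % (HOST, e))
```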
+ +Be sure to follow the guidance [here](https://huggingface.co/docs/hub/en/ollama) as a prerequisite. + +```json +{ + "api_type": "ollama", + "stream": false, + "host": "http://localhost:11434/api/chat", + "model": "hf.co/{username}/{repository}", <-- replace these placeholders + "quantization": "Q4_K_M", <-- optional + "max_input_size": 32000 <-- adjust based on the loaded model and Ollama restrictions +} +``` + +--- + +### Ollama Inference + +#### Example Ollama `/generate`/`/chat` inference model: + +To serve inference as part of burpference, the model must be running on the API endpoint (your local host), e.g. `ollama run mistral-small`. + +```json +{ + "api_type": "ollama", + "format": "json", + "stream": false, + "host": "http://localhost:11434/api/generate", <-- adjust based on Ollama API settings, e.g. http://localhost:11434/api/chat + "model": "mistral-small" <-- any Ollama model available on your local machine +} +``` + +--- + +### Anthropic Inference + +#### Example Anthropic `/messages` inference with `claude-3-5-sonnet-20241022`: + +```json +{ + "api_type": "anthropic", + "headers": { + "x-api-key": "{$ANTHROPIC_API_KEY}", <-- replace with your API key in the local config file + "Content-Type": "application/json", + "anthropic-version": "2023-06-01" + }, + "max_tokens": 1020, <-- adjust based on your required usage + "host": "https://api.anthropic.com/v1/messages", + "model": "claude-3-5-sonnet-20241022" <-- adjust based on your required usage +} +``` + +--- + +### OpenAI Inference + +#### Example OpenAI `/completions` inference with `gpt-4o-mini`: + +```json +{ + "api_type": "openai", + "headers": { + "Authorization": "Bearer {$OPENAI_API_KEY}", <-- replace with your API key in the local config file + "Content-Type": "application/json" + }, + "stream": false, + "host": "https://api.openai.com/v1/chat/completions", + "model": "gpt-4o-mini", <-- adjust based on your required usage + "temperature": 0.1 <-- adjust based on your required usage +} +``` + +## Model System Prompts + +By default, the system prompt sent as pretext to the model is defined [here](../prompts/proxy_prompt.txt); feel free to edit, tune, and tweak it as you see fit.
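For reference, the extension falls back to a built-in instruction when this prompt file can't be found (the "Using default prompt." path in the extension code above). A rough sketch of that load-with-fallback pattern, assuming the `PROXY_PROMPT` path constant from `burpference/consts.py`; the helper name is illustrative, not the extension's actual API:

```python
import os

from burpference.consts import PROXY_PROMPT  # path constant defined above

# Fallback instruction, matching the default used in the extension code.
DEFAULT_PROMPT = "Examine this request and response pair for any security issues:"

def load_system_prompt():
    # Illustrative helper: prefer the editable prompt file, fall back to the default.
    if os.path.exists(PROXY_PROMPT):
        with open(PROXY_PROMPT) as f:
            return f.read().strip()
    return DEFAULT_PROMPT
```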
+ + +--- \ No newline at end of file diff --git a/configs/anthropic_claude_3_sonnet_20240229.json b/configs/anthropic_claude_3_sonnet_20240229.json new file mode 100644 index 0000000..7ca7e97 --- /dev/null +++ b/configs/anthropic_claude_3_sonnet_20240229.json @@ -0,0 +1,11 @@ +{ + "api_type": "anthropic", + "headers": { + "x-api-key": "{$ANTHROPIC_API_KEY}", + "Content-Type": "application/json", + "anthropic-version": "2023-06-01" + }, + "max_tokens": 1020, + "host": "https://api.anthropic.com/v1/messages", + "model": "claude-3-5-sonnet-20241022" +} diff --git a/configs/groq_mixtral_8x7b_32768.json b/configs/groq_mixtral_8x7b_32768.json new file mode 100644 index 0000000..025ba7c --- /dev/null +++ b/configs/groq_mixtral_8x7b_32768.json @@ -0,0 +1,9 @@ +{ + "api_type": "groq-openai", + "headers": { + "x-api-key": "{$GROQ_API_KEY}", + "Content-Type": "application/json" + }, + "host": "https://api.groq.com/openai/v1/chat/completions", + "model": "mixtral-8x7b-32768" +} diff --git a/configs/groq_openai_llama3-8b-8192.json b/configs/groq_openai_llama3-8b-8192.json new file mode 100644 index 0000000..f038564 --- /dev/null +++ b/configs/groq_openai_llama3-8b-8192.json @@ -0,0 +1,12 @@ +{ + "api_type": "groq-openai-stream", + "headers": { + "x-api-key": "{$GROQ_API_KEY}", + "Content-Type": "application/json" + }, + "stream": true, + "max_tokens": 1020, + "host": "https://api.groq.com/openai/v1/chat/completions", + "model": "llama3-8b-8192", + "temperature": 0.1 +} diff --git a/configs/ollama_huggingface_gguf_bartowski_Llama-3.2-1B-Instruct-GGUF.json b/configs/ollama_huggingface_gguf_bartowski_Llama-3.2-1B-Instruct-GGUF.json new file mode 100644 index 0000000..4917651 --- /dev/null +++ b/configs/ollama_huggingface_gguf_bartowski_Llama-3.2-1B-Instruct-GGUF.json @@ -0,0 +1,8 @@ +{ + "api_type": "ollama", + "stream": false, + "host": "http://localhost:11434/api/chat", + "model": "hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF", + "quantization": "Q4_K_M", + "max_input_size": 32000 +} diff --git a/configs/ollama_llama3.2_chat.json b/configs/ollama_llama3.2_chat.json new file mode 100644 index 0000000..29955e3 --- /dev/null +++ b/configs/ollama_llama3.2_chat.json @@ -0,0 +1,6 @@ +{ + "api_type": "ollama", + "stream": false, + "host": "http://localhost:11434/api/chat", + "model": "llama3.2" +} diff --git a/configs/ollama_llama3.2_generate.json b/configs/ollama_llama3.2_generate.json new file mode 100644 index 0000000..efcab5a --- /dev/null +++ b/configs/ollama_llama3.2_generate.json @@ -0,0 +1,7 @@ +{ + "api_type": "ollama", + "format": "json", + "stream": false, + "host": "http://localhost:11434/api/generate", + "model": "llama3.2" +} diff --git a/configs/ollama_mistral-small.json b/configs/ollama_mistral-small.json new file mode 100644 index 0000000..ddebe7f --- /dev/null +++ b/configs/ollama_mistral-small.json @@ -0,0 +1,7 @@ +{ + "api_type": "ollama", + "format": "json", + "stream": false, + "host": "http://localhost:11434/api/generate", + "model": "mistral-small" +} diff --git a/configs/ollama_rfc_whiterabbitneo.json b/configs/ollama_rfc_whiterabbitneo.json new file mode 100644 index 0000000..5723563 --- /dev/null +++ b/configs/ollama_rfc_whiterabbitneo.json @@ -0,0 +1,7 @@ +{ + "api_type": "ollama", + "format": "json", + "stream": false, + "host": "http://localhost:11434/api/generate", + "model": "rfc/whiterabbitneo" +} diff --git a/configs/openai_gpt4o_mini.json b/configs/openai_gpt4o_mini.json new file mode 100644 index 0000000..0ae6d43 --- /dev/null +++
b/configs/openai_gpt4o_mini.json @@ -0,0 +1,11 @@ +{ + "api_type": "openai", + "headers": { + "Authorization": "Bearer {$OPENAI_API_KEY}", + "Content-Type": "application/json" + }, + "stream": false, + "host": "https://api.openai.com/v1/chat/completions", + "model": "gpt-4o-mini", + "temperature": 0.1 +} diff --git a/prompts/proxy_prompt.txt b/prompts/proxy_prompt.txt new file mode 100644 index 0000000..71866dd --- /dev/null +++ b/prompts/proxy_prompt.txt @@ -0,0 +1,22 @@ +You are a web application penetration tester conducting a comprehensive assessment of an application in the offensive stage of an engagement, focused on identifying and leveraging security flaws. +Your objective is to examine the HTTP requests and responses available through the Burp Suite proxy history as we test the application. + +This analysis will focus on: + +- Request and Response Evaluation: Scrutinizing HTTP requests and responses for security misconfigurations, sensitive data exposure, and other vulnerabilities. +- Authentication and Session Management: Assessing the effectiveness of authentication mechanisms and session handling practices. +- Input Validation and Output Encoding: Identifying weaknesses related to input validation that may lead to injection attacks or cross-site scripting (XSS). + +Use reasoning and context to find potential flaws in the application, providing example payloads and PoCs that could lead to a successful exploit. + +If you identify any vulnerabilities, prepend the severity of the finding (case-sensitive) to your response using one of the following levels: + +- "CRITICAL" +- "HIGH" +- "MEDIUM" +- "LOW" +- "INFORMATIONAL" for any informational-level findings or observations, for example a "secure" flag missing from a cookie. + +Not every request and response will contain indicators; be concise yet deterministic and creative in your approach. + +The HTTP request and response pair is provided below this line:
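Since the model is asked to prepend a case-sensitive severity keyword, the extension's history table can key its row colors off that keyword. A short illustrative sketch using the severity colors defined in `burpference/consts.py` (the helper below is an assumption for illustration, not the extension's actual `colorizeHistoryTable` implementation):

```python
from java.awt import Color  # available under Jython inside Burp

# Severity colors as defined in burpference/consts.py, ordered most to least severe.
SEVERITIES = [
    ("CRITICAL", Color(255, 0, 0)),
    ("HIGH", Color(255, 69, 0)),
    ("MEDIUM", Color(255, 165, 0)),
    ("LOW", Color(255, 255, 0)),
    ("INFORMATIONAL", Color(200, 200, 200)),
]

def severity_color(model_response):
    # Check the highest severity first so a response mentioning several
    # levels is colored by the most severe finding it reports.
    for keyword, color in SEVERITIES:
        if keyword in model_response:
            return color
    return None  # no severity keyword found -- leave the row uncolored
```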