Support for --model option to allow for selecting a specific OpenAI model
relston committed Aug 14, 2024
1 parent f353b58 commit c0aefac
Showing 7 changed files with 38 additions and 19 deletions.
CHANGELOG.md (4 additions, 0 deletions)
@@ -4,6 +4,10 @@
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+ ### [0.8.0] - 2024-08-13
+ #### Added
+ - Support for `--model` option to allow for selecting a specific OpenAI model
+
### [0.7.3] - 2024-07-24
#### Added
- Support for `--version` option in the CLI
README.md (8 additions, 2 deletions)
@@ -65,8 +65,14 @@
cat path/to/markdown.md | mark
# LLM response....
```

+ ## Use a specific LLM model
+ You can specify a different LLM model to use with the `--model` (or `-m`) flag
+ ```bash
+ mark path/to/markdown.md --model gpt-4o-2024-05-13
+ ```
+
## Custom system prompts
- The system prompts folder is located at `~/.mark/system_prompts` and it includes a `default.md` prompt. You can add any additional system prompts you'd like to use in this folder and use them with the `--system` flag.
+ The system prompts folder is located at `~/.mark/system_prompts` and it includes a `default.md` prompt. You can add any additional system prompts you'd like to use in this folder and use them with the `--system` (or `-s`) flag.
```bash
# ~/.mark/system_prompts/custom.md
mark path/to/markdown.md --system custom
@@ -76,7 +82,7 @@ mark path/to/markdown.md --system custom
If you want to use a different LLM API endpoint that is fully compatible with the OpenAI API, set the `OPENAI_API_BASE_URL` environment variable to that endpoint value. This should enable you to use OpenAI proxy services like [credal.ai](https://www.credal.ai/), or other LLMs that are compatible with the OpenAI SDK.
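As an aside (not part of this diff): since the override is just an environment variable, it can also be set per invocation from a script. A minimal sketch, assuming `mark` is installed on PATH; the proxy URL is a made-up placeholder:

```python
# Hypothetical invocation of mark against an OpenAI-compatible proxy.
import os
import subprocess

env = {**os.environ, "OPENAI_API_BASE_URL": "https://llm-proxy.example.com/v1"}
subprocess.run(["mark", "path/to/markdown.md"], env=env, check=True)
```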

## Image Generation
- To generate an image based on the input just add the `--generate-image` flag to the command
+ To generate an image based on the input just add the `--generate-image` (or `-i`) flag to the command
```bash
mark path/to/markdown.md --generate-image
```
mark/cli.py (11 additions, 3 deletions)
@@ -10,14 +10,18 @@
except PackageNotFoundError:
    package_version = "unknown"

+ DEFAULT_MODEL = "gpt-4o"
+ DALL_E_MODEL = "dall-e-3"

@click.command()
@click.argument('file', type=click.File())
- @click.option('--system', '-s', type=click.STRING, default='default', help='The system prompt to use')
+ @click.option('--system', '-s', type=click.STRING,
+               default='default', help='The system prompt to use')
+ @click.option('--model', '-m', type=click.STRING, help='The llm model')
@click.option('--generate-image', '-i', is_flag=True, default=False,
              help='EXPERIMENTAL: Generate an image using DALL-E.')
@click.version_option(version=package_version)
- def command(file, system, generate_image):
+ def command(file, system, model, generate_image):
"""
Markdown powered LLM CLI - Multi-modal AI text generation tool
@@ -29,7 +33,11 @@ def command(file, system, generate_image):
"""
system_prompt = get_config().system_prompts().get(system, 'default')
markdown_file = MarkdownFile(file)
request = LLMRequest() \

if not model:
model = DALL_E_MODEL if generate_image else DEFAULT_MODEL

request = LLMRequest(model) \
.with_prompt(markdown_file.content) \
.with_system_message(system_prompt)
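This fallback is the heart of the commit: an explicit `--model` always wins, and `--generate-image` without a model defaults to DALL-E. A minimal sketch of that behavior in isolation; the `resolve_model` helper name is hypothetical (cli.py inlines the logic):

```python
DEFAULT_MODEL = "gpt-4o"
DALL_E_MODEL = "dall-e-3"

def resolve_model(model, generate_image):
    # Hypothetical helper mirroring the fallback inlined in mark/cli.py.
    return model or (DALL_E_MODEL if generate_image else DEFAULT_MODEL)

assert resolve_model(None, False) == "gpt-4o"   # no flags: default chat model
assert resolve_model(None, True) == "dall-e-3"  # image mode defaults to DALL-E
assert resolve_model("gpt-4o-2024-05-13", True) == "gpt-4o-2024-05-13"  # explicit --model wins
```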

mark/llm.py (7 additions, 7 deletions)
@@ -4,9 +4,6 @@
from mark.config import get_config
from mark.llm_response import LLMResponse, LLMImageResponse

- MODEL = "gpt-4o-2024-05-13"
- DALL_E_MODEL = "dall-e-3"
-
# TODO: Move this config logic to the config class
OPENAI_BASE_URL = os.getenv('OPENAI_API_BASE_URL', openai.base_url)
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
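These `os.getenv` lines are where the README's endpoint override takes effect. A hedged sketch of how an OpenAI SDK client could be built from them (the commit does not show where mark constructs its client, so the `OpenAI(...)` call here is illustrative):

```python
import os

import openai

# Mirrors mark/llm.py: fall back to the SDK's default base URL when the
# environment variable is unset.
base_url = os.getenv('OPENAI_API_BASE_URL', openai.base_url)
client = openai.OpenAI(
    base_url=base_url,
    api_key=os.getenv('OPENAI_API_KEY'),
)
```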
@@ -51,19 +48,22 @@ def get_completion(llm_request):
"""
get_config().log(llm_request.to_log())

response_text = _call_completion(llm_request.to_payload(), MODEL)
response_text = _call_completion(
llm_request.to_payload(), llm_request.model)

return LLMResponse(response_text, MODEL)
return LLMResponse(response_text, llm_request.model)


def generate_image(llm_request):
    get_config().log(llm_request.to_log())

-     response = _call_generate_image(llm_request.to_flat_prompt(), DALL_E_MODEL)
+     response = _call_generate_image(
+         llm_request.to_flat_prompt(),
+         llm_request.model)

    return LLMImageResponse(
        response.url,
-         DALL_E_MODEL,
+         llm_request.model,
        response.revised_prompt)


mark/llm_request.py (2 additions, 1 deletion)
@@ -2,12 +2,13 @@


class LLMRequest:
-     def __init__(self):
+     def __init__(self, model):
        """
        Can serialize itself into a payload that can be sent to the OpenAI API (potentially others in the future)
        """
        self.system_message = None
        self.prompt = None
+         self.model = model
        self.images = []
        self.links = []
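With `model` stored on the request, the builder chain carries the chosen model from cli.py through to llm.py. A small sketch of the new call pattern; the prompt strings are placeholders, and the chained setters are the ones visible in the cli.py hunk above:

```python
from mark.llm_request import LLMRequest

# Placeholders stand in for the markdown body and system prompt the CLI loads.
request = LLMRequest("gpt-4o") \
    .with_prompt("Summarize this document.") \
    .with_system_message("You are a helpful assistant.")

assert request.model == "gpt-4o"  # llm.get_completion now reads this field
```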

pyproject.toml (1 addition, 1 deletion)

@@ -1,6 +1,6 @@
[tool.poetry]
name = "mark"
- version = "0.7.3"
+ version = "0.8.0"
description = ""
authors = ["Ryan Elston <[email protected]>"]

tests/test_cli.py (5 additions, 5 deletions)
@@ -143,12 +143,12 @@ def test_command_default(self, mock_llm_response):
        command([str(self.markdown_file)], None, None, False)

        mock_llm_response.assert_called_once_with(
-             self.default_expected_llm_request, 'gpt-4o-2024-05-13')
+             self.default_expected_llm_request, 'gpt-4o')

        # The markdown file will be updated with the response
        expected_markdown_file_content = self.mock_markdown_file_content + \
            dedent("""
-                 # GPT Response (model: gpt-4o-2024-05-13, system: default)
+                 # GPT Response (model: gpt-4o, system: default)
                Test completion
                # User Response
@@ -164,7 +164,7 @@ def test_command_with_stdin(self, mock_llm_response, mock_stdout):
        command(['-'], None, None, False)

        mock_llm_response.assert_called_once_with(
-             self.default_expected_llm_request, 'gpt-4o-2024-05-13')
+             self.default_expected_llm_request, 'gpt-4o')
        mock_stdout.assert_called_once_with("Test completion")

    def test_command_custom_agent(self, create_file, mock_llm_response):
@@ -192,12 +192,12 @@ def test_command_custom_agent(self, create_file, mock_llm_response):
            }
        ]
        mock_llm_response.assert_called_once_with(
-             expected_llm_request, 'gpt-4o-2024-05-13')
+             expected_llm_request, 'gpt-4o')

        # The markdown file will be updated indicating the custom agent
        expected_markdown_file_content = self.mock_markdown_file_content + \
            dedent("""
-                 # GPT Response (model: gpt-4o-2024-05-13, system: custom)
+                 # GPT Response (model: gpt-4o, system: custom)
                Test completion
                # User Response
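These hunks update the expected default model but do not add a case for an explicit override. A hypothetical test in the same style (fixture and mock names assumed from the surrounding tests; not part of the commit):

```python
def test_command_with_model_override(self, mock_llm_response):
    # Hypothetical: the third positional argument is the new model value.
    command([str(self.markdown_file)], None, 'gpt-4o-2024-05-13', False)

    mock_llm_response.assert_called_once_with(
        self.default_expected_llm_request, 'gpt-4o-2024-05-13')
```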
