Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: Make model configurable #6

Merged
merged 1 commit into from
Jan 17, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 16 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,15 @@ languages.
- **User-Friendly CLI**: Simple and intuitive command-line interface
for easy usage.
- **Markdown Reports**: Outputs code reviews in markdown for flexible viewing
and integration.
and integration.
- **Configurable Model**: Use the default OpenAI model, or choose another

![Screenshot](./assets/cadre-screenshot.png)

## Review Details

Each diff file is reviewed in isolation. Code reviews are broken into several sections:
Each diff file is reviewed in isolation. Code reviews are broken into several
sections:

- Technical Accuracy
- Best Practices
Expand Down Expand Up @@ -81,15 +83,27 @@ or
```bash
# Run the application with a GitHub pull request URL and API key
./cadre --url=https://github.com/user/repo/pull/123 --key=your_api_key
./cadre --url=https://github.com/a/repo/pull/123 --model gpt-3.5-turbo-instruct

```

### Command-Line Switches

- `--url`: The GitHub pull request URL. Example: `--url=https://github.com/user/repo/pull/123`
- `--key`: Your OpenAI API key. You can also set this using the `OPENAI_API_KEY`
environment variable. Example: `--key=your_api_key`
- `--model`: You can specify the [OpenAI Model](https://platform.openai.com/docs/models) to use by passing it here.
- `--help`: Show help information.

## Configuring The Model

You can tell Cadre what OpenAI Model to use by passing it via
the command line argument `--model` or by setting the `CADRE_COMPLETION_MODEL`
environment variable.

The most common models are `gpt-4` and `gpt-3.5-turbo-1106`.

### Running Tests ✔️

```bash
Expand Down
81 changes: 55 additions & 26 deletions main.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,15 @@ import (
"github.com/mkideal/cli"
)

const (
	// DefaultFilePerms is the file mode used when writing review files.
	// NOTE(review): 0o644 carries no execute bit, so it is only suitable
	// for regular files, not for directories (os.MkdirAll) — confirm
	// callers creating directories use 0o755 instead.
	DefaultFilePerms = 0o644
	// DefaultOpenAIModel is the completion model used when neither the
	// --model flag nor the CADRE_COMPLETION_MODEL environment variable
	// supplies one.
	DefaultOpenAIModel = "gpt-4-1106-preview"
)

// argT defines the command-line arguments accepted by cadre. Values may
// also be supplied via the environment variables named in the `env` tags.
// (The scraped diff interleaved the pre-change URL/ApiKey field lines,
// which duplicated fields and would not compile; this is the post-change
// struct.)
type argT struct {
	URL             string `cli:"*url" usage:"The GitHub pull request URL"`
	ApiKey          string `cli:"key" env:"OPENAI_API_KEY" usage:"Your OpenAI API key. Leave this blank to use environment variable OPENAI_API_KEY"`
	CompletionModel string `cli:"model" env:"CADRE_COMPLETION_MODEL" usage:"The OpenAI API model to use for code reviews."`
}

type ReviewedDiff struct {
Expand Down Expand Up @@ -43,50 +49,68 @@ func main() {
}

fmt.Printf("📡 Getting pull request from GitHub...\n")

parsedDiffFiles, err := processPullRequest(mergedArgs.URL, &GithubDiffClient{})

if err != nil {
return err
}

fmt.Printf("\n⌛ Processing %d diff files. This may take a while...\n\n", len(parsedDiffFiles))

reviews, err := getCodeReviews(parsedDiffFiles, "gpt-4-1106-preview", mergedArgs.ApiKey, &OpenAICompletionService{})
reviews, err := getCodeReviews(
parsedDiffFiles,
argv.CompletionModel,
mergedArgs.ApiKey,
&OpenAICompletionService{},
)
if err != nil {
return err
}

for _, review := range reviews {
saveReviews(reviews)
fmt.Println("Done! 🏁")

if review.Error != nil {
fmt.Printf("⚠️ couldn't get the review for %s: %s\n",
path.Base(review.Diff.FilePathNew),
review.Error,
)
return nil
}))
}

continue
}
func saveReviews(reviews []ReviewedDiff) {
for _, review := range reviews {

filename := path.Base(review.Diff.FilePathNew) + ".md"
if review.Error != nil {
fmt.Printf("⚠️ couldn't get the review for %s: %s\n",
path.Base(review.Diff.FilePathNew),
review.Error,
)

err := saveReviewToFile(filename, review.Review)
continue
}

if err != nil {
fmt.Printf("⚠️ couldn't save the review for %s: %s\n",
filename,
err,
)
filename := path.Base(review.Diff.FilePathNew) + ".md"

// Ensure the directory exists
dir := path.Dir(filename)

// If it doesn't exist, create it
if _, err := os.Stat(dir); os.IsNotExist(err) {
if err := os.MkdirAll(dir, DefaultFilePerms); err != nil {
fmt.Printf("⚠️ couldn't create directory for %s: %s\n", dir, err)

continue
}
}

fmt.Printf("💾 Saved review to %s\n", filename)
// Save the review to disk
err := saveReviewToFile(filename, review.Review)
if err != nil {
fmt.Printf("⚠️ couldn't save the review for %s: %s\n",
filename,
err,
)

continue
}

fmt.Println("Done! 🏁")
return nil
}))
fmt.Printf("💾 Saved review to %s\n", filename)
}
}

func getCodeReviews(diffs []*gh.GitDiff, model, apiKey string, svc CompletionServiceInterface) ([]ReviewedDiff, error) {
Expand Down Expand Up @@ -157,7 +181,7 @@ func saveReviewToFile(filename, reviewContent string) error {
}

// Write the review content to the file
err := os.WriteFile(filename, []byte(reviewContent), 0644)
err := os.WriteFile(filename, []byte(reviewContent), DefaultFilePerms)
if err != nil {
return fmt.Errorf("failed to write review to file: %s", err)
}
Expand All @@ -180,6 +204,11 @@ func coalesceConfiguration(cliArgs *argT) (*argT, error) {
cliArgs.ApiKey = envArgs.ApiKey
}

// If no model is provided, use the default model
if cliArgs.CompletionModel == "" {
cliArgs.CompletionModel = DefaultOpenAIModel
}

return cliArgs, nil
}

Expand Down
15 changes: 15 additions & 0 deletions main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,21 @@ func TestCoalesceConfiguration_Key_CLIOverride(t *testing.T) {
assert.Equal(t, "overridden_key", result.ApiKey)
}

// TestCoalesceConfiguration_Model_CLIOverride verifies that a model passed
// on the command line takes precedence over the CADRE_COMPLETION_MODEL
// environment variable.
func TestCoalesceConfiguration_Model_CLIOverride(t *testing.T) {
	// Preserve whatever the environment already holds and restore it on exit.
	savedModel := os.Getenv("CADRE_COMPLETION_MODEL")
	defer os.Setenv("CADRE_COMPLETION_MODEL", savedModel)

	// Plant a conflicting value in the environment.
	require.NoError(t, os.Setenv("CADRE_COMPLETION_MODEL", "gpt-4"))

	// The CLI-supplied value should win.
	result, _ := coalesceConfiguration(&argT{CompletionModel: "overridden_model"})

	assert.Equal(t, "overridden_model", result.CompletionModel)
}

func TestProcessPullRequest(t *testing.T) {
// Create a mock GitHub client
mockClient := &MockGithubClient{}
Expand Down
Loading