-
Notifications
You must be signed in to change notification settings - Fork 16
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
37 changed files
with
3,330 additions
and
1,034 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
::: rigging.data |
Large diffs are not rendered by default.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,31 +1,77 @@ | ||
/* Main color overrides */ | ||
[data-md-color-scheme="slate"] { | ||
--md-primary-fg-color: #EAEAEA; | ||
--md-primary-fg-color--dark: var(--md-primary-fg-color); | ||
--md-accent-fg-color: rgb(149, 133, 227); | ||
|
||
--md-primary-color: #EAEAEA; | ||
--md-primary-bg-color: #191919; | ||
--md-default-bg-color: #191919; | ||
|
||
--md-default-fg-color: hsla(0, 0%, 100%, 0.90); | ||
--md-default-fg-color--light: hsla(0, 0%, 100%, 0.70); | ||
--md-default-fg-color--lighter: hsla(0, 0%, 100%, 0.60); | ||
--md-default-fg-color--lightest: hsla(0, 0%, 100%, 0.40); | ||
|
||
--md-footer-bg-color: hsla(0, 0%, 10%, 0.87); | ||
--md-footer-bg-color--dark: hsla(0, 0%, 8%, 1); | ||
|
||
--md-typeset-a-color: var(--md-accent-fg-color); | ||
|
||
--md-code-hl-number-color: rgb(231, 107, 93); | ||
--md-code-hl-special-color: hsla(340, 83%, 66%, 1); | ||
--md-code-hl-function-color: hsla(291, 57%, 65%, 1); | ||
--md-code-hl-constant-color: hsla(250, 62%, 70%, 1); | ||
--md-code-hl-keyword-color: hsla(219, 66%, 64%, 1); | ||
--md-code-hl-string-color: var(--md-accent-fg-color); | ||
--md-code-hl-name-color: var(--md-default-fg-color--light); | ||
--md-code-hl-operator-color: var(--md-default-fg-color--light); | ||
--md-code-hl-punctuation-color: var(--md-default-fg-color--light); | ||
--md-code-hl-comment-color: rgb(55, 161, 108); | ||
--md-code-hl-generic-color: var(--md-default-fg-color--light); | ||
--md-code-hl-variable-color: var(--md-default-fg-color--light); | ||
--md-default-fg-color: hsla(0, 0%, 100%, 0.90); | ||
--md-default-fg-color--light: hsla(0, 0%, 100%, 0.70); | ||
--md-default-fg-color--lighter: hsla(0, 0%, 100%, 0.60); | ||
--md-default-fg-color--lightest: hsla(0, 0%, 100%, 0.40); | ||
|
||
--md-footer-bg-color: hsla(0, 0%, 10%, 0.87); | ||
--md-footer-bg-color--dark: hsla(0, 0%, 8%, 1); | ||
|
||
--md-typeset-a-color: var(--md-accent-fg-color); | ||
|
||
--md-code-hl-number-color: rgb(231, 107, 93); | ||
--md-code-hl-special-color: hsla(340, 83%, 66%, 1); | ||
--md-code-hl-function-color: hsla(291, 57%, 65%, 1); | ||
--md-code-hl-constant-color: hsla(250, 62%, 70%, 1); | ||
--md-code-hl-keyword-color: hsla(219, 66%, 64%, 1); | ||
--md-code-hl-string-color: var(--md-accent-fg-color); | ||
--md-code-hl-name-color: var(--md-default-fg-color--light); | ||
--md-code-hl-operator-color: var(--md-default-fg-color--light); | ||
--md-code-hl-punctuation-color: var(--md-default-fg-color--light); | ||
--md-code-hl-comment-color: rgb(55, 161, 108); | ||
--md-code-hl-generic-color: var(--md-default-fg-color--light); | ||
--md-code-hl-variable-color: var(--md-default-fg-color--light); | ||
} | ||
|
||
/* Indentation. */ | ||
div.doc-contents:not(.first) { | ||
padding-left: 25px; | ||
border-left: .05rem solid var(--md-typeset-table-color); | ||
} | ||
|
||
/* Mark external links as such. */ | ||
a.external::after, | ||
a.autorefs-external::after { | ||
/* https://primer.style/octicons/arrow-up-right-24 */ | ||
mask-image: url('data:image/svg+xml,<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M18.25 15.5a.75.75 0 00.75-.75v-9a.75.75 0 00-.75-.75h-9a.75.75 0 000 1.5h7.19L6.22 16.72a.75.75 0 101.06 1.06L17.5 7.56v7.19c0 .414.336.75.75.75z"></path></svg>'); | ||
-webkit-mask-image: url('data:image/svg+xml,<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M18.25 15.5a.75.75 0 00.75-.75v-9a.75.75 0 00-.75-.75h-9a.75.75 0 000 1.5h7.19L6.22 16.72a.75.75 0 101.06 1.06L17.5 7.56v7.19c0 .414.336.75.75.75z"></path></svg>'); | ||
content: ' '; | ||
|
||
display: inline-block; | ||
vertical-align: middle; | ||
position: relative; | ||
|
||
height: 1em; | ||
width: 1em; | ||
background-color: currentColor; | ||
} | ||
|
||
a.external:hover::after, | ||
a.autorefs-external:hover::after { | ||
background-color: var(--md-accent-fg-color); | ||
} | ||
|
||
/* Fancier color for operators such as * and |. */ | ||
.doc-signature .o { | ||
color: var(--md-code-hl-special-color); | ||
} | ||
|
||
/* Fancier color for constants such as None, True, and False. */ | ||
.doc-signature .kc { | ||
color: var(--md-code-hl-constant-color); | ||
} | ||
|
||
/* Fancier color for built-in types (only useful when cross-references are used). */ | ||
.doc-signature .n>a[href^="https://docs.python.org/"][href*="/functions.html#"], | ||
.doc-signature .n>a[href^="https://docs.python.org/"][href*="/stdtypes.html#"] { | ||
color: var(--md-code-hl-constant-color); | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,16 @@ | ||
!!! info | ||
|
||
This page is under construction. | ||
|
||
# Agents | ||
|
||
Building agents in Rigging is a straightforward process. You allow the model to emit a structured model for | ||
a variety of actions you wish to support, and loop over those generation steps executing code as the | ||
actions are selected. | ||
|
||
The first instinct might be to use [tools](topics/tools.md) for this process, but this might abstract too much | ||
control away from the generation process. | ||
|
||
## Basic Example | ||
|
||
todo. |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,177 @@ | ||
# Async and Batching | ||
|
||
Rigging has good support for handling async generation and large batching of requests. How efficiently | ||
these mechanisms operate is dependent on the underlying generator that's being used, but Rigging has | ||
been developed with scale in mind. | ||
|
||
## Multiple Generations | ||
|
||
The [`.run_many`][rigging.chat.PendingChat.run_many] and [`.arun_many`][rigging.chat.PendingChat.arun_many] functions | ||
let you take the same inputs and generation parameters, and simply run the generation multiple times. | ||
|
||
=== "Run Many Code" | ||
|
||
```py | ||
import rigging as rg | ||
|
||
def check_animal(chats: list[rg.Chat]) -> list[rg.Chat]: | ||
return [ | ||
chat.continue_(f"Why did you pick that animal?").meta(questioned=True).run() | ||
if any(a in chat.last.content.lower() for a in ["cat", "dog", "cow", "mouse"]) | ||
else chat | ||
for chat in chats | ||
] | ||
|
||
chats = ( | ||
rg.get_generator("gpt-3.5-turbo") | ||
.chat("Tell me a joke about an animal.") | ||
.map(check_animal) | ||
.run_many(3) | ||
) | ||
|
||
for i, chat in enumerate(chats): | ||
questioned = chat.metadata.get("questioned", False) | ||
print(f"--- Chat {i+1} (?: {questioned}) ---") | ||
print(chat.conversation) | ||
print() | ||
``` | ||
|
||
=== "Output" | ||
|
||
``` | ||
--- Chat 1 (?: False) --- | ||
[user]: Tell me a joke about an animal. | ||
|
||
[assistant]: Why did the spider go to the computer? | ||
|
||
To check his website! | ||
|
||
--- Chat 2 (?: False) --- | ||
[user]: Tell me a joke about an animal. | ||
|
||
[assistant]: Why did the chicken join a band? Because it had the drumsticks! | ||
|
||
--- Chat 3 (?: True) --- | ||
[user]: Tell me a joke about an animal. | ||
|
||
[assistant]: Why don't elephants use computers? | ||
|
||
Because they're afraid of the mouse! | ||
|
||
[user]: Why did you pick that animal? | ||
|
||
[assistant]: I chose an elephant because they are known for their intelligence and gentle nature, making them a popular subject for jokes and humorous anecdotes. Plus, imagining an elephant trying to use a computer and being scared of a tiny mouse is a funny visual image! | ||
``` | ||
|
||
## Batching Inputs | ||
|
||
You can use the [`.run_batch`][rigging.chat.PendingChat.run_batch] and [`.arun_batch`][rigging.chat.PendingChat.arun_batch] | ||
functions to batch across a set of inputs and collect all the chats. As processing proceeds with things like | ||
[`.then`][rigging.chat.PendingChat.then] or [`.until_parsed_as`][rigging.chat.PendingChat.until_parsed_as], the chats | ||
will resolve individually and collapse into the final results. | ||
|
||
=== "Batching Inputs Code" | ||
|
||
```py | ||
import rigging as rg | ||
from rigging.model import CommaDelimitedAnswer | ||
|
||
pending = ( | ||
rg.get_generator('gpt-3.5-turbo') | ||
.chat({ | ||
"role": "system", | ||
"content": f"Always respond with {CommaDelimitedAnswer.xml_tags()} tags."} | ||
) | ||
.until_parsed_as(CommaDelimitedAnswer, attempt_recovery=True) | ||
) | ||
|
||
many = [f"Give me 3 famous {thing}" for thing in ["authors", "painters", "musicians", "hackers"]] | ||
|
||
chats = await pending.arun_batch(many, skip_failed=True) | ||
|
||
for i, chat in enumerate(chats): | ||
print(f"--- Chat {i+1} ({len(chat)}) ---") | ||
print(chat.last.parse(CommaDelimitedAnswer).items) | ||
print() | ||
``` | ||
|
||
=== "Outputs" | ||
|
||
``` | ||
--- Chat 1 (2) --- | ||
['Leonardo da Vinci', 'Vincent van Gogh', 'Pablo Picasso'] | ||
|
||
--- Chat 2 (2) --- | ||
['Michael Jackson', 'Beyoncé', 'The Beatles'] | ||
``` | ||
|
||
!!! tip "Skipping failed results" | ||
|
||
Passing `skip_failed=True` to [`.run_batch`][rigging.chat.PendingChat.run_batch] will cause the function to | ||
ignore any parsing errors like [`ExhaustedMaxRoundsError`][rigging.error.ExhaustedMaxRoundsError] and only | ||
return the chats that were successful. | ||
|
||
|
||
## Batching Parameters | ||
|
||
In addition to batching against input messages or strings, you can fix a single input | ||
and build a batch across a set of generation parameters. The inputs to | ||
[`.run_batch`][rigging.chat.PendingChat.run_batch] and [`.arun_batch`][rigging.chat.PendingChat.arun_batch] | ||
will scale either the generation parameters or the input messages if either is a single item. | ||
|
||
=== "Batching Code" | ||
|
||
```py | ||
import rigging as rg | ||
|
||
pending = rg.get_generator("gpt-3.5-turbo").chat() | ||
|
||
chats = await pending.arun_batch( | ||
["Tell me a short fact about an japanese city."], | ||
[rg.GenerateParams(temperature=t) for t in [0.6, 0.9, 1.2, 1.5, 1.8]] | ||
) | ||
|
||
for i, chat in enumerate(chats): | ||
print(f"--- Chat {i+1} ---") | ||
print(chat.generator_id) | ||
print() | ||
print(chat.conversation) | ||
print() | ||
``` | ||
|
||
=== "Outputs" | ||
|
||
``` | ||
--- Chat 1 --- | ||
litellm!gpt-3.5-turbo,temperature=0.6 | ||
|
||
[assistant]: Tokyo, the capital city of Japan, is the most populous | ||
metropolitan area in the world, with over 37 million residents. | ||
|
||
--- Chat 2 --- | ||
litellm!gpt-3.5-turbo,temperature=0.9 | ||
|
||
[assistant]: Tokyo is the largest metropolitan area in the world, | ||
with a population of over 37 million people. | ||
|
||
--- Chat 3 --- | ||
litellm!gpt-3.5-turbo,temperature=1.2 | ||
|
||
[assistant]: Kyoto, a city in Japan known for its historic temples | ||
and gardens, was once the capital of Japan for over 1,000 years from | ||
794 until the capital was moved to Tokyo in 1869. | ||
|
||
--- Chat 4 --- | ||
litellm!gpt-3.5-turbo,temperature=1.5 | ||
|
||
[assistant]: Nagoya, Japan is known for being one of the leading | ||
manufacturing and industrial regions in the country, with a strong | ||
automotive presence including major factories for Toyota, Honda, and Mitsubishi. | ||
|
||
--- Chat 5 --- | ||
litellm!gpt-3.5-turbo,temperature=1.8 | ||
|
||
[assistant]: Sendai is the largest city in the Tohoku region of | ||
Japan and is known for its incredible natural scenery, such as the | ||
nearby Sendai Bay and Zuihoden mausoleum. | ||
``` |
Oops, something went wrong.