
Merge pull request #42 from microsoft/0.1.3.6
Update to v0.1.3.6
chinganc authored Feb 21, 2025
2 parents 62a2747 + 3c3478f commit feb6748
Showing 6 changed files with 65 additions and 20 deletions.
34 changes: 28 additions & 6 deletions README.md
@@ -274,17 +274,38 @@ with TraceGraph coming soon).
## LLM API Setup

Currently we rely on [LiteLLM](https://github.com/BerriAI/litellm) or [AutoGen v0.2](https://github.com/microsoft/autogen/tree/0.2) for LLM caching and API-Key management.
-By default, LiteLLM is used. To use it, set the keys as the environment variables, e.g.

+By default, LiteLLM is used. To change the default backend, set the environment variable `TRACE_DEFAULT_LLM_BACKEND` in the terminal
+```bash
+export TRACE_DEFAULT_LLM_BACKEND="<your LLM backend here>" # 'LiteLLM' or 'AutoGen'
+```
+or in Python before importing `opto`
+```python
+import os
+os.environ["TRACE_DEFAULT_LLM_BACKEND"] = "<your LLM backend here>" # 'LiteLLM' or 'AutoGen'
+import opto
+```

+### Using LiteLLM as Backend

+Set the keys as the environment variables, following the [documentation of LiteLLM](https://docs.litellm.ai/docs/providers). For example,

```python
import os
os.environ["OPENAI_API_KEY"] = "your-openai-key"
os.environ["ANTHROPIC_API_KEY"] = "your-anthropic-key"
os.environ["OPENAI_API_KEY"] = "<your OpenAI API key here>"
os.environ["ANTHROPIC_API_KEY"] = "<your Anthropic API key here>"
```
In Trace, we add another environment variable `TRACE_LITELLM_MODEL` to set the default model name used by LiteLLM for convenience, e.g.,
```bash
export TRACE_LITELLM_MODEL='gpt-4o'
```
will set all LLM instances in Trace to use `gpt-4o` by default.

Please see the [documentation of LiteLLM](https://docs.litellm.ai/docs/providers) for more details on setting keys and endpoint URLs.

-On the other hand, to use AutoGen, install Trace with autogen flag, `pip install trace-opt[autogen]`. AutoGen relies on `OAI_CONFIG_LIST`, which is a file you put in your working directory. It has the format of:
+### Using AutoGen as Backend
+First install Trace with the autogen flag, `pip install trace-opt[autogen]`. AutoGen relies on `OAI_CONFIG_LIST`, which is a file you put in your working directory. It has the following format:

```json lines
[
@@ -298,7 +319,8 @@
}
]
```
-You switch between different LLM models by changing the `model` field in this configuration file.
+You can switch between different LLM models by changing the `model` field in this configuration file.
+Note that AutoGen will by default use the first model available in this config file.

You can also set an `os.environ` variable `OAI_CONFIG_LIST` to point to the location of this file or directly set a JSON string as the value of this variable.
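
For instance, a minimal sketch of the environment-variable route (the `model` and `api_key` field names follow AutoGen's usual config-list conventions; both values here are placeholders):

```python
import os

# Point AutoGen at a config file stored elsewhere on disk...
os.environ["OAI_CONFIG_LIST"] = "/path/to/OAI_CONFIG_LIST"

# ...or supply the configuration inline as a JSON string.
os.environ["OAI_CONFIG_LIST"] = '[{"model": "gpt-4o", "api_key": "<your OpenAI API key here>"}]'
```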

4 changes: 2 additions & 2 deletions opto/trace/iterators.py
@@ -4,14 +4,14 @@
from opto.trace.bundle import bundle
import opto.trace.operators as ops
from opto.trace.errors import ExecutionError

+import numpy as np

# List[Nodes], Node[List]
def iterate(x: Any):
"""Return an iterator object for node of list, tuple, set, or dict."""
if not isinstance(x, Node):
x = node(x)
-    if issubclass(x.type, list) or issubclass(x.type, tuple) or issubclass(x.type, str):
+    if issubclass(x.type, list) or issubclass(x.type, tuple) or issubclass(x.type, str) or issubclass(x.type, np.ndarray):
        return SeqIterable(x)
    elif issubclass(x.type, set):
        converted_list = ops.to_list(x)
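
A quick sketch of what this change enables, mirroring the new unit test added below (`node` wraps a plain value into a traced `Node`, and iterating yields the elements wrapped as nodes):

```python
import numpy as np
from opto.trace import node

# Elements of an iterable node come back wrapped as nodes.
for v in node([1, 2, 3]):
    print(v.data)

# With this change, a numpy array iterates the same way as a list.
x = node(np.array([1, 2, 3]))
for i, v in enumerate(x):
    assert v.data == x.data[i]
```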
30 changes: 23 additions & 7 deletions opto/utils/llm.py
@@ -5,6 +5,7 @@
import litellm
import os
import openai
+import warnings

try:
    import autogen  # imported inside try/except so autogen is not a required dependency
@@ -153,15 +154,19 @@ class LiteLLM(AbstractModel):
    To use this, set the credentials through the environment variable as
    instructed in the LiteLLM documentation. For convenience, you can set the
-    default model name through the environment variable DEFAULT_LITELLM_MODEL.
+    default model name through the environment variable TRACE_LITELLM_MODEL.
+    When using Azure models via a token provider, you can set the Azure token
+    provider scope through the environment variable AZURE_TOKEN_PROVIDER_SCOPE.
    """

    def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None,
                 cache=True) -> None:
        if model is None:
-            model = os.environ.get('DEFAULT_LITELLM_MODEL', 'gpt-4o')
+            model = os.environ.get('TRACE_LITELLM_MODEL')
+            if model is None:
+                warnings.warn("TRACE_LITELLM_MODEL environment variable was not found when loading the default model for LiteLLM. Falling back to the DEFAULT_LITELLM_MODEL environment variable, whose usage is deprecated. Please use TRACE_LITELLM_MODEL to set the default model name for LiteLLM.")
+                model = os.environ.get('DEFAULT_LITELLM_MODEL', 'gpt-4o')

        self.model_name = model
        self.cache = cache
        factory = lambda: self._factory(self.model_name)  # an LLM instance uses a fixed model
@@ -198,9 +203,9 @@ class CustomLLM(AbstractModel):
    def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None,
                 cache=True) -> None:
        if model is None:
-            model = os.environ.get('DEFAULT_LITELLM_CUSTOM_MODEL', 'gpt-4o')
-            base_url = os.environ.get('DEFAULT_LITELLM_CUSTOM_URL', 'http://xx.xx.xxx.xx:4000')
-            server_api_key = os.environ.get('DEFAULT_LITELLM_CUSTOM_API',
+            model = os.environ.get('TRACE_CUSTOMLLM_MODEL', 'gpt-4o')
+            base_url = os.environ.get('TRACE_CUSTOMLLM_URL', 'http://xx.xx.xxx.xx:4000')
+            server_api_key = os.environ.get('TRACE_CUSTOMLLM_API_KEY',
                                             'sk-Xhg...')  # we assume the server has an API key
            # the server API key is set through `master_key` in `config.yaml` for the LiteLLM proxy server
@@ -224,5 +229,16 @@ def create(self, **config: Any):
        return self._model.chat.completions.create(**config)


-# Set Default LLM class
-LLM = LiteLLM  # synonym

+TRACE_DEFAULT_LLM_BACKEND = os.getenv('TRACE_DEFAULT_LLM_BACKEND', 'LiteLLM')
+if TRACE_DEFAULT_LLM_BACKEND == 'AutoGen':
+    print("Using AutoGen as the default LLM backend.")
+    LLM = AutoGenLLM
+elif TRACE_DEFAULT_LLM_BACKEND == 'CustomLLM':
+    print("Using CustomLLM as the default LLM backend.")
+    LLM = CustomLLM
+elif TRACE_DEFAULT_LLM_BACKEND == 'LiteLLM':
+    print("Using LiteLLM as the default LLM backend.")
+    LLM = LiteLLM
+else:
+    raise ValueError(f"Unknown LLM backend: {TRACE_DEFAULT_LLM_BACKEND}")
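
Putting the pieces together, a minimal sketch of driving this selection logic (the variables must be set before `opto` is imported, since the backend check runs at module import time; `LLM` is imported from `opto.utils.llm`, where it is defined):

```python
import os

os.environ["TRACE_DEFAULT_LLM_BACKEND"] = "LiteLLM"  # or 'AutoGen' / 'CustomLLM'
os.environ["TRACE_LITELLM_MODEL"] = "gpt-4o"  # default model for LiteLLM instances

from opto.utils.llm import LLM  # resolves to the LiteLLM class here

llm = LLM()  # uses gpt-4o by default
```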
2 changes: 1 addition & 1 deletion tests/unit_tests/test_llm.py
@@ -2,7 +2,7 @@
from opto.optimizers.utils import print_color
import os

-if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("DEFAULT_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
+if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
    llm = LLM()
    system_prompt = 'You are a helpful assistant.'
    user_prompt = "Hello world."
11 changes: 9 additions & 2 deletions tests/unit_tests/test_nodes.py
@@ -2,7 +2,7 @@
from opto.trace import node
from opto.trace import operators as ops
from opto.trace.utils import contain

+import numpy as np

# Sum of str
x = node("NodeX")
@@ -151,4 +151,11 @@ def fun(x):
assert x.description == "[ParameterNode] x"

x = node(1, trainable=True)
-assert x.description == "[ParameterNode] This is a ParameterNode in a computational graph."
+assert x.description == "[ParameterNode] This is a ParameterNode in a computational graph."


+# Test iterating numpy array
+x = node(np.array([1, 2, 3]))
+for i, v in enumerate(x):
+    assert isinstance(v, type(x))
+    assert v.data == x.data[i]
4 changes: 2 additions & 2 deletions tests/unit_tests/test_optimizer.py
@@ -34,7 +34,7 @@ def user(x):
    else:
        return "Success."

-if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("DEFAULT_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
+if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
    # One-step optimization example
    x = node(-1.0, trainable=True)
    optimizer = OptoPrime([x])
@@ -124,7 +124,7 @@ def foobar_text(x):
GRAPH.clear()
x = node("negative point one", trainable=True)

-if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("DEFAULT_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
+if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
    optimizer = OptoPrime([x])
    output = foobar_text(x)
    feedback = user(output.data)
