format code
raywhoelse committed Jul 17, 2024
1 parent 755aead commit 1b357bd
Showing 6 changed files with 67 additions and 59 deletions.
21 changes: 6 additions & 15 deletions camel/agents/chat_agent.py
@@ -23,25 +23,16 @@
from camel.agents.base import BaseAgent
from camel.configs import ChatGPTConfig
from camel.configs.gemini_config import GeminiConfig
from camel.toolkits import OpenAIFunction
from camel.memories import (
AgentMemory,
ChatHistoryMemory,
MemoryRecord,
ScoreBasedContextCreator,
)

from vertexai.generative_models import (
Content,
FunctionDeclaration,
GenerativeModel,
Part,
Tool,
)

from camel.messages import BaseMessage, FunctionCallingMessage, OpenAIMessage
from camel.models import BaseModelBackend, ModelFactory
from camel.responses import ChatAgentResponse
from camel.toolkits import OpenAIFunction
from camel.types import (
ChatCompletion,
ChatCompletionChunk,
@@ -498,7 +489,7 @@ async def step_async(
tool_calls = self._add_tools_for_func_call(
response, tool_calls
)

# Function calling disabled or not a function calling
info = self._step_get_info(
output_messages,
@@ -537,7 +528,7 @@ def _add_tools_for_func_call(
function calls.
Returns:
List[FunctionCallingRecord]: The updated list of function call
List[FunctionCallingRecord]: The updated list of function call
records.
"""
# Do function calling
@@ -554,10 +545,10 @@ def _add_tools_for_func_call(
return tool_calls

def _structured_output_openai_response(self, output_schema: BaseModel):
r"""Handles the structured output response
r"""Handles the structured output response
for OpenAI.
This method processes the given output schema and integrates the
This method processes the given output schema and integrates the
resulting function
into the tools for the OpenAI model configuration.
@@ -800,7 +791,7 @@ def step_tool_call(
args = json.loads(args_str)

try:
# If the func name is return_json_format_response,
# If the func name is return_json_format_response,
# the result is openai tools function args
if func_name == 'return_json_format_response':
result = args
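Aside on the hunk above: when the model calls the special return_json_format_response function, its arguments are not forwarded to any tool; they already are the structured JSON the agent should return. A minimal sketch of that dispatch logic, assuming the registered tools live in a plain dict of callables (a simplification, not the actual CAMEL implementation):

import json

def dispatch_tool_call(func_name, args_str, tools):
    # Parse the JSON-encoded arguments produced by the model.
    args = json.loads(args_str)
    if func_name == 'return_json_format_response':
        # Short-circuit: the arguments are the structured output itself.
        return args
    # Otherwise execute the registered tool with the parsed arguments.
    return tools[func_name](**args)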
2 changes: 1 addition & 1 deletion camel/toolkits/openai_function.py
@@ -20,7 +20,7 @@
from pydantic import create_model
from pydantic.fields import FieldInfo

from camel.utils import to_pascal, get_pydantic_object_schema
from camel.utils import get_pydantic_object_schema, to_pascal


def _remove_a_key(d: Dict, remove_key: Any) -> None:
10 changes: 4 additions & 6 deletions camel/utils/__init__.py
@@ -18,23 +18,21 @@
create_chunks,
dependencies_required,
download_tasks,
find_and_check_subset,
func_string_to_callable,
get_first_int,
get_prompt_template_key_words,
get_pydantic_major_version,
get_pydantic_object_schema,
get_system_information,
get_task_list,
json_to_function_code,
is_docker_running,
is_subset,
json_to_function_code,
parse_pydantic_model_as_openai_tools_schema,
print_text_animated,
text_extract_from_web,
to_pascal,
find_and_check_subset,
is_subset,
get_pydantic_object_schema,
get_pydantic_major_version,
parse_pydantic_model_as_openai_tools_schema,
)
from .constants import Constants
from .token_counting import (
28 changes: 17 additions & 11 deletions camel/utils/commons.py
@@ -315,6 +315,7 @@ def to_pascal(snake: str) -> str:
snake.title(),
)


def find_and_check_subset(small, big) -> bool:
r"""
Recursively searches 'big' to find if 'small' is a subset at any level.
@@ -345,6 +346,7 @@ def find_and_check_subset(small, big) -> bool:
# If no match is found, return False
return False


def is_subset(small, big) -> bool:
r"""
Checks if 'small' is a subset of 'big'.
@@ -372,8 +374,10 @@ def is_subset(small, big) -> bool:
# For all other types, directly compare the values
return small == big

def parse_pydantic_model_as_openai_tools_schema(pydantic_params: BaseModel) \
-> Dict[str, Any]:

def parse_pydantic_model_as_openai_tools_schema(
pydantic_params: BaseModel,
) -> Dict[str, Any]:
r"""Parse Pydantic model into a JSON schema format.
Args:
@@ -393,17 +397,17 @@ def parse_pydantic_model_as_openai_tools_schema(pydantic_params: BaseModel) \
"properties": {},
},
"required": [],
}
},
}
pydantic_params_schema = get_pydantic_object_schema(pydantic_params)
source_dict["function"]["parameters"]["properties"] = \
pydantic_params_schema["properties"]
source_dict["function"]["required"] = \
pydantic_params_schema["required"]
source_dict["function"]["parameters"]["properties"] = (
pydantic_params_schema["properties"]
)
source_dict["function"]["required"] = pydantic_params_schema["required"]
return source_dict

def get_pydantic_object_schema(pydantic_params: BaseModel) \
-> Dict[str, Any]:

def get_pydantic_object_schema(pydantic_params: BaseModel) -> Dict[str, Any]:
r"""Get the JSON schema of a Pydantic model.
Args:
@@ -413,15 +417,16 @@ def get_pydantic_object_schema(pydantic_params: BaseModel) \
dict: The JSON schema of the given Pydantic model.
"""
PYDANTIC_MAJOR_VERSION = get_pydantic_major_version()

if PYDANTIC_MAJOR_VERSION == 2:
if issubclass(pydantic_params, pydantic.BaseModel):
return pydantic_params.model_json_schema()
elif issubclass(pydantic_params, pydantic.v1.BaseModel):
return pydantic_params.schema()

return pydantic_params.schema()


def get_pydantic_major_version() -> int:
r"""Get the major version of Pydantic.
@@ -433,6 +438,7 @@ def get_pydantic_major_version() -> int:
except ImportError:
return 0


def get_pydantic_object_schema(pydantic_params: BaseModel) -> Dict:
r"""Get the JSON schema of a Pydantic model.
Args:
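As a quick usage illustration for the helpers reformatted above, here is a minimal sketch of turning a Pydantic model into an OpenAI tools-style schema with parse_pydantic_model_as_openai_tools_schema; the Item model is a hypothetical example, not part of this commit:

from pydantic import BaseModel, Field

from camel.utils import parse_pydantic_model_as_openai_tools_schema


class Item(BaseModel):
    # Hypothetical example model, not taken from this commit.
    name: str = Field(description="item name")
    quantity: int = Field(description="how many items")


# The returned dict embeds the model's JSON schema (obtained via
# get_pydantic_object_schema) under function.parameters.properties,
# with the required field names collected under function.required.
schema = parse_pydantic_model_as_openai_tools_schema(Item)
print(schema["function"]["parameters"]["properties"])
print(schema["function"]["required"])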
22 changes: 14 additions & 8 deletions examples/format_output/format_openai_output.py
@@ -12,23 +12,27 @@
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========

import ast

from pydantic import BaseModel, Field

from camel.agents import ChatAgent
from camel.configs.openai_config import ChatGPTConfig
from camel.messages import BaseMessage
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType
from pydantic import BaseModel, Field
import ast


# Define the output schema
class JokeResponse(BaseModel):
joke: str = Field(description="a joke")
funny_level: str = Field(description="Funny level, from 1 to 10")


# Define system message
assistant_sys_msg = BaseMessage.make_assistant_message(
role_name="Assistant",
content="You are a helpful assistant.",
role_name="Assistant",
content="You are a helpful assistant.",
)

model = ModelFactory.create(
@@ -38,8 +42,9 @@ class JokeResponse(BaseModel):
)

# Set agent
camel_agent = ChatAgent(assistant_sys_msg, model=model, \
output_schema=JokeResponse)
camel_agent = ChatAgent(
assistant_sys_msg, model=model, output_schema=JokeResponse
)


user_msg = BaseMessage.make_user_message(
@@ -49,8 +54,9 @@ class JokeResponse(BaseModel):

# Get response information
response = camel_agent.step(user_msg)
json_output_response = ast.literal_eval(response.choices[0].
message.tool_calls[0].function.arguments)
json_output_response = ast.literal_eval(
response.choices[0].message.tool_calls[0].function.arguments
)
print(json_output_response)
"""
===============================================================================
43 changes: 25 additions & 18 deletions examples/structured_response/json_format_reponse_with_tools.py
@@ -12,30 +12,31 @@
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========

from pydantic import BaseModel, Field

from camel.agents import ChatAgent
from camel.configs.openai_config import ChatGPTConfig
from camel.messages import BaseMessage
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType
from pydantic import BaseModel, Field
from camel.toolkits import (
MATH_FUNCS,
SEARCH_FUNCS,

)
from camel.types import ModelPlatformType, ModelType

function_list = [
*MATH_FUNCS,
*SEARCH_FUNCS,
]
*MATH_FUNCS,
*SEARCH_FUNCS,
]
assistant_model_config = ChatGPTConfig(
tools=function_list,
temperature=0.0,
)
tools=function_list,
temperature=0.0,
)

# Define system message
assistant_sys_msg = BaseMessage.make_assistant_message(
role_name="Assistant",
content="You are a helpful assistant.",
role_name="Assistant",
content="You are a helpful assistant.",
)

model = ModelFactory.create(
@@ -45,20 +46,26 @@
)

# Set agent
camel_agent = ChatAgent(assistant_sys_msg,
model=model,
tools=function_list,)
camel_agent = ChatAgent(
assistant_sys_msg,
model=model,
tools=function_list,
)


# pydantic basemodel as input params format
class Response(BaseModel):
current_age: str = Field(description=" the current age of University of Oxford")
current_age: str = Field(
description=" the current age of University of Oxford"
)
calculated_age: str = Field(description="the add more years of age")


user_msg = BaseMessage.make_user_message(
role_name="User",
content="Assume now is 2024 in the Gregorian calendar, " + \
"estimate the current age of University of Oxford " + \
"and then add 10 more years to this age, "
content="Assume now is 2024 in the Gregorian calendar, "
+ "estimate the current age of University of Oxford "
+ "and then add 10 more years to this age, ",
)

# Get response information
