Skip to content

Commit

Permalink
chore: Support Gemini 2.0 flash thinking and pro (#1558)
Browse files Browse the repository at this point in the history
  • Loading branch information
Wendong-Fan authored Feb 5, 2025
1 parent bc46c44 commit d8fbe5d
Show file tree
Hide file tree
Showing 3 changed files with 59 additions and 32 deletions.
12 changes: 9 additions & 3 deletions camel/types/enums.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,9 +117,11 @@ class ModelType(UnifiedModelType, Enum):

# Gemini models
GEMINI_2_0_FLASH = "gemini-2.0-flash-exp"
GEMINI_2_0_FLASH_THINKING = "gemini-2.0-flash-thinking-exp"
GEMINI_2_0_PRO_EXP = "gemini-2.0-pro-exp-02-05"
GEMINI_2_0_FLASH_LITE_PREVIEW = "gemini-2.0-flash-lite-preview-02-05"
GEMINI_1_5_FLASH = "gemini-1.5-flash"
GEMINI_1_5_PRO = "gemini-1.5-pro"
GEMINI_EXP_1114 = "gemini-exp-1114"

# Mistral AI models
MISTRAL_3B = "ministral-3b-latest"
Expand Down Expand Up @@ -380,7 +382,9 @@ def is_gemini(self) -> bool:
ModelType.GEMINI_2_0_FLASH,
ModelType.GEMINI_1_5_FLASH,
ModelType.GEMINI_1_5_PRO,
ModelType.GEMINI_EXP_1114,
ModelType.GEMINI_2_0_FLASH_THINKING,
ModelType.GEMINI_2_0_PRO_EXP,
ModelType.GEMINI_2_0_FLASH_LITE_PREVIEW,
}

@property
Expand Down Expand Up @@ -662,7 +666,9 @@ def token_limit(self) -> int:
ModelType.GEMINI_2_0_FLASH,
ModelType.GEMINI_1_5_FLASH,
ModelType.GEMINI_1_5_PRO,
ModelType.GEMINI_EXP_1114, # Not given in docs, assuming the same
ModelType.GEMINI_2_0_FLASH_THINKING,
ModelType.GEMINI_2_0_FLASH_LITE_PREVIEW,
ModelType.GEMINI_2_0_PRO_EXP,  # Not given in docs, assuming the same
ModelType.GLM_4_LONG,
}:
return 1_048_576
Expand Down
75 changes: 47 additions & 28 deletions examples/models/gemini_model_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,50 +44,69 @@
===============================================================================
'''


# Example of using the Gemini-Exp-1114 model
model_exp = ModelFactory.create(
# Example of using the gemini-2.0-flash-exp model
model_2_0_flash = ModelFactory.create(
model_platform=ModelPlatformType.GEMINI,
model_type=ModelType.GEMINI_EXP_1114,
model_type=ModelType.GEMINI_2_0_FLASH,
model_config_dict=GeminiConfig(temperature=0.2).as_dict(),
)
camel_agent_exp = ChatAgent(system_message=sys_msg, model=model_exp)
response_exp = camel_agent_exp.step(user_msg)
print(response_exp.msgs[0].content)
camel_agent_flash = ChatAgent(system_message=sys_msg, model=model_2_0_flash)
response_flash = camel_agent_flash.step(user_msg)
print(response_flash.msgs[0].content)

'''
===============================================================================
Hi CAMEL AI! It's great to connect with you, an open-source community
dedicated to the fascinating study of autonomous and communicative agents.
Hello! I'm happy to say hi to CAMEL AI, one open-source community dedicated to
the study of autonomous and communicative agents. It sounds like a fascinating
community!
===============================================================================
'''

Your work sounds incredibly exciting and important. The potential of
autonomous agents to collaborate and communicate effectively is truly
transformative. I'm eager to see the advancements and breakthroughs that come
from your community.
# Example of using the gemini-2.0-flash-thinking model
model_2_0_flash_thinking = ModelFactory.create(
model_platform=ModelPlatformType.GEMINI,
model_type=ModelType.GEMINI_2_0_FLASH_THINKING,
model_config_dict=GeminiConfig(temperature=0.2).as_dict(),
)
camel_agent_thinking = ChatAgent(
system_message=sys_msg, model=model_2_0_flash_thinking
)
response_thinking = camel_agent_thinking.step(
"How many rs are there in 'starrary'?"
)
print(response_thinking.msgs[0].content)
'''
===============================================================================
Let's count them out!
Keep up the fantastic work! If there's anything I can assist with, please
don't hesitate to ask. Perhaps I can help with brainstorming ideas,
summarizing information, or even generating creative content related to your
research.
s - no r
t - no r
a - no r
r - yes, that's one!
r - yes, that's two!
a - no r
r - yes, that's three!
y - no r
Let me know how I can be of service!
There are **three** rs in "starrary".
===============================================================================
'''

# Example of using the gemini-2.0-flash-exp model
model_2_0_flash = ModelFactory.create(

# Example of using the gemini-2.0-pro model
model_2_0_pro = ModelFactory.create(
model_platform=ModelPlatformType.GEMINI,
model_type=ModelType.GEMINI_2_0_FLASH,
model_type=ModelType.GEMINI_2_0_PRO_EXP,
model_config_dict=GeminiConfig(temperature=0.2).as_dict(),
)
camel_agent_exp = ChatAgent(system_message=sys_msg, model=model_2_0_flash)
response_exp = camel_agent_exp.step(user_msg)
print(response_exp.msgs[0].content)

camel_agent_pro = ChatAgent(system_message=sys_msg, model=model_2_0_pro)
response_pro = camel_agent_pro.step(user_msg)
print(response_pro.msgs[0].content)
'''
===============================================================================
Hello! I'm happy to say hi to CAMEL AI, one open-source community dedicated to
the study of autonomous and communicative agents. It sounds like a fascinating
community!
Hello CAMEL AI! It's great to connect with an open-source community focused on
the exciting field of autonomous and communicative agents. I'm very interested
in learning more about your work and contributions to this area of research.
Best of luck with your endeavors!
===============================================================================
'''
4 changes: 3 additions & 1 deletion test/models/test_gemini_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,9 @@
ModelType.GEMINI_2_0_FLASH,
ModelType.GEMINI_1_5_FLASH,
ModelType.GEMINI_1_5_PRO,
ModelType.GEMINI_EXP_1114,
ModelType.GEMINI_2_0_FLASH_THINKING,
ModelType.GEMINI_2_0_FLASH_LITE_PREVIEW,
ModelType.GEMINI_2_0_PRO_EXP,
],
)
def test_gemini_model(model_type: ModelType):
Expand Down

0 comments on commit d8fbe5d

Please sign in to comment.