Commit 0aec777 (1 parent: 83ccb65): 246 changed files with 3,893,037 additions and 0 deletions.
@@ -0,0 +1,140 @@
# -*- coding: utf-8 -*-
"""
This module provides the FinetuneDialogAgent class,
which extends DialogAgent to enhance fine-tuning
capabilities with custom hyperparameters.
"""
from typing import Any, Optional, Dict

from loguru import logger

from agentscope.agents import DialogAgent


class FinetuneDialogAgent(DialogAgent):
    """
    A dialog agent capable of fine-tuning its
    underlying model based on provided data.

    Inherits from DialogAgent and adds functionality for
    fine-tuning with custom hyperparameters.
    """

    def __init__(
        self,
        name: str,
        sys_prompt: str,
        model_config_name: str,
        use_memory: bool = True,
        memory_config: Optional[dict] = None,
    ):
        """
        Initialize a new FinetuneDialogAgent with the specified configuration.

        Arguments:
            name (str): Name of the agent.
            sys_prompt (str): System prompt or description of the agent's role.
            model_config_name (str): The configuration name for
                                     the underlying model.
            use_memory (bool, optional): Indicates whether to utilize
                                         memory features. Defaults to True.
            memory_config (dict, optional): Configuration for memory
                                            functionalities if
                                            `use_memory` is True.

        Note:
            Refer to `class DialogAgent(AgentBase)` for more information.
        """

        super().__init__(
            name,
            sys_prompt,
            model_config_name,
            use_memory,
            memory_config,
        )

    def load_model(
        self,
        pretrained_model_name_or_path: Optional[str] = None,
        local_model_path: Optional[str] = None,
    ) -> None:
        """
        Load a new model into the agent.

        Arguments:
            pretrained_model_name_or_path (str): The Hugging Face model ID
                or a custom identifier. Needed if loading the model from
                Hugging Face.
            local_model_path (str, optional): Path to a locally saved model.

        Raises:
            Exception: If the model loading process fails or if the
                       model wrapper does not support dynamic loading.
        """

        if hasattr(self.model, "load_model"):
            self.model.load_model(
                pretrained_model_name_or_path,
                local_model_path,
            )
        else:
            logger.error(
                "The model wrapper does not support dynamic model loading.",
            )

    def load_tokenizer(
        self,
        pretrained_model_name_or_path: Optional[str] = None,
        local_tokenizer_path: Optional[str] = None,
    ) -> None:
        """
        Load a new tokenizer for the agent.

        Arguments:
            pretrained_model_name_or_path (str): The Hugging Face model ID
                or a custom identifier. Needed if loading the tokenizer
                from Hugging Face.
            local_tokenizer_path (str, optional): Path to a locally saved
                tokenizer.

        Raises:
            Exception: If the tokenizer loading process fails or if the
                       model wrapper does not support dynamic loading.
        """

        if hasattr(self.model, "load_tokenizer"):
            self.model.load_tokenizer(
                pretrained_model_name_or_path,
                local_tokenizer_path,
            )
        else:
            logger.error("The model wrapper does not support dynamic loading.")

    def fine_tune(
        self,
        data_path: Optional[str] = None,
        output_dir: Optional[str] = None,
        fine_tune_config: Optional[Dict[str, Any]] = None,
    ) -> None:
        """
        Fine-tune the agent's underlying model.

        Arguments:
            data_path (str): The path to the training data.
            output_dir (str, optional): User-specified path to save the
                fine-tuned model and its tokenizer. Defaults to this
                example's directory if not specified.
            fine_tune_config (dict, optional): Custom hyperparameters and
                other options passed through to the model wrapper's
                fine-tuning method.

        Raises:
            Exception: If fine-tuning fails or if the
                       model wrapper does not support fine-tuning.
        """

        if hasattr(self.model, "fine_tune"):
            self.model.fine_tune(data_path, output_dir, fine_tune_config)
            logger.info("Fine-tuning completed successfully.")
        else:
            logger.error("The model wrapper does not support fine-tuning.")
@@ -0,0 +1,74 @@
# Multi-Agent Conversation with Custom Model Loading and Fine-Tuning in AgentScope

This example demonstrates how to load and optionally fine-tune a Hugging Face model within a multi-agent conversation setup using AgentScope. The complete code is provided in `agentscope/examples/conversation_with_agent_with_finetuned_model`.
## Functionality Overview

Compared to a basic conversation setup, this example introduces model loading and fine-tuning features:

- Initialize an agent, or use `dialog_agent.load_model(pretrained_model_name_or_path, local_model_path)`, to load a model either from the Hugging Face Model Hub or from a local directory.
- Initialize an agent, or apply `dialog_agent.fine_tune(data_path)`, to fine-tune the model on your dataset with the QLoRA method (https://huggingface.co/blog/4bit-transformers-bitsandbytes).

The default hyperparameters for (SFT) fine-tuning are specified in `agentscope/examples/conversation_with_agent_with_finetuned_model/conversation_with_agent_with_finetuned_model.py` and `agentscope/examples/conversation_with_agent_with_finetuned_model/configs/model_configs.json`. For customized hyperparameters, specify them in `model_configs` if the model needs to be fine-tuned at initialization, or pass them through `fine_tune_config` in `Finetune_DialogAgent`'s `fine_tune` method after initialization, as shown in the example script `conversation_with_agent_with_finetuned_model.py`.
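A minimal usage sketch of the post-initialization path is shown below. The `FinetuneDialogAgent` constructor, `load_model`, `load_tokenizer`, and `fine_tune` come from the class in this commit; the module name, config name, and the keys inside `fine_tune_config` are illustrative assumptions rather than values taken from this example.

```python
# Sketch only: module name, config name, and fine_tune_config keys are assumed.
import agentscope
from FinetuneDialogAgent import FinetuneDialogAgent  # module name assumed

# Register model configs so that model_config_name below can be resolved.
agentscope.init(model_configs="./configs/model_configs.json")

dialog_agent = FinetuneDialogAgent(
    name="Assistant",
    sys_prompt="You are a helpful assistant.",
    model_config_name="my_finetuned_model",  # assumed config name
)

# Load the base model and tokenizer, then fine-tune on a dataset with QLoRA.
dialog_agent.load_model(pretrained_model_name_or_path="google/gemma-7b")
dialog_agent.load_tokenizer(pretrained_model_name_or_path="google/gemma-7b")
dialog_agent.fine_tune(
    data_path="GAIR/lima",
    fine_tune_config={"max_steps": 200, "per_device_train_batch_size": 1},
)
```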
## Agent Initialization

When initializing an agent, the following parameters need to be specified (a configuration sketch follows the list):

- `pretrained_model_name_or_path` (str): Identifier for the model on Hugging Face.
- `local_model_path` (str): Local path to the model (defaults to loading from Hugging Face if not provided).
- `data_path` (str): Path to training data (fine-tuning is skipped if not provided).
- `device` (str): The device (e.g., 'cuda', 'cpu') for model operation, defaulting to 'cuda' if available.
- `fine_tune_config` (dict, optional): A configuration dictionary for fine-tuning the model. It allows specifying hyperparameters and other training options that are passed to the fine-tuning method; if not provided, default settings are used. This allows the fine-tuning process to be customized to specific requirements.
- `huggingface_token` (from .env file): Token required for models needing authentication from Hugging Face.
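The exact schema of `model_configs.json` and of the training options consumed by this example is not reproduced in this diff, so the dictionary below is only an illustrative grouping of the parameters listed above; key names should be checked against the actual config file before use.

```python
# Illustrative grouping of the initialization parameters listed above.
# Key names are assumptions, not the verified schema of model_configs.json.
example_settings = {
    "pretrained_model_name_or_path": "google/gemma-7b",
    "local_model_path": None,          # fall back to the Hugging Face Hub
    "data_path": "GAIR/lima",          # omit to skip fine-tuning
    "device": "cuda",
    "fine_tune_config": {
        "max_steps": 200,              # assumed SFT/TRL-style options
        "per_device_train_batch_size": 1,
        "learning_rate": 2e-4,
    },
}
```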
## Example Outputs

### Before Fine-tuning:
```
System:
Explain in simple terms how the attention mechanism of a transformer model works.
Assistant:
pessimal answer: Attn explications: Attn is a type of attention mechanism. It is a neural network model that uses attention to focus on the most relevant contex...
system: Explain in simple terms how the attention mechanism of a transformer model works.
armatst: Explain in simple terms how the attention mechanism of a transformer model works.
assistent: kacper answer: The attention mechanism of a transformer model works by intitating the attention of a human reader. It glances at the contex...
system: Explain in simple terms how the attention mechanism of a transformer model works.
assistent: Explain in simple terms how the
```

### After Fine-tuning (with the default configuration in `model_configs`):
```
System:
Explain in simple terms how the attention mechanism of a transformer model works.
Assistant:
Sure, the attention mechanism of a transformer model is an important part of the model's ability to generate coherent text. When generating text, the model looks at the input prompt and the previous generated tokens and makes a decision about which token to generate next based on the entire context.
Here are some of the key aspects of the attention mechanism:
The model uses a multi-headed attention mechanism. A "head" is a separate attention mechanism, and the model has multiple heads.
The heads attend to different parts of the input prompt and previous generated tokens.
The heads output weights used in the final output layer to
```

(This example was trained with the default settings; training took 872 seconds and used 9.914 GB of GPU memory. Reducing the training batch size can reduce the memory required. Note that the model is loaded in 4 bits, i.e., QLoRA.)
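For context on the 4-bit (QLoRA) loading mentioned above, a generic sketch with `transformers` and `bitsandbytes` is shown below; it reflects the standard Hugging Face API from the blog linked earlier, not the exact code in this example's `huggingface_model.py`.

```python
# Generic 4-bit (QLoRA-style) model loading with transformers + bitsandbytes.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # quantize weights to 4 bits
    bnb_4bit_quant_type="nf4",              # NormalFloat4 quantization
    bnb_4bit_compute_dtype=torch.bfloat16,  # compute in bfloat16
)

model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-7b",
    quantization_config=bnb_config,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
```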
## Tested Models

The example was tested with the Hugging Face model `google/gemma-7b` on the dataset `GAIR/lima`. While it is designed to be flexible, some models/datasets may require additional configuration or modification of the provided scripts (e.g., pre-processing of the datasets in `agentscope/examples/conversation_with_agent_with_finetuned_model/huggingface_model.py`).
## Prerequisites

Before running this example, ensure you have installed the following packages:

- `transformers`
- `python-dotenv`
- `datasets`
- `trl`
- `bitsandbytes`

Additionally, set `HUGGINGFACE_TOKEN` in `agentscope/examples/conversation_with_agent_with_finetuned_model/.env`, as sketched below.
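How the token might be read from that `.env` file with `python-dotenv` (listed above) is sketched here; the variable name comes from this README, while the loading location and how the example script consumes the token are assumptions.

```python
# Sketch: read HUGGINGFACE_TOKEN from the example's .env with python-dotenv.
import os
from dotenv import load_dotenv

load_dotenv()  # loads variables from a nearby .env file into the environment
huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
```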
Then run the example:

```bash
python conversation_with_agent_with_finetuned_model.py
```
...small_llms_nscc/ToolBenchCaller/alpha-umi-caller-13b_2024-06-06_01-19-10_log_history.json: 1 change (1 addition, 0 deletions; large diff not rendered)
...small_llms_nscc/ToolBenchCaller/alpha-umi-caller-13b_2024-06-07_00-59-18_log_history.json: 1 change (1 addition, 0 deletions; large diff not rendered)