diff --git a/supervisely_integration/train/hyperparameters.yaml b/supervisely_integration/train/hyperparameters.yaml
index d52de1ab..82751519 100644
--- a/supervisely_integration/train/hyperparameters.yaml
+++ b/supervisely_integration/train/hyperparameters.yaml
@@ -1,34 +1,67 @@
-general:
-  num_epochs: 20 # Total number of training epochs
+HybridEncoder:
+  eval_spatial_size:
+  - 640
+  - 640
+RTDETRTransformer:
+  eval_spatial_size:
+  - 640
+  - 640
 
-data: # Section for data-related parameters
-  input_size: [640, 640] # Input image size (width, height)
-  train_batch_size: 2 # Batch size for training
-  val_batch_size: 1 # Batch size for validation
-  val_interval: 1 # Interval for running validation
-
-checkpoint:
-  interval: 1 # Interval for saving checkpoints
-  keep_checkpoints: true # Whether to keep saved checkpoints
-  max_checkpoints: 3 # Maximum number of checkpoints to retain
-  save_last: true # Save the last checkpoint after training
-  save_best: true # Save the best-performing checkpoint based on validation
-  save_optimizer_state: false # Save the optimizer state in checkpoints
+checkpoint_step: 1
+clip_max_norm: -1
+ema:
+  decay: 0.9999
+  type: ModelEMA
+  warmups: 2000
+epoches: 1
+lr_scheduler: null
+lr_warmup:
+  end_factor: 1.0
+  start_factor: 0.001
+  total_iters: 25
+  type: LinearLR
 
 optimizer:
-  frozen_stages_override: false # Override frozen stages if applicable
-  type: "AdamW" # Type of optimizer to use
-  learning_rate: 0.0001 # Initial learning rate
-  weight_decay: 0.0001 # Weight decay (L2 regularization factor)
-  clip_grad_norm: true # Enable gradient clipping
-  grad_norm_value: 0.1 # Maximum norm for gradient clipping
+  betas:
+  - 0.9
+  - 0.999
+  lr: 0.0002
+  type: Adam
+  weight_decay: 0.0001
+remap_mscoco_category: false
+save_ema: false
+save_optimizer: false
+# general:
+#   num_epochs: 20 # Total number of training epochs
+
+# data: # Section for data-related parameters
+#   input_size: [640, 640] # Input image size (width, height)
+#   train_batch_size: 2 # Batch size for training
+#   val_batch_size: 1 # Batch size for validation
+#   val_interval: 1 # Interval for running validation
+
+# checkpoint:
+#   interval: 1 # Interval for saving checkpoints
+#   keep_checkpoints: true # Whether to keep saved checkpoints
+#   max_checkpoints: 3 # Maximum number of checkpoints to retain
+#   save_last: true # Save the last checkpoint after training
+#   save_best: true # Save the best-performing checkpoint based on validation
+#   save_optimizer_state: false # Save the optimizer state in checkpoints
+
+# optimizer:
+#   frozen_stages_override: false # Override frozen stages if applicable
+#   type: "AdamW" # Type of optimizer to use
+#   learning_rate: 0.0001 # Initial learning rate
+#   weight_decay: 0.0001 # Weight decay (L2 regularization factor)
+#   clip_grad_norm: true # Enable gradient clipping
+#   grad_norm_value: 0.1 # Maximum norm for gradient clipping
 
-lr_scheduler:
-  type: null # Type of learning rate scheduler
-  by_epoch: true # Schedule learning rate by epochs rather than steps
-  warmup:
-    enabled: true # Enable warmup phase for learning rate
-    steps: 1 # Number of warmup steps
-    ratio: 0.001 # Starting learning rate ratio for warmup
-    start_factor: 0.001 # Starting learning rate factor for warmup
-    end_factor: 1.0 # Ending learning rate factor for warmup
+# lr_scheduler:
+#   type: null # Type of learning rate scheduler
+#   by_epoch: true # Schedule learning rate by epochs rather than steps
+#   warmup:
+#     enabled: true # Enable warmup phase for learning rate
+#     steps: 1 # Number of warmup steps
+#     ratio: 0.001 # Starting learning rate ratio for warmup
+#     start_factor: 0.001 # Starting learning rate factor for warmup
+#     end_factor: 1.0 # Ending learning rate factor for warmup
diff --git a/supervisely_integration/train/main.py b/supervisely_integration/train/main.py
index a3e21b70..1035d819 100644
--- a/supervisely_integration/train/main.py
+++ b/supervisely_integration/train/main.py
@@ -30,6 +30,7 @@
 default_config_path = os.path.join(config_paths_dir, "placeholder.yml")
 
 app_options = {
+    "use_coco_annotation": True,
     "save_best_checkpoint": True,
     "save_last_checkpoint": True,
     "supported_train_modes": ["finetune", "scratch"],
@@ -74,7 +75,9 @@
 current_file_dir = os.path.dirname(os.path.abspath(__file__))
 output_dir = os.path.join(current_file_dir, "output")
 
-train = TrainApp(output_dir, models_path, hyperparameters_path, app_options)
+train = TrainApp(models_path, hyperparameters_path, app_options, output_dir)
+
+train
 
 
 @train.start
@@ -82,6 +85,12 @@ def start_training():
     print("-----------------")
     print("Start training")
     print("-----------------")
+
+    # Step 1. convert to COCO format
+    # sly.xxx.convert_to_coco()
+    # Step 2. prepare config.yml (hyperparameters + custom config)
+    # Step 3. train
+
     import rtdetr_pytorch.train as train_cli
 
     custom_config_path = os.path.join(config_paths_dir, "custom.yml")
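
Note on Step 2 above: the reworked hyperparameters.yaml now uses RT-DETR's native top-level keys (optimizer, ema, epoches, lr_warmup, HybridEncoder, RTDETRTransformer), so preparing custom.yml can amount to overlaying these keys onto the placeholder config. The sketch below is illustrative only and not part of this diff; prepare_custom_config is a hypothetical helper, and the placeholder.yml / custom.yml paths are the ones referenced in main.py.

import os
import yaml  # PyYAML


def prepare_custom_config(config_paths_dir: str, hyperparameters_path: str) -> str:
    """Hypothetical Step 2: overlay user hyperparameters onto the placeholder config."""
    with open(os.path.join(config_paths_dir, "placeholder.yml")) as f:
        config = yaml.safe_load(f) or {}
    with open(hyperparameters_path) as f:
        hyperparameters = yaml.safe_load(f) or {}

    # Shallow merge: top-level keys from hyperparameters.yaml (optimizer, ema,
    # epoches, lr_warmup, HybridEncoder, RTDETRTransformer, ...) override the
    # placeholder values.
    config.update(hyperparameters)

    custom_config_path = os.path.join(config_paths_dir, "custom.yml")
    with open(custom_config_path, "w") as f:
        yaml.safe_dump(config, f, sort_keys=False)
    return custom_config_path

With something like this in place, start_training() could hand the returned custom_config_path to rtdetr_pytorch.train (Step 3).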