Commit

wip
cxnt committed Nov 5, 2024
1 parent 17886ae commit 2590a67
Showing 2 changed files with 73 additions and 31 deletions.
93 changes: 63 additions & 30 deletions supervisely_integration/train/hyperparameters.yaml
@@ -1,34 +1,67 @@
-general:
-  num_epochs: 20 # Total number of training epochs
+HybridEncoder:
+  eval_spatial_size:
+    - 640
+    - 640
+RTDETRTransformer:
+  eval_spatial_size:
+    - 640
+    - 640
 
-data: # Section for data-related parameters
-  input_size: [640, 640] # Input image size (width, height)
-  train_batch_size: 2 # Batch size for training
-  val_batch_size: 1 # Batch size for validation
-  val_interval: 1 # Interval for running validation
-
-checkpoint:
-  interval: 1 # Interval for saving checkpoints
-  keep_checkpoints: true # Whether to keep saved checkpoints
-  max_checkpoints: 3 # Maximum number of checkpoints to retain
-  save_last: true # Save the last checkpoint after training
-  save_best: true # Save the best-performing checkpoint based on validation
-  save_optimizer_state: false # Save the optimizer state in checkpoints
+checkpoint_step: 1
+clip_max_norm: -1
+ema:
+  decay: 0.9999
+  type: ModelEMA
+  warmups: 2000
+epoches: 1
+lr_scheduler: null
+lr_warmup:
+  end_factor: 1.0
+  start_factor: 0.001
+  total_iters: 25
+  type: LinearLR
 
 optimizer:
-  frozen_stages_override: false # Override frozen stages if applicable
-  type: "AdamW" # Type of optimizer to use
-  learning_rate: 0.0001 # Initial learning rate
-  weight_decay: 0.0001 # Weight decay (L2 regularization factor)
-  clip_grad_norm: true # Enable gradient clipping
-  grad_norm_value: 0.1 # Maximum norm for gradient clipping
+  betas:
+    - 0.9
+    - 0.999
+  lr: 0.0002
+  type: Adam
+  weight_decay: 0.0001
+remap_mscoco_category: false
+save_ema: false
+save_optimizer: false
+# general:
+#   num_epochs: 20 # Total number of training epochs
+
+# data: # Section for data-related parameters
+#   input_size: [640, 640] # Input image size (width, height)
+#   train_batch_size: 2 # Batch size for training
+#   val_batch_size: 1 # Batch size for validation
+#   val_interval: 1 # Interval for running validation
+
+# checkpoint:
+#   interval: 1 # Interval for saving checkpoints
+#   keep_checkpoints: true # Whether to keep saved checkpoints
+#   max_checkpoints: 3 # Maximum number of checkpoints to retain
+#   save_last: true # Save the last checkpoint after training
+#   save_best: true # Save the best-performing checkpoint based on validation
+#   save_optimizer_state: false # Save the optimizer state in checkpoints
+
+# optimizer:
+#   frozen_stages_override: false # Override frozen stages if applicable
+#   type: "AdamW" # Type of optimizer to use
+#   learning_rate: 0.0001 # Initial learning rate
+#   weight_decay: 0.0001 # Weight decay (L2 regularization factor)
+#   clip_grad_norm: true # Enable gradient clipping
+#   grad_norm_value: 0.1 # Maximum norm for gradient clipping
 
-lr_scheduler:
-  type: null # Type of learning rate scheduler
-  by_epoch: true # Schedule learning rate by epochs rather than steps
-  warmup:
-    enabled: true # Enable warmup phase for learning rate
-    steps: 1 # Number of warmup steps
-    ratio: 0.001 # Starting learning rate ratio for warmup
-    start_factor: 0.001 # Starting learning rate factor for warmup
-    end_factor: 1.0 # Ending learning rate factor for warmup
+# lr_scheduler:
+#   type: null # Type of learning rate scheduler
+#   by_epoch: true # Schedule learning rate by epochs rather than steps
+#   warmup:
+#     enabled: true # Enable warmup phase for learning rate
+#     steps: 1 # Number of warmup steps
+#     ratio: 0.001 # Starting learning rate ratio for warmup
+#     start_factor: 0.001 # Starting learning rate factor for warmup
+#     end_factor: 1.0 # Ending learning rate factor for warmup
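
For orientation, the optimizer and lr_warmup values in the new config translate roughly into the following plain-PyTorch objects. This is only a sketch of what the numbers mean, assuming the config is consumed in the usual torch way; the actual wiring lives inside rtdetr_pytorch, and the toy model below is a stand-in. Since lr_scheduler is null here, only the warmup ramp is active, and after total_iters the learning rate simply stays at its base value.

import torch

# Stand-in module; the real model is RT-DETR, built elsewhere from this config.
model = torch.nn.Linear(16, 16)

# optimizer: type Adam, lr 0.0002, betas (0.9, 0.999), weight_decay 0.0001
optimizer = torch.optim.Adam(
    model.parameters(), lr=2e-4, betas=(0.9, 0.999), weight_decay=1e-4
)

# lr_warmup: LinearLR ramps the lr from 0.001 * base_lr to 1.0 * base_lr
# over the first 25 iterations (start_factor, end_factor, total_iters).
warmup = torch.optim.lr_scheduler.LinearLR(
    optimizer, start_factor=0.001, end_factor=1.0, total_iters=25
)

for step in range(30):
    optimizer.step()  # in real training this follows loss.backward()
    warmup.step()     # after total_iters the lr stays at the base value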
11 changes: 10 additions & 1 deletion supervisely_integration/train/main.py
@@ -30,6 +30,7 @@
 default_config_path = os.path.join(config_paths_dir, "placeholder.yml")
 
 app_options = {
+    "use_coco_annotation": True,
     "save_best_checkpoint": True,
     "save_last_checkpoint": True,
     "supported_train_modes": ["finetune", "scratch"],
@@ -74,14 +75,22 @@

 current_file_dir = os.path.dirname(os.path.abspath(__file__))
 output_dir = os.path.join(current_file_dir, "output")
-train = TrainApp(output_dir, models_path, hyperparameters_path, app_options)
+train = TrainApp(models_path, hyperparameters_path, app_options, output_dir)
 
+train
+
+
 @train.start
 def start_training():
     print("-----------------")
     print("Start training")
     print("-----------------")
+
+    # Step 1. convert to COCO format
+    # sly.xxx.convert_to_coco()
+    # Step 2. prepare config.yml (hyperparameters + custom config)
+    # Step 3. train
 
     import rtdetr_pytorch.train as train_cli
 
     custom_config_path = os.path.join(config_paths_dir, "custom.yml")
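
The new "Step 2. prepare config.yml (hyperparameters + custom config)" comment implies overlaying the user-edited hyperparameters on a base model config before handing the result to train_cli. Below is a minimal sketch of such a merge, assuming PyYAML is available; deep_update, build_custom_config, and the base-config filename are illustrative names only, not the app's actual API.

import yaml  # PyYAML, assumed available in the training environment


def deep_update(base: dict, override: dict) -> dict:
    # Recursively overlay override values onto the base config.
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            deep_update(base[key], value)
        else:
            base[key] = value
    return base


def build_custom_config(base_config_path: str, hyperparameters_path: str, out_path: str) -> str:
    # Load the model's default config and the user's hyperparameters.yaml,
    # merge them, and write the result as the custom.yml used for training.
    with open(base_config_path) as f:
        config = yaml.safe_load(f) or {}
    with open(hyperparameters_path) as f:
        hyperparameters = yaml.safe_load(f) or {}
    deep_update(config, hyperparameters)
    with open(out_path, "w") as f:
        yaml.safe_dump(config, f, sort_keys=False)
    return out_path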
