Update inference settings file format and add model export options for ONNX and TensorRT
cxnt committed Dec 4, 2024
1 parent d2242c9 commit 64d1861
Showing 3 changed files with 28 additions and 4 deletions.
supervisely_integration/serve/rtdetrv2.py (2 changes: 1 addition & 1 deletion)
@@ -24,7 +24,7 @@ class RTDETRv2(sly.nn.inference.ObjectDetection):
    FRAMEWORK_NAME = "RT-DETRv2"
    MODELS = "supervisely_integration/models_v2.json"
    APP_OPTIONS = f"{SERVE_PATH}/app_options.yaml"
-   INFERENCE_SETTINGS = f"{SERVE_PATH}/inference_settings.json"
+   INFERENCE_SETTINGS = f"{SERVE_PATH}/inference_settings.yaml"
    # TODO: may be do it auto?

    def load_model(
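
The serve entry point now reads its inference settings from YAML instead of JSON. Purely as a minimal sketch (the confidence_threshold key and the use of PyYAML are assumptions, not taken from this commit), such a file could be loaded like this:

import yaml  # PyYAML, assumed to be available in the serving environment

# Hypothetical inference_settings.yaml contents (key name is an assumption):
#   confidence_threshold: 0.4
with open("supervisely_integration/serve/inference_settings.yaml") as f:
    settings = yaml.safe_load(f)  # parses the YAML file into a Python dict
print(settings.get("confidence_threshold"))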
supervisely_integration/train/app_options.yaml (4 changes: 4 additions & 0 deletions)
@@ -4,6 +4,10 @@ device_selector: false
# Add options to run model benchmark after training
model_benchmark: true

+ # Export model
+ export_onnx_supported: true
+ export_tensorrt_supported: true

# Enable this option when using supervisely train logger
# train_logger: "tensorboard"

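
The two new flags declare that ONNX and TensorRT export are supported by this training app. A short sketch of checking them with PyYAML, for illustration only (the Supervisely SDK consumes app_options.yaml itself; this snippet is not part of the commit):

import yaml

with open("supervisely_integration/train/app_options.yaml") as f:
    options = yaml.safe_load(f)

# export_onnx_supported and export_tensorrt_supported are the keys added by this commit.
if options.get("export_onnx_supported"):
    print("ONNX export enabled")
if options.get("export_tensorrt_supported"):
    print("TensorRT export enabled")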
supervisely_integration/train/main.py (26 changes: 23 additions & 3 deletions)
@@ -28,12 +28,16 @@

# For debug
# app_state = {
# "input": {"project_id": 42201},
# "input": {"project_id": 43192},
# "train_val_split": {"method": "random", "split": "train", "percent": 80},
# "classes": ["dog", "horse", "cat", "squirrel", "sheep"],
# "classes": ["apple"],
# "model": {"source": "Pretrained models", "model_name": "RT-DETRv2-S"},
# "hyperparameters": "epoches: 2\nbatch_size: 16\neval_spatial_size: [640, 640] # height, width\n\ncheckpoint_freq: 5\nsave_optimizer: false\nsave_ema: false\n\noptimizer:\n type: AdamW\n lr: 0.0001\n betas: [0.9, 0.999]\n weight_decay: 0.0001\n\nclip_max_norm: 0.1\n\nlr_scheduler:\n type: MultiStepLR # CosineAnnealingLR | OneCycleLR\n milestones: [35, 45] # epochs\n gamma: 0.1\n\nlr_warmup_scheduler:\n type: LinearWarmup\n warmup_duration: 1000 # steps\n\nuse_ema: True \nema:\n type: ModelEMA\n decay: 0.9999\n warmups: 2000\n\nuse_amp: True\n",
# "options": {"model_benchmark": {"enable": True, "speed_test": True}, "cache_project": True},
# "options": {
# "model_benchmark": {"enable": True, "speed_test": True},
# "cache_project": True,
# "export": {"onnx": True, "tensorrt": True},
# },
# }
# train.gui.load_from_app_state(app_state)

@@ -69,6 +73,22 @@ def start_training():
    return experiment_info


+ @train.export_onnx
+ def export_onnx():
+     print("---------------------------------------------")
+     print("Exporting ONNX for RT-DETRv2")
+     print("---------------------------------------------")
+     return "path/to/onnx"
+
+
+ @train.export_tensorrt
+ def export_tensorrt():
+     print("---------------------------------------------")
+     print("Exporting TensorRT for RT-DETRv2")
+     print("---------------------------------------------")
+     return "path/to/tensorrt"


def convert_data():
    project = train.sly_project
    meta = project.meta
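
The @train.export_onnx and @train.export_tensorrt hooks added above are still stubs that print a banner and return placeholder paths. Purely as a hedged sketch of what the ONNX branch might eventually do (the model argument, the 640x640 input size mirroring eval_spatial_size in the debug hyperparameters, and the output path are assumptions, not part of this commit):

import torch

def export_onnx_sketch(model: torch.nn.Module, out_path: str = "model.onnx") -> str:
    # Trace the model with a dummy 1x3x640x640 tensor; 640x640 mirrors the
    # eval_spatial_size used in the debug hyperparameters, batch size 1 is arbitrary.
    model.eval()
    dummy_input = torch.randn(1, 3, 640, 640)
    torch.onnx.export(model, dummy_input, out_path, opset_version=16)
    # A real hook would return the exported file path, matching the stub's contract.
    return out_path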
