diff --git a/.github/workflows/build_image.yml b/.github/workflows/build_image.yml
index 0a56cc5..295c44d 100644
--- a/.github/workflows/build_image.yml
+++ b/.github/workflows/build_image.yml
@@ -4,7 +4,7 @@ on:
   workflow_dispatch:
     inputs:
       tag_version:
-        description: "Docker Image Tag"
+        description: 'Docker Image Tag (without "v")'
         required: true
         default: ""
 
@@ -23,16 +23,16 @@ jobs:
           docker-images: false
           swap-storage: true
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       - name: Login to DockerHub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
       - name: Build and push
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v5
        with:
          push: true
          file: docker/Dockerfile
diff --git a/dev_requirements.txt b/dev_requirements.txt
index 5355c79..f8faf94 100644
--- a/dev_requirements.txt
+++ b/dev_requirements.txt
@@ -1,12 +1,7 @@
-supervisely==6.73.96
+supervisely==6.73.100
 
 openmim
 ffmpeg-python==0.2.0
 pyyaml==6.0
 
-# torch==1.13.0
-# torchvision==0.14.0
-
-# mmcv-full==1.7.0
-# mmsegmentation==0.29.1
-# mmcls==0.24.1
+# yapf==0.40.1
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 8077c0d..4058e30 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -8,10 +8,10 @@ RUN pip3 install mmcv-full==1.4.4 -f https://download.openmmlab.com/mmcv/dist/cu
 RUN pip3 install mmsegmentation==0.23.0
 RUN pip3 install mmcls==0.21.0
 RUN pip3 install pyyaml==6.0
+RUN pip3 install yapf==0.40.1
 
 # RUN pip3 install torch==1.13.0
 # RUN pip3 install torchvision==0.14.0
-RUN pip3 install ffmpeg-python==0.2.0
 
 COPY dev_requirements.txt dev_requirements.txt
 RUN pip3 install -r dev_requirements.txt
diff --git a/serve/config.json b/serve/config.json
index 613ca8b..5c899e7 100644
--- a/serve/config.json
+++ b/serve/config.json
@@ -11,7 +11,7 @@
     "serve"
   ],
   "description": "Deploy model as REST API service",
-  "docker_image": "supervisely/mmseg:1.3.7",
+  "docker_image": "supervisely/mmseg:1.3.9",
   "instance_version": "6.8.88",
   "entrypoint": "python -m uvicorn main:m.app --app-dir ./serve/src --host 0.0.0.0 --port 8000 --ws websockets",
   "port": 8000,
diff --git a/serve/dev_requirements.txt b/serve/dev_requirements.txt
new file mode 100644
index 0000000..e6d338a
--- /dev/null
+++ b/serve/dev_requirements.txt
@@ -0,0 +1,14 @@
+# git+https://github.com/supervisely/supervisely.git@some-test-branch
+
+supervisely==6.73.100
+
+openmim
+ffmpeg-python==0.2.0
+pyyaml==6.0
+
+torch==1.13.0
+torchvision==0.14.0
+
+mmcv-full==1.7.0
+mmsegmentation==0.29.1
+mmcls==0.24.1
diff --git a/serve/src/main.py b/serve/src/main.py
index 8ef2c92..2d7a4b1 100644
--- a/serve/src/main.py
+++ b/serve/src/main.py
@@ -25,9 +25,13 @@ import supervisely as sly
 
 from serve.src import utils
 
-from supervisely.app.widgets import (CustomModelsSelector,
-                                     PretrainedModelsSelector, RadioTabs,
-                                     Widget)
+from supervisely.nn.artifacts.mmsegmentation import MMSegmentation
+from supervisely.app.widgets import (
+    CustomModelsSelector,
+    PretrainedModelsSelector,
+    RadioTabs,
+    Widget,
+)
 from supervisely.io.fs import silent_remove
 
 root_source_path = str(Path(__file__).parents[2])
@@ -43,12 +47,14 @@ models_meta_path = os.path.join(root_source_path, "models", "model_meta.json")
 
 
 # for local debug
-selected_checkpoint = None 
+selected_checkpoint = None
 selected_model_name = None
 
+
 def str_to_class(classname):
     return getattr(sys.modules[__name__], classname)
 
+
 configs_dir = os.path.join(root_source_path, "configs")
 mmseg_ver = pkg_resources.get_distribution("mmsegmentation").version
 if os.path.isdir(f"/tmp/mmseg/mmsegmentation-{mmseg_ver}"):
@@ -66,7 +72,8 @@ def initialize_custom_gui(self) -> Widget:
         models = self.get_models()
         filtered_models = utils.filter_models_structure(models)
         self.pretrained_models_table = PretrainedModelsSelector(filtered_models)
-        custom_models = sly.nn.checkpoints.mmsegmentation.get_list(api, team_id)
+        sly_mmseg = MMSegmentation(team_id)
+        custom_models = sly_mmseg.get_list()
         self.custom_models_table = CustomModelsSelector(
             team_id,
             custom_models,
@@ -114,7 +121,9 @@ def load_model_meta(
         self, model_source: str, cfg: Config, checkpoint_name: str = None, arch_type: str = None
     ):
         def set_common_meta(classes, palette):
-            obj_classes = [sly.ObjClass(name, sly.Bitmap, color) for name, color in zip(classes, palette)]
+            obj_classes = [
+                sly.ObjClass(name, sly.Bitmap, color) for name, color in zip(classes, palette)
+            ]
             self.checkpoint_name = checkpoint_name
             self.dataset_name = cfg.dataset_type
             self.class_names = classes
@@ -122,9 +131,9 @@ def set_common_meta(classes, palette):
         self._get_confidence_tag_meta()
 
         if model_source == "Custom models":
-            self.selected_model_name = cfg.pretrained_model 
+            self.selected_model_name = cfg.pretrained_model
             classes = cfg.checkpoint_config.meta.CLASSES
-            palette = cfg.checkpoint_config.meta.PALETTE 
+            palette = cfg.checkpoint_config.meta.PALETTE
             set_common_meta(classes, palette)
 
         elif model_source == "Pretrained models":
@@ -137,7 +146,6 @@ def set_common_meta(classes, palette):
             self.model.CLASSES = classes
             self.model.PALETTE = palette
 
-
     def load_model(
         self,
         device: Literal["cpu", "cuda", "cuda:0", "cuda:1", "cuda:2", "cuda:3"],
@@ -199,23 +207,23 @@ def load_model(
             cfg = Config.fromfile(local_config_path)
             cfg.model.pretrained = None
             cfg.model.train_cfg = None
-            
-            self.model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
-            checkpoint = load_checkpoint(self.model, local_weights_path, map_location='cpu')
-            
+
+            self.model = build_segmentor(cfg.model, test_cfg=cfg.get("test_cfg"))
+            checkpoint = load_checkpoint(self.model, local_weights_path, map_location="cpu")
+
             self.load_model_meta(model_source, cfg, checkpoint_name, arch_type)
-            
+
             self.model.cfg = cfg # save the config in the model for convenience
             self.model.to(device)
             self.model.eval()
             self.model = revert_sync_batchnorm(self.model)
-            
+
         except KeyError as e:
             raise KeyError(f"Error loading config file: {local_config_path}. Error: {e}")
-            
+
     def load_on_device(
         self,
-        model_dir: str, 
+        model_dir: str,
         device: Literal["cpu", "cuda", "cuda:0", "cuda:1", "cuda:2", "cuda:3"] = "cpu",
     ) -> None:
         self.device = device
@@ -223,21 +231,26 @@ def load_on_device(
         model_source = self.gui.get_model_source()
         if model_source == "Pretrained models":
             selected_model = self.gui.get_checkpoint_info()
-            weights_path, config_path = self.download_pretrained_files(selected_model, model_dir)
+            weights_path, config_path = self.download_pretrained_files(
+                selected_model, model_dir
+            )
         elif model_source == "Custom models":
             custom_weights_link = self.gui.get_custom_link()
-            weights_path, config_path = self.download_custom_files(custom_weights_link, model_dir)
+            weights_path, config_path = self.download_custom_files(
+                custom_weights_link, model_dir
+            )
         else:
             # for local debug only
             model_source = "Pretrained models"
-            weights_path, config_path = self.download_pretrained_files(selected_checkpoint, model_dir)
-
-
+            weights_path, config_path = self.download_pretrained_files(
+                selected_checkpoint, model_dir
+            )
+
         cfg = Config.fromfile(config_path)
         cfg.model.pretrained = None
         cfg.model.train_cfg = None
-        model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
-        checkpoint = load_checkpoint(model, weights_path, map_location='cpu')
+        model = build_segmentor(cfg.model, test_cfg=cfg.get("test_cfg"))
+        checkpoint = load_checkpoint(model, weights_path, map_location="cpu")
         if model_source == "Custom models":
             classes = cfg.checkpoint_config.meta.CLASSES
             palette = cfg.checkpoint_config.meta.PALETTE
@@ -267,7 +280,9 @@ def load_on_device(
         self.model = model
         self.class_names = classes
 
-        obj_classes = [sly.ObjClass(name, sly.Bitmap, color) for name, color in zip(classes, palette)]
+        obj_classes = [
+            sly.ObjClass(name, sly.Bitmap, color) for name, color in zip(classes, palette)
+        ]
         self._model_meta = sly.ProjectMeta(obj_classes=sly.ObjClassCollection(obj_classes))
         print(f"✅ Model has been successfully loaded on {device.upper()} device")
 
@@ -285,14 +300,16 @@ def get_models(self):
         for model_meta in model_yamls:
             mmseg_ver = pkg_resources.get_distribution("mmsegmentation").version
             model_yml_url = f"https://github.com/open-mmlab/mmsegmentation/tree/v{mmseg_ver}/configs/{model_meta['yml_file']}"
-            model_yml_local = os.path.join(configs_dir, model_meta['yml_file'])
+            model_yml_local = os.path.join(configs_dir, model_meta["yml_file"])
             with open(model_yml_local, "r") as stream:
                 model_info = yaml.safe_load(stream)
                 model_config[model_meta["model_name"]] = {}
                 model_config[model_meta["model_name"]]["checkpoints"] = []
                 model_config[model_meta["model_name"]]["paper_from"] = model_meta["paper_from"]
                 model_config[model_meta["model_name"]]["year"] = model_meta["year"]
-                model_config[model_meta["model_name"]]["config_url"] = os.path.dirname(model_yml_url)
+                model_config[model_meta["model_name"]]["config_url"] = os.path.dirname(
+                    model_yml_url
+                )
                 for model in model_info["Models"]:
                     checkpoint_info = OrderedDict()
                     checkpoint_info["Model"] = model["Name"]
@@ -300,18 +317,22 @@ def get_models(self):
                     checkpoint_info["Method"] = model["In Collection"]
                     checkpoint_info["Dataset"] = model["Results"][0]["Dataset"]
                     try:
-                        checkpoint_info["Inference Time (ms/im)"] = model["Metadata"]["inference time (ms/im)"][0]["value"]
+                        checkpoint_info["Inference Time (ms/im)"] = model["Metadata"][
+                            "inference time (ms/im)"
+                        ][0]["value"]
                     except KeyError:
                         checkpoint_info["Inference Time (ms/im)"] = "-"
                     checkpoint_info["Input Size (H, W)"] = model["Metadata"]["crop size"]
                     checkpoint_info["LR scheduler (steps)"] = model["Metadata"]["lr schd"]
                     try:
-                        checkpoint_info["Memory (Training, GB)"] = model["Metadata"]["Training Memory (GB)"]
+                        checkpoint_info["Memory (Training, GB)"] = model["Metadata"][
+                            "Training Memory (GB)"
+                        ]
                     except KeyError:
                         checkpoint_info["Memory (Training, GB)"] = "-"
                     for metric_name, metric_val in model["Results"][0]["Metrics"].items():
                         checkpoint_info[metric_name] = metric_val
-                    #checkpoint_info["config_file"] = os.path.join(f"https://github.com/open-mmlab/mmsegmentation/tree/v{mmseg_ver}", model["Config"])
+                    # checkpoint_info["config_file"] = os.path.join(f"https://github.com/open-mmlab/mmsegmentation/tree/v{mmseg_ver}", model["Config"])
                     checkpoint_info["meta"] = {
                         "task_type": None,
                         "arch_type": None,
@@ -335,10 +356,13 @@ def predict(
 
 
 if sly.is_production():
-    sly.logger.info("Script arguments", extra={
-        "context.teamId": sly.env.team_id(),
-        "context.workspaceId": sly.env.workspace_id(),
-    })
+    sly.logger.info(
+        "Script arguments",
+        extra={
+            "context.teamId": sly.env.team_id(),
+            "context.workspaceId": sly.env.workspace_id(),
+        },
+    )
 
 m = MMSegmentationModel(use_gui=True)
@@ -360,4 +384,3 @@ def predict(
     vis_path = "./demo_data/image_01_prediction.jpg"
     m.visualize(results, image_path, vis_path, thickness=0)
     print(f"predictions and visualization have been saved: {vis_path}")
-
diff --git a/train/config.json b/train/config.json
index b999c07..3bbd7bb 100644
--- a/train/config.json
+++ b/train/config.json
@@ -1,35 +1,33 @@
 {
-    "name": "Train MMSegmentation",
-    "type": "app",
-    "categories": [
-        "neural network",
-        "images",
-        "videos",
-        "semantic segmentation",
-        "segmentation & tracking",
-        "train"
-    ],
-    "description": "Dashboard to configure, start and monitor training",
-    "docker_image": "supervisely/mmseg:1.3.4",
-    "min_instance_version": "6.8.48",
-    "main_script": "train/src/main.py",
-    "gui_template": "train/src/gui.html",
-    "task_location": "workspace_tasks",
-    "need_gpu": true,
-    "gpu": "required",
-    "isolate": true,
-    "icon": "https://i.imgur.com/GaEFmkH.png",
-    "icon_cover": true,
-    "context_menu": {
-        "target": [
-            "images_project"
-        ],
-        "context_root": "Neural Networks",
-        "context_category": "MM Segmentation"
-    },
-    "poster": "https://user-images.githubusercontent.com/48245050/182847473-9a35f213-c27b-4abd-bd64-c73bf80fb056.jpg",
-    "community_agent": false,
-    "license": {
-        "type": "Apache-2.0"
-    }
+  "name": "Train MMSegmentation",
+  "type": "app",
+  "categories": [
+    "neural network",
+    "images",
+    "videos",
+    "semantic segmentation",
+    "segmentation & tracking",
+    "train"
+  ],
+  "description": "Dashboard to configure, start and monitor training",
+  "docker_image": "supervisely/mmseg:1.3.9",
+  "min_instance_version": "6.8.48",
+  "main_script": "train/src/main.py",
+  "gui_template": "train/src/gui.html",
+  "task_location": "workspace_tasks",
+  "need_gpu": true,
+  "gpu": "required",
+  "isolate": true,
+  "icon": "https://i.imgur.com/GaEFmkH.png",
+  "icon_cover": true,
+  "context_menu": {
+    "target": ["images_project"],
+    "context_root": "Neural Networks",
+    "context_category": "MM Segmentation"
+  },
+  "poster": "https://user-images.githubusercontent.com/48245050/182847473-9a35f213-c27b-4abd-bd64-c73bf80fb056.jpg",
+  "community_agent": false,
+  "license": {
+    "type": "Apache-2.0"
+  }
 }
diff --git a/train/dev_requirements.txt b/train/dev_requirements.txt
new file mode 100644
index 0000000..e6d338a
--- /dev/null
+++ b/train/dev_requirements.txt
@@ -0,0 +1,14 @@
+# git+https://github.com/supervisely/supervisely.git@some-test-branch
+
+supervisely==6.73.100
+
+openmim
+ffmpeg-python==0.2.0
+pyyaml==6.0
+
+torch==1.13.0
+torchvision==0.14.0
+
+mmcv-full==1.7.0
+mmsegmentation==0.29.1
+mmcls==0.24.1
diff --git a/train/requirements.txt b/train/requirements.txt
deleted file mode 100644
index 8e30c09..0000000
--- a/train/requirements.txt
+++ /dev/null
@@ -1 +0,0 @@
-supervisely==6.73.63
diff --git a/train/src/sly_globals.py b/train/src/sly_globals.py
index 50f5926..32036cd 100644
--- a/train/src/sly_globals.py
+++ b/train/src/sly_globals.py
@@ -3,6 +3,8 @@
 import sys
 import supervisely as sly
 from supervisely.app.v1.app_service import AppService
+from supervisely.nn.artifacts.mmsegmentation import MMSegmentation
+
 import shutil
 import pkg_resources
 
@@ -29,9 +31,9 @@
 api = my_app.public_api
 task_id = my_app.task_id
 
-team_id = int(os.environ['context.teamId'])
-workspace_id = int(os.environ['context.workspaceId'])
-project_id = int(os.environ['modal.state.slyProjectId'])
+team_id = int(os.environ["context.teamId"])
+workspace_id = int(os.environ["context.workspaceId"])
+project_id = int(os.environ["modal.state.slyProjectId"])
 
 project_info = api.project.get_info_by_id(project_id)
 
@@ -49,6 +51,8 @@
 checkpoints_dir = os.path.join(artifacts_dir, "checkpoints")
 sly.fs.mkdir(checkpoints_dir)
 
+sly_mmseg = MMSegmentation(team_id)
+
 configs_dir = os.path.join(root_source_dir, "configs")
 mmseg_ver = pkg_resources.get_distribution("mmsegmentation").version
 if os.path.isdir(f"/tmp/mmseg/mmsegmentation-{mmseg_ver}"):
diff --git a/train/src/ui/monitoring.py b/train/src/ui/monitoring.py
index 5553622..eac504e 100644
--- a/train/src/ui/monitoring.py
+++ b/train/src/ui/monitoring.py
@@ -39,19 +39,13 @@ def init(data, state):
     data["outputUrl"] = None
 
 
-def init_chart(title, names, xs, ys, smoothing=None, yrange=None, decimals=None, xdecimals=None, metric=None):
+def init_chart(
+    title, names, xs, ys, smoothing=None, yrange=None, decimals=None, xdecimals=None, metric=None
+):
     series = []
     for name, x, y in zip(names, xs, ys):
-        series.append({
-            "name": name,
-            "data": [[px, py] for px, py in zip(x, y)]
-        })
-    result = {
-        "options": {
-            "title": title
-        },
-        "series": series
-    }
+        series.append({"name": name, "data": [[px, py] for px, py in zip(x, y)]})
+    result = {"options": {"title": title}, "series": series}
     if len(names) > 0:
         result["series"] = series
     if metric is not None:
@@ -69,21 +63,48 @@ def init_charts(data, state):
     state["smoothing"] = 0.6
-    state["chartLR"] = init_chart("LR", names=["lr"], xs = [[]], ys = [[]], smoothing=None,
-                                  # yrange=[state["lr"] - state["lr"] / 2.0, state["lr"] + state["lr"] / 2.0],
-                                  decimals=6, xdecimals=2)
-    state["chartTrainLoss"] = init_chart("Train Loss", names=["loss"], xs=[[]], ys=[[]], smoothing=state["smoothing"], decimals=6, xdecimals=2)
+    state["chartLR"] = init_chart(
+        "LR",
+        names=["lr"],
+        xs=[[]],
+        ys=[[]],
+        smoothing=None,
+        # yrange=[state["lr"] - state["lr"] / 2.0, state["lr"] + state["lr"] / 2.0],
+        decimals=6,
+        xdecimals=2,
+    )
+    state["chartTrainLoss"] = init_chart(
+        "Train Loss",
+        names=["loss"],
+        xs=[[]],
+        ys=[[]],
+        smoothing=state["smoothing"],
+        decimals=6,
+        xdecimals=2,
+    )
     state["mean_charts"] = {}
     for metric in data["availableMetrics"]:
-        state["mean_charts"][f"chartVal_{metric}"] = init_chart(f"Val {metric}", metric=metric, names=[metric], xs=[[]], ys=[[]], smoothing=state["smoothing"])
+        state["mean_charts"][f"chartVal_{metric}"] = init_chart(
+            f"Val {metric}",
+            metric=metric,
+            names=[metric],
+            xs=[[]],
+            ys=[[]],
+            smoothing=state["smoothing"],
+        )
     state["class_charts"] = {}
     for metric in data["availableMetrics"]:
-        state["class_charts"][f"chartVal_{metric[1:]}"] = init_chart(f"Val {metric[1:]}", names=[], metric=metric, xs=[], ys=[], smoothing=state["smoothing"])
+        state["class_charts"][f"chartVal_{metric[1:]}"] = init_chart(
+            f"Val {metric[1:]}", names=[], metric=metric, xs=[], ys=[], smoothing=state["smoothing"]
+        )
 
     state["chartTime"] = init_chart("Time", names=["time"], xs=[[]], ys=[[]], xdecimals=2)
-    state["chartDataTime"] = init_chart("Data Time", names=["data_time"], xs=[[]], ys=[[]], xdecimals=2)
+    state["chartDataTime"] = init_chart(
+        "Data Time", names=["data_time"], xs=[[]], ys=[[]], xdecimals=2
+    )
     state["chartMemory"] = init_chart("Memory", names=["memory"], xs=[[]], ys=[[]], xdecimals=2)
 
+
 @g.my_app.callback("change_smoothing")
 @sly.timeit
 @g.my_app.ignore_errors_and_show_dialog_window()
@@ -92,10 +113,18 @@ def change_smoothing(api: sly.Api, task_id, context, state, app_logger):
         {"field": "state.chartTrainLoss.options.smoothingWeight", "payload": state["smoothing"]}
     ]
     for metric in state["evalMetrics"]:
-        fields.extend([
-            {"field": f"state.mean_charts.chartVal_{metric}.options.smoothingWeight", "payload": state["smoothing"]},
-            {"field": f"state.class_charts.chartVal_{metric[1:]}.options.smoothingWeight", "payload": state["smoothing"]}
-        ])
+        fields.extend(
+            [
+                {
+                    "field": f"state.mean_charts.chartVal_{metric}.options.smoothingWeight",
+                    "payload": state["smoothing"],
+                },
+                {
+                    "field": f"state.class_charts.chartVal_{metric[1:]}.options.smoothingWeight",
+                    "payload": state["smoothing"],
+                },
+            ]
+        )
     g.api.app.set_fields(g.task_id, fields)
 
 
@@ -117,35 +146,53 @@ def upload_monitor(monitor, api: sly.Api, task_id, progress: sly.Progress):
             progress.set_current_value(monitor.bytes_read, report=False)
         _update_progress_ui("UploadDir", g.api, g.task_id, progress)
 
-    progress = sly.Progress("Upload directory with training artifacts to Team Files", 0, is_size=True)
+    progress = sly.Progress(
+        "Upload directory with training artifacts to Team Files", 0, is_size=True
+    )
     progress_cb = partial(upload_monitor, api=g.api, task_id=g.task_id, progress=progress)
-    remote_dir = f"/mmsegmentation/{g.task_id}_{g.project_info.name}"
-    res_dir = g.api.file.upload_directory(g.team_id, g.artifacts_dir, remote_dir, progress_size_cb=progress_cb)
+    model_dir = g.sly_mmseg.framework_folder
+    remote_artifacts_dir = f"{model_dir}/{g.task_id}_{g.project_info.name}"
+    remote_weights_dir = os.path.join(remote_artifacts_dir, g.sly_mmseg.weights_folder)
+    remote_config_path = os.path.join(remote_weights_dir, g.sly_mmseg.config_file)
+
+    res_dir = g.api.file.upload_directory(
+        g.team_id, g.artifacts_dir, remote_artifacts_dir, progress_size_cb=progress_cb
+    )
+
+    # generate metadata file
+    g.sly_mmseg.generate_metadata(
+        app_name=g.sly_mmseg.app_name,
+        task_id=g.task_id,
+        artifacts_folder=remote_artifacts_dir,
+        weights_folder=remote_weights_dir,
+        weights_ext=g.sly_mmseg.weights_ext,
+        project_name=g.project_info.name,
+        task_type=g.sly_mmseg.task_type,
+        config_path=remote_config_path,
+    )
+
     return res_dir
 
+
 def init_class_charts_series(state):
     classes = state["selectedClasses"] + ["__bg__"]
     series = []
     for class_name in classes:
-        series.append({
-            "name": class_name,
-            "data": []
-        })
-    fields = [
-        {"field": "state.preparingData", "payload": True}
-    ]
+        series.append({"name": class_name, "data": []})
+    fields = [{"field": "state.preparingData", "payload": True}]
    for metric_name in state["evalMetrics"]:
-        fields.extend([
-            {"field": f"state.class_charts.chartVal_{metric_name[1:]}.series", "payload": series}
-        ])
+        fields.extend(
+            [{"field": f"state.class_charts.chartVal_{metric_name[1:]}.series", "payload": series}]
+        )
     g.api.app.set_fields(g.task_id, fields)
 
 
 def prepare_segmentation_data(state, img_dir, ann_dir, palette):
     temp_project_seg_dir = g.project_seg_dir + "_temp"
-    sly.Project.to_segmentation_task(g.project_dir, temp_project_seg_dir, target_classes=state["selectedClasses"])
-    
+    sly.Project.to_segmentation_task(
+        g.project_dir, temp_project_seg_dir, target_classes=state["selectedClasses"]
+    )
     datasets = os.listdir(temp_project_seg_dir)
 
     os.makedirs(os.path.join(g.project_seg_dir, img_dir), exist_ok=True)
@@ -158,7 +205,9 @@ def prepare_segmentation_data(state, img_dir, ann_dir, palette):
         # convert masks to required format and save to general ann_dir
         mask_files = os.listdir(os.path.join(temp_project_seg_dir, dataset, ann_dir))
         for mask_file in mask_files:
-            mask = cv2.imread(os.path.join(temp_project_seg_dir, dataset, ann_dir, mask_file))[:, :, ::-1]
+            mask = cv2.imread(os.path.join(temp_project_seg_dir, dataset, ann_dir, mask_file))[
+                :, :, ::-1
+            ]
             result = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int32)
             # human masks to machine masks
             for color_idx, color in enumerate(palette):
@@ -168,9 +217,11 @@ def prepare_segmentation_data(state, img_dir, ann_dir, palette):
         imgfiles_to_move = os.listdir(os.path.join(temp_project_seg_dir, dataset, img_dir))
         for filename in imgfiles_to_move:
-            shutil.move(os.path.join(temp_project_seg_dir, dataset, img_dir, filename),
-                        os.path.join(g.project_seg_dir, img_dir))
-            
+            shutil.move(
+                os.path.join(temp_project_seg_dir, dataset, img_dir, filename),
+                os.path.join(g.project_seg_dir, img_dir),
+            )
+
     shutil.rmtree(temp_project_seg_dir)
     g.api.app.set_field(g.task_id, "state.preparingData", False)
 
@@ -187,26 +238,33 @@ def train(api: sly.Api, task_id, context, state, app_logger):
     ann_dir = "seg"
     obj_classes = g.project_meta.obj_classes
     if g.project_meta.get_obj_class("__bg__") is None:
-        obj_classes = obj_classes.add(sly.ObjClass(name="__bg__", geometry_type=sly.Bitmap, color=(0,0,0)))
+        obj_classes = obj_classes.add(
+            sly.ObjClass(name="__bg__", geometry_type=sly.Bitmap, color=(0, 0, 0))
+        )
     classes_json = obj_classes.to_json()
-    classes_json = [obj for obj in classes_json if obj["title"] in state["selectedClasses"] or obj["title"] == "__bg__"]
+    classes_json = [
+        obj
+        for obj in classes_json
+        if obj["title"] in state["selectedClasses"] or obj["title"] == "__bg__"
+    ]
     classes = [obj["title"] for obj in classes_json]
-    palette = [obj["color"].lstrip('#') for obj in classes_json]
-    palette = [[int(color[i:i + 2], 16) for i in (0, 2, 4)] for color in palette]
+    palette = [obj["color"].lstrip("#") for obj in classes_json]
+    palette = [[int(color[i : i + 2], 16) for i in (0, 2, 4)] for color in palette]
     if not os.path.exists(g.project_seg_dir):
         prepare_segmentation_data(state, img_dir, ann_dir, palette)
 
     cfg = init_cfg(state, img_dir, ann_dir, classes, palette)
     # print(f'Config:\n{cfg.pretty_text}') # TODO: debug
-    os.makedirs(os.path.join(g.checkpoints_dir, cfg.work_dir.split('/')[-1]), exist_ok=True)
-    cfg.dump(os.path.join(g.checkpoints_dir, cfg.work_dir.split('/')[-1], "config.py"))
+    os.makedirs(os.path.join(g.checkpoints_dir, cfg.work_dir.split("/")[-1]), exist_ok=True)
+    cfg.dump(os.path.join(g.checkpoints_dir, cfg.work_dir.split("/")[-1], "config.py"))
 
     # Build the dataset
     datasets = [build_dataset(cfg.data.train)]
 
     # Build the detector
     model = build_segmentor(
-        cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
+        cfg.model, train_cfg=cfg.get("train_cfg"), test_cfg=cfg.get("test_cfg")
+    )
     # Add an attribute for visualization convenience
     model.CLASSES = datasets[0].CLASSES
     model = revert_sync_batchnorm(model)