From 46731e2c7c590d4c6c3e18376cc3e361d14705b3 Mon Sep 17 00:00:00 2001
From: lukaboljevic
Date: Tue, 25 Apr 2023 23:32:31 +0200
Subject: [PATCH 1/2] Correct typos, add missing requirements, update README

---
 README.md             | 20 ++++++++++++--------
 calculate_measures.py |  2 +-
 compare_trackers.py   |  4 ++--
 create_workspace.py   |  2 +-
 evaluate_tracker.py   |  2 --
 requirements.txt      |  3 ++-
 utils/dataset.py      |  2 +-
 utils/export_utils.py |  4 ++--
 utils/sequence.py     |  4 ++--
 utils/trackers.yaml   |  8 ++++----
 utils/utils.py        |  4 ++--
 11 files changed, 29 insertions(+), 26 deletions(-)

diff --git a/README.md b/README.md
index e049b27..0323689 100644
--- a/README.md
+++ b/README.md
@@ -9,27 +9,31 @@ In the following you can find the instructions for creating a workspace, integra
 - Create a new directory (not within `toolkit-dir` directory) - we will call it `workspace-dir`
 - Go to the `toolkit-dir` directory and run the command:
 ```console
-python create_workspace.py --workspace_path workspace-dir --dataset dataset-version
+python create_workspace.py --workspace_path ../workspace-dir --dataset dataset-version
 ```
-Note that `dataset-version` represents the version of the VOT dataset and can be choosen among the following options: `vot2013`, `vot2014`, `vot2015`, `vot2016`. The script will automatically download the dataset (which can take some time), create several folders and the file `trackers.yaml` in the `toolkit-dir`.
+Note that `dataset-version` represents the version of the VOT dataset and can be chosen among the following options: `vot2013`, `vot2014`, `vot2015`, `vot2016`. The script will automatically download the dataset (which can take some time), create several folders and the file `trackers.yaml` in the `workspace-dir`.
 
 ## 2.) Tracker integration and running
-- After the workspace has been successfully created, edit the file `trackers.yaml` in the `toolkit-dir`. See the commented example for the NCC tracker in the `trackers.yaml`.
+- After the workspace has been successfully created, edit the file `trackers.yaml` in the `workspace-dir`. See the commented example for the NCC tracker in the `trackers.yaml`.
 - You can run your tracker on the dataset by running the following command:
 ```console
-python evaluate_tracker.py --workspace_path workspace-dir --tracker tracker_id
+python evaluate_tracker.py --workspace_path ../workspace-dir --tracker tracker-id
 ```
-Note that the `tracker-id` is a tracker identifier (see example in `trackers.yaml`, denoted as tracker identifier). The command will create a new directory with the name of your tracker in the `results` folder, which contains regions predicted by the tracker on all video sequences from the dataset.
+Note that the `tracker-id` is a tracker identifier (see example in `trackers.yaml`, denoted as tracker identifier). The command will create a new directory with the name of your tracker in the `results` folder, which contains regions predicted by the tracker on all video sequences from the dataset. Your tracker class thus needs to implement a function `name(self)` (see example trackers in the `examples` directory) so the directory can be successfully created.
 
 ## 3.) Results visualization and tracking performance evaluation
 - After the `evaluate_tracker` command has successfully finished, you can visualize tracking results on a specific sequence (`sequence-name`) by running the following command:
 ```console
-python visualize_result.py --workspace_path workspace-dir --tracker tracker-id --sequence sequence-name
+python visualize_result.py --workspace_path ../workspace-dir --tracker tracker-id --sequence sequence-name
+```
+The command will open a window and show a video with a predicted bounding box on a selected video sequence. Use the `--show-gt` flag to also show ground truth annotations.
+- To calculate accuracy, total number of failures and tracking speed for a single tracker, you can run the following command:
+```console
+python calculate_measures.py --workspace_path ../workspace-dir --tracker tracker-id
 ```
-The command will open a window and show a video with a predicted bounding box on a selected video sequence.
 - To compare results of multiple trackers (which have previously been run on the dataset) you can run the following command:
 ```console
-python compare_trackers.py --workspace_path workspace-dir --trackers tracker-id1 tracker-id2 tracker-id3 ... --sensitivity 100
+python compare_trackers.py --workspace_path ../workspace-dir --trackers tracker-id1 tracker-id2 tracker-id3 ... --sensitivity 100
 ```
 Note that `...` denotes arbitrary number of trackers which can be compared. This command calculates two tracking performance measures: accuracy and robustness and stores the per-sequence results in the directory: `workspace-dir/analysis/tracker-id/results.json`.
 Additionally, you can find the AR plot in the `workspace-dir/analysis/ar.png` comparing all trackers you specified when running the comparison command.
diff --git a/calculate_measures.py b/calculate_measures.py
index 97d7ce1..5fe4d25 100644
--- a/calculate_measures.py
+++ b/calculate_measures.py
@@ -44,7 +44,7 @@ def tracking_analysis(workspace_path, tracker_id):
 
 
 def main():
-    parser = argparse.ArgumentParser(description='Tracking Visualization Utility')
+    parser = argparse.ArgumentParser(description='Tracker Metrics Utility')
 
     parser.add_argument('--workspace_path', help='Path to the VOT workspace', required=True, action='store')
     parser.add_argument('--tracker', help='Tracker identifier', required=True, action='store')
diff --git a/compare_trackers.py b/compare_trackers.py
index 755b7c1..1e98010 100644
--- a/compare_trackers.py
+++ b/compare_trackers.py
@@ -32,11 +32,11 @@ def tracking_comparison(workspace_path, tracker_ids, sensitivity, output_path):
 
 
 def main():
-    parser = argparse.ArgumentParser(description='Tracking Visualization Utility')
+    parser = argparse.ArgumentParser(description='Tracker Comparison Utility')
 
     parser.add_argument('--workspace_path', help='Path to the VOT workspace', required=True, action='store')
     parser.add_argument('--trackers', help='Tracker identifiers', required=True, action='store', nargs='*')
-    parser.add_argument('--sensitivity', help='Sensitivtiy parameter for robustness', default=100, type=int)
+    parser.add_argument('--sensitivity', help='Sensitivity parameter for robustness', default=100, type=int)
     parser.add_argument('--output_path', help='Path for the output image', default='', type=str)
 
     args = parser.parse_args()
diff --git a/create_workspace.py b/create_workspace.py
index f94c478..5f06cd1 100644
--- a/create_workspace.py
+++ b/create_workspace.py
@@ -9,7 +9,7 @@
 def create_workspace(workspace_path, dataset_name):
 
     if not os.path.exists(workspace_path):
-        print('Directory given as a workspace does not exist. Please create it meanually.')
+        print('Directory given as a workspace does not exist. Please create it manually.')
         exit(-1)
 
     # download dataset
diff --git a/evaluate_tracker.py b/evaluate_tracker.py
index d34bf88..9466879 100644
--- a/evaluate_tracker.py
+++ b/evaluate_tracker.py
@@ -1,7 +1,5 @@
 import argparse
 import os
-import sys
-import yaml
 
 from utils.utils import load_tracker, load_dataset
 
diff --git a/requirements.txt b/requirements.txt
index d7e8c5a..9cc2158 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,4 @@
 opencv-python
 matplotlib
-
+PyYAML
+numpy
diff --git a/utils/dataset.py b/utils/dataset.py
index a40598f..c8d9952 100644
--- a/utils/dataset.py
+++ b/utils/dataset.py
@@ -9,7 +9,7 @@ def __init__(self, workspace_path):
         self.sequences_dir = os.path.join(workspace_path, 'sequences')
 
         if not os.path.isdir(self.sequences_dir):
-            print('Workspace directors (%s) does not have sequences directory.' % self.sequences_dir)
+            print('Workspace directory (%s) does not have sequences directory.' % self.sequences_dir)
             exit(-1)
 
         self.sequences = []
diff --git a/utils/export_utils.py b/utils/export_utils.py
index d0f5522..6303550 100644
--- a/utils/export_utils.py
+++ b/utils/export_utils.py
@@ -13,7 +13,7 @@ def print_summary(output_dict):
     print('------------------------------------')
     print('Results for tracker:', output_dict['tracker_name'])
     print(' Average overlap: %.2f' % output_dict['average_overlap'])
-    print(' Total failures: %.1f' % output_dict['total_failures'])
+    print(' Total failures: %d' % output_dict['total_failures'])
     print(' Average speed: %.2f FPS' % output_dict['average_speed'])
     print('------------------------------------')
 
@@ -26,7 +26,7 @@ def export_plot(outputs, sensitivity, output_path):
     styles = load_plot_styles()
 
     if len(outputs) > len(styles):
-        print('Number of compared trackers is larger than number of plot stlyes.')
+        print('Number of compared trackers is larger than number of plot styles.')
         print('Modify the script utils/plot_styles.py by adding more plot styles and re-run.')
         exit(-1)
 
diff --git a/utils/sequence.py b/utils/sequence.py
index 6859c4e..ac9c0ee 100644
--- a/utils/sequence.py
+++ b/utils/sequence.py
@@ -3,7 +3,7 @@
 import cv2
 import numpy as np
 
-from utils.utils import polygon2rectangle
+from utils.utils import polygon2rectangle, rectangle2polygon
 from utils.io_utils import read_regions
 
 
@@ -63,7 +63,7 @@ def gt_region(self, frame_index, format='RECTANGLE'):
             print('Unknown output region format: %s. Supported only RECTANGLE and POLYGON.' % format)
             exit(-1)
 
-        return self.groundtruth[frame_index]
+        # return self.groundtruth[frame_index]
 
     def visualize_results(self, regions, show_groundtruth=False):
         print('********************************************************')
diff --git a/utils/trackers.yaml b/utils/trackers.yaml
index 5971234..b4beba8 100644
--- a/utils/trackers.yaml
+++ b/utils/trackers.yaml
@@ -6,9 +6,9 @@ tracker_test: # use tracker identifier as you want to call your tracker from te
     - insert\path\1
     - insert\path\2
 ...
-# another trackers can be added here, similarly as in the upper example
+# Other trackers can be added here, similarly to the example above
 # here is an example for the NCC tracker:
-#ncc_tracker:
-#   tracker_path: path\to\python_ncc.py
-#   class_name: NCCTracker
+# ncc_tracker:
+#    tracker_path: path\to\python_ncc.py
+#    class_name: NCCTracker
 ...
\ No newline at end of file
diff --git a/utils/utils.py b/utils/utils.py
index 525ecb8..febda44 100644
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -21,7 +21,7 @@ def calculate_overlap(a: list, b: list):
         b = polygon2rectangle(b)
 
     if len(a) != 4 or len(b) != 4:
-        print('Both regions must have 4 elements (bounding box) to calcualte overlap.')
+        print('Both regions must have 4 elements (bounding box) to calculate overlap.')
         exit(-1)
 
     if a[2] < 1 or a[3] < 1 or b[2] < 1 or b[3] < 1:
@@ -36,7 +36,7 @@ def trajectory_overlaps(t1: list, t2: list):
     # calcualte per-frame overlap for a trajectory (multiple frames)
     if len(t1) != len(t2):
         print('Error: Trajectories must be the same length.')
-        print(-1)
+        exit(-1)
 
     overlaps = len(t1) * [0]
     valid = len(t1) * [0]

From 145f1337a0b406ea14038c040aec5e3fa8581fb3 Mon Sep 17 00:00:00 2001
From: lukaboljevic
Date: Tue, 25 Apr 2023 23:43:49 +0200
Subject: [PATCH 2/2] Fix (my) typo

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 0323689..aeadfd1 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@ Note that the `tracker-id` is a tracker identifier (see example in `trackers.yam
 ```console
 python visualize_result.py --workspace_path ../workspace-dir --tracker tracker-id --sequence sequence-name
 ```
-The command will open a window and show a video with a predicted bounding box on a selected video sequence. Use the `--show-gt` flag to also show ground truth annotations.
+The command will open a window and show a video with a predicted bounding box on a selected video sequence. Use the `--show_gt` flag to also show ground truth annotations.
 - To calculate accuracy, total number of failures and tracking speed for a single tracker, you can run the following command:
 ```console
 python calculate_measures.py --workspace_path ../workspace-dir --tracker tracker-id
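
As a quick, non-authoritative illustration of the `name(self)` requirement mentioned in the updated README (the directory under `results` is named after the value it returns), a minimal tracker stub might look like the sketch below. The class name `MyTracker`, the identifier `my_tracker`, and the `initialize`/`track` hooks with their signatures are assumptions for illustration only; the trackers in the `examples` directory define the interface the toolkit actually expects.

```python
# Hypothetical minimal tracker stub -- a sketch, not part of the patch or the toolkit.
# Only the name(self) method is confirmed by the README; initialize/track and their
# signatures are assumptions, so follow the trackers in the examples directory.

class MyTracker:
    def name(self):
        # evaluate_tracker.py uses this value to create results/<name>
        return 'my_tracker'

    def initialize(self, image, region):
        # assumed hook: remember the initial target region for the sequence
        self.region = list(region)

    def track(self, image):
        # assumed hook: return the predicted region for the current frame
        return list(self.region)
```

Such a class would then be referenced from `trackers.yaml` under its own identifier, with `tracker_path` pointing at the file that contains it and `class_name` set to `MyTracker`, following the commented NCC example.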
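For intuition about the `calculate_overlap` helper whose error message is fixed above (it requires both regions to be 4-element bounding boxes), a typical intersection-over-union computation is sketched below. The `[x, y, width, height]` layout and the `iou` helper name are assumptions for illustration; `utils/utils.py` in the repository remains the authoritative implementation.

```python
# Hypothetical IoU sketch -- an illustration of the kind of overlap calculate_overlap
# computes, assuming regions are [x, y, width, height] lists; not the toolkit's code.

def iou(a, b):
    # top-left corner of the intersection rectangle
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    # bottom-right corner of the intersection rectangle
    ix2 = min(a[0] + a[2], b[0] + b[2])
    iy2 = min(a[1] + a[3], b[1] + b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    union = a[2] * a[3] + b[2] * b[3] - inter
    return inter / union if union > 0 else 0.0

print(iou([0, 0, 10, 10], [5, 5, 10, 10]))  # prints roughly 0.14
```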