Skip to content

Commit

Permalink
Merge pull request #2 from ImIntheMiddle/hugoblox-import-publications
Browse files Browse the repository at this point in the history
Hugo Blox Builder - Import latest publications
  • Loading branch information
ImIntheMiddle authored Nov 24, 2023
2 parents 238759f + 4eb19e6 commit e330a2b
Show file tree
Hide file tree
Showing 4 changed files with 80 additions and 0 deletions.
14 changes: 14 additions & 0 deletions content/publication/taketsugu-active-2023/cite.bib
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
@misc{taketsugu_active_2023,
  abstract     = {Human Pose (HP) estimation is actively researched because of its wide range of applications. However, even estimators pre-trained on large datasets may not perform satisfactorily due to a domain gap between the training and test data. To address this issue, we present our approach combining Active Learning (AL) and Transfer Learning (TL) to adapt HP estimators to individual video domains efficiently. For efficient learning, our approach quantifies (i) the estimation uncertainty based on the temporal changes in the estimated heatmaps and (ii) the unnaturalness in the estimated full-body HPs. These quantified criteria are then effectively combined with the state-of-the-art representativeness criterion to select uncertain and diverse samples for efficient HP estimator learning. Furthermore, we reconsider the existing Active Transfer Learning (ATL) method to introduce novel ideas related to the retraining methods and Stopping Criteria (SC). Experimental results demonstrate that our method enhances learning efficiency and outperforms comparative methods. Our code is publicly available at: https://github.com/ImIntheMiddle/VATL4Pose-WACV2024},
  annote       = {Comment: 17 pages, 12 figures, Accepted by WACV 2024},
  author       = {Taketsugu, Hiromu and Ukita, Norimichi},
  copyright    = {All rights reserved},
  doi          = {10.48550/arXiv.2311.05041},
  eprint       = {2311.05041},
  eprinttype   = {arXiv},
  eprintclass  = {cs},
  month        = nov,
  note         = {arXiv:2311.05041 [cs]},
  publisher    = {arXiv},
  title        = {Active Transfer Learning for Efficient Video-Specific Human Pose Estimation},
  url          = {http://arxiv.org/abs/2311.05041},
  urldate      = {2023-11-24},
  year         = {2023}
}
29 changes: 29 additions & 0 deletions content/publication/taketsugu-active-2023/index.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
---
# Hugo Blox publication front matter, auto-generated from cite.bib by the
# "Import latest publications" tool; keys must match the Hugo Blox schema.
title: Active Transfer Learning for Efficient Video-Specific Human Pose Estimation
# Author display names, in citation order.
authors:
- Hiromu Taketsugu
- Norimichi Ukita
# Publication date (month precision; day defaults to 01).
date: '2023-11-01'
# Timestamp of the automated import, not of the publication itself.
publishDate: '2023-11-24T21:29:26.969043Z'
# 'manuscript' = preprint/unpublished work in the Hugo Blox CSL-style taxonomy.
publication_types:
- manuscript
# Venue string; asterisks render it in italics.
publication: '*arXiv*'
# Bare DOI (no https://doi.org/ prefix).
doi: 10.48550/arXiv.2311.05041
abstract: 'Human Pose (HP) estimation is actively researched because of its wide range
  of applications. However, even estimators pre-trained on large datasets may not
  perform satisfactorily due to a domain gap between the training and test data. To
  address this issue, we present our approach combining Active Learning (AL) and Transfer
  Learning (TL) to adapt HP estimators to individual video domains efficiently. For
  efficient learning, our approach quantifies (i) the estimation uncertainty based
  on the temporal changes in the estimated heatmaps and (ii) the unnaturalness in
  the estimated full-body HPs. These quantified criteria are then effectively combined
  with the state-of-the-art representativeness criterion to select uncertain and diverse
  samples for efficient HP estimator learning. Furthermore, we reconsider the existing
  Active Transfer Learning (ATL) method to introduce novel ideas related to the retraining
  methods and Stopping Criteria (SC). Experimental results demonstrate that our method
  enhances learning efficiency and outperforms comparative methods. Our code is publicly
  available at: https://github.com/ImIntheMiddle/VATL4Pose-WACV2024'
# External links rendered as buttons on the publication page.
links:
- name: URL
  url: http://arxiv.org/abs/2311.05041
---
13 changes: 13 additions & 0 deletions content/publication/taketsugu-uncertainty-2023/cite.bib
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
@inproceedings{taketsugu_uncertainty_2023,
  abstract  = {This paper presents a combination of Active Learning (AL) and Transfer Learning (TL) for efficiently adapting Human Pose (HP) estimators to individual videos. The proposed approach quantifies estimation uncertainty through the temporal changes and unnaturalness of estimated HPs. These uncertainty criteria are combined with clustering-based representativeness criterion to avoid the useless selection of similar samples. Experiments demonstrated that the proposed method achieves high learning efficiency and outperforms comparative methods.},
  author    = {Taketsugu, Hiromu and Ukita, Norimichi},
  booktitle = {2023 18th International Conference on Machine Vision and Applications ({MVA})},
  copyright = {All rights reserved},
  doi       = {10.23919/MVA57639.2023.10215565},
  month     = jul,
  pages     = {1--5},
  title     = {Uncertainty Criteria in Active Transfer Learning for Efficient Video-Specific Human Pose Estimation},
  url       = {https://ieeexplore.ieee.org/abstract/document/10215565},
  urldate   = {2023-11-24},
  year      = {2023}
}
24 changes: 24 additions & 0 deletions content/publication/taketsugu-uncertainty-2023/index.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
---
# Hugo Blox publication front matter, auto-generated from cite.bib by the
# "Import latest publications" tool; keys must match the Hugo Blox schema.
title: Uncertainty Criteria in Active Transfer Learning for Efficient Video-Specific
  Human Pose Estimation
# Author display names, in citation order.
authors:
- Hiromu Taketsugu
- Norimichi Ukita
# Publication date (month precision; day defaults to 01).
date: '2023-07-01'
# Timestamp of the automated import, not of the publication itself.
publishDate: '2023-11-24T21:29:26.986148Z'
# 'paper-conference' = conference paper in the Hugo Blox CSL-style taxonomy.
publication_types:
- paper-conference
# Venue string; asterisks render it in italics.
publication: '*2023 18th International Conference on Machine Vision and Applications
  (MVA)*'
# Bare DOI (no https://doi.org/ prefix).
doi: 10.23919/MVA57639.2023.10215565
abstract: This paper presents a combination of Active Learning (AL) and Transfer Learning
  (TL) for efficiently adapting Human Pose (HP) estimators to individual videos. The
  proposed approach quantifies estimation uncertainty through the temporal changes
  and unnaturalness of estimated HPs. These uncertainty criteria are combined with
  clustering-based representativeness criterion to avoid the useless selection of
  similar samples. Experiments demonstrated that the proposed method achieves high
  learning efficiency and outperforms comparative methods.
# External links rendered as buttons on the publication page.
links:
- name: URL
  url: https://ieeexplore.ieee.org/abstract/document/10215565
---

0 comments on commit e330a2b

Please sign in to comment.