@inproceedings{Mothes19:One,
  title     = {One-Shot Learned Priors in Augmented Active Appearance Models for Anatomical Landmark Tracking},
  author    = {Mothes, Oliver and Denzler, Joachim},
  booktitle = {Computer Vision, Imaging and Computer Graphics -- Theory and Applications},
  year      = {2019},
  editor    = {Cláudio, Ana Paula and Bechmann, Dominique and Richard, Paul and Yamaguchi, Takehiko and Linsen, Lars and Telea, Alexandru and Imai, Francisco and Trémeau, Alain},
  pages     = {85--104},
  publisher = {Springer International Publishing},
  address   = {Cham},
  abstract  = {In motion science, biology and robotics animal movement analyses are used for the detailed understanding of the human bipedal locomotion. For this investigations an immense amount of recorded image data has to be evaluated by biological experts. During this time-consuming evaluation single anatomical landmarks, for example bone ends, have to be located and annotated in each image. In this paper we show a reduction of this effort by automating the annotation with a minimum level of user interaction. Recent approaches, based on Active Appearance Models, are improved by priors based on anatomical knowledge and an online tracking method, requiring only a single labeled frame. In contrast, we propose a one-shot learned tracking-by-detection prior which overcomes the shortcomings of template drifts without increasing the number of training data. We evaluate our approach based on a variety of real-world X-ray locomotion datasets and show that our method outperforms recent state-of-the-art concepts for the task at hand.},
  groups    = {biomedical,locomotion},
  isbn      = {978-3-030-12209-6},
  doi       = {10.1007/978-3-030-12209-6_5},
  url       = {https://link.springer.com/chapter/10.1007/978-3-030-12209-6_5},
}