@inproceedings{brust2019integrating,
  title     = {Integrating Domain Knowledge: Using Hierarchies to Improve Deep Classifiers},
  author    = {Clemens-Alexander Brust and Joachim Denzler},
  booktitle = {Asian Conference on Pattern Recognition (ACPR)},
  year      = {2019},
  abstract  = {One of the most prominent problems in machine learning in the age of deep learning is the availability of sufficiently large annotated datasets. For specific domains, e.g., animal species, a long-tail distribution means that some classes are observed and annotated insufficiently. Additional labels can be prohibitively expensive, e.g., because domain experts need to be involved. However, additional information is available that, to the best of our knowledge, has not yet been exploited. In this paper, we propose to make use of preexisting class hierarchies like WordNet to integrate additional domain knowledge into classification. We encode the properties of such a class hierarchy into a probabilistic model. From there, we derive a novel label encoding and a corresponding loss function. On the ImageNet and NABirds datasets, our method offers relative improvements in accuracy of 10.4\% and 9.6\% over the baseline, respectively. After less than a third of the training time, it already matches the baseline's fine-grained recognition performance. Both results show that the proposed method is efficient and effective.},
  file      = {:pdfs/Brust2019IDK.pdf:PDF},
}
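
Note (ignored by BibTeX, as text outside an entry): the abstract describes encoding a class hierarchy into a label encoding with a matching loss. The Python sketch below illustrates one common realization of that idea (multi-hot targets over a node's ancestor chain, trained with per-node binary cross-entropy). The toy hierarchy, all names, and the choice of BCEWithLogitsLoss are illustrative assumptions, not the authors' exact probabilistic model.

    import torch

    # Hypothetical toy hierarchy, child -> parent (None marks the root).
    # The paper uses WordNet; this small dict is an illustrative stand-in.
    PARENT = {
        "animal": None,
        "bird": "animal",
        "dog": "animal",
        "sparrow": "bird",
        "eagle": "bird",
    }
    CLASSES = sorted(PARENT)                     # fixed ordering over all hierarchy nodes
    INDEX = {c: i for i, c in enumerate(CLASSES)}

    def encode(label: str) -> torch.Tensor:
        """Multi-hot label encoding: the class and every ancestor are positive."""
        target = torch.zeros(len(CLASSES))
        node = label
        while node is not None:
            target[INDEX[node]] = 1.0
            node = PARENT[node]
        return target

    # One binary decision per hierarchy node, trained jointly.
    loss_fn = torch.nn.BCEWithLogitsLoss()

    logits = torch.randn(1, len(CLASSES), requires_grad=True)  # stand-in for network output
    target = encode("sparrow").unsqueeze(0)       # positive for sparrow, bird, animal
    loss = loss_fn(logits, target)
    loss.backward()
    print(CLASSES, target, loss.item())

Under this encoding, a "sparrow" example also supervises the coarser "bird" and "animal" outputs, which is one way hierarchy information can help rarely observed fine-grained classes.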