@inproceedings{penzel2022investigating,
  title     = {Investigating Neural Network Training on a Feature Level using Conditional Independence},
  author    = {Niklas Penzel and Christian Reimers and Paul Bodesheim and Joachim Denzler},
  booktitle = {ECCV Workshop on Causality in Vision (ECCV-WS)},
  year      = {2022},
  pages     = {383--399},
  abstract  = {There are still open questions about how the learned representations of deep models change during the training process. Understanding this process could aid in validating the training. Towards this goal, previous works analyze the training in the mutual information plane. We use a different approach and base our analysis on a method built on Reichenbach’s common cause principle. Using this method, we test whether the model utilizes information contained in human-defined features. Given such a set of features, we investigate how the relative feature usage changes throughout the training process. We analyze multiple networks training on different tasks, including melanoma classification as a real-world application. We find that over the training, models concentrate on features containing information relevant to the task. This concentration is a form of representation compression. Crucially, we also find that the selected features can differ between training from scratch and finetuning a pre-trained network.},
  doi       = {10.1007/978-3-031-25075-0_27},
  publisher = {Springer Nature Switzerland},
  address   = {Cham},
  isbn      = {978-3-031-25075-0},
  groups    = {understanding-dl},
}