@inproceedings{Reimers:CIW19,
  title     = {Using Causal Inference to Globally Understand Black Box Predictors Beyond Saliency Maps},
  author    = {Christian Reimers and Jakob Runge and Joachim Denzler},
  booktitle = {International Workshop on Climate Informatics (CI)},
  year      = {2019},
  number    = {NCAR/TN-561+PROC},
  abstract  = {State-of-the-art machine learning methods, especially deep neural networks, have achieved impressive results in many prediction and classification tasks. Rising complexity and automatic feature selection make the resulting learned models hard to interpret and turn them into black boxes. Advances in feature visualization have mitigated this problem, but some shortcomings still exist. For example, these methods only work locally, meaning they explain the behavior only for single inputs, and they only identify important parts of the input. In this work, we propose a method that is also able to decide whether a feature calculated from the input to an estimator is globally useful. Since the question about explanatory power is a causal one, we frame this approach with causal inference methods.},
  doi       = {10.5065/y82j-f154},
  groups    = {understanding-dl},
}