{ "id": "1711.06104", "version": "v1", "published": "2017-11-16T14:19:29.000Z", "updated": "2017-11-16T14:19:29.000Z", "title": "A unified view of gradient-based attribution methods for Deep Neural Networks", "authors": [ "Marco Ancona", "Enea Ceolini", "Cengiz Öztireli", "Markus Gross" ], "comment": "Accepted at NIPS 2017 - Workshop Interpreting, Explaining and Visualizing Deep Learning", "categories": [ "cs.LG", "stat.ML" ], "abstract": "Understanding the flow of information in Deep Neural Networks is a challenging problem that has gained increasing attention over the last few years. While several methods have been proposed to explain network predictions, only a few attempts to analyze them from a theoretical perspective have been made in the past. In this work we analyze various state-of-the-art attribution methods and prove unexplored connections between them. We also show how some methods can be reformulated and more conveniently implemented. Finally, we perform an empirical evaluation with six attribution methods on a variety of tasks and architectures and discuss their strengths and limitations.", "revisions": [ { "version": "v1", "updated": "2017-11-16T14:19:29.000Z" } ], "analyses": { "keywords": [ "deep neural networks", "gradient-based attribution methods", "unified view", "state-of-the-art attribution methods", "explain network predictions" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }