{ "id": "1805.10694", "version": "v1", "published": "2018-05-27T21:33:37.000Z", "updated": "2018-05-27T21:33:37.000Z", "title": "Towards a Theoretical Understanding of Batch Normalization", "authors": [ "Jonas Kohler", "Hadi Daneshmand", "Aurelien Lucchi", "Ming Zhou", "Klaus Neymeyr", "Thomas Hofmann" ], "categories": [ "stat.ML", "cs.LG" ], "abstract": "Normalization techniques such as Batch Normalization have been applied very successfully for training deep neural networks. Yet, despite its apparent empirical benefits, the reasons behind the success of Batch Normalization are mostly hypothetical. We thus aim to provide a more thorough theoretical understanding from an optimization perspective. Our main contribution towards this goal is the identification of various problem instances in the realm of machine learning where, under certain assumptions, Batch Normalization can provably accelerate optimization with gradient-based methods. We thereby turn Batch Normalization from an effective practical heuristic into a provably converging algorithm for these settings. Furthermore, we substantiate our analysis with empirical evidence that suggests the validity of our theoretical results in a broader context.", "revisions": [ { "version": "v1", "updated": "2018-05-27T21:33:37.000Z" } ], "analyses": { "keywords": [ "theoretical understanding", "training deep neural networks", "turn batch normalization", "normalization techniques", "apparent empirical benefits" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }