{ "id": "2011.14145", "version": "v1", "published": "2020-11-28T15:19:36.000Z", "updated": "2020-11-28T15:19:36.000Z", "title": "Uncertainty Quantification in Deep Learning through Stochastic Maximum Principle", "authors": [ "Richard Archibald", "Feng Bao", "Yanzhao Cao", "He Zhang" ], "categories": [ "cs.LG", "math.OC", "stat.ML" ], "abstract": "We develop a probabilistic machine learning method, which formulates a class of stochastic neural networks by a stochastic optimal control problem. An efficient stochastic gradient descent algorithm is introduced under the stochastic maximum principle framework. Convergence analysis for stochastic gradient descent optimization and numerical experiments for applications of stochastic neural networks are carried out to validate our methodology in both theory and performance.", "revisions": [ { "version": "v1", "updated": "2020-11-28T15:19:36.000Z" } ], "analyses": { "keywords": [ "uncertainty quantification", "stochastic neural networks", "deep learning", "efficient stochastic gradient descent algorithm", "stochastic optimal control problem" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }