{ "id": "1706.08495", "version": "v1", "published": "2017-06-26T17:36:28.000Z", "updated": "2017-06-26T17:36:28.000Z", "title": "Uncertainty Decomposition in Bayesian Neural Networks with Latent Variables", "authors": [ "Stefan Depeweg", "José Miguel Hernández-Lobato", "Finale Doshi-Velez", "Steffen Udluft" ], "categories": [ "stat.ML" ], "abstract": "Bayesian neural networks (BNNs) with latent variables are probabilistic models which can automatically identify complex stochastic patterns in the data. We describe and study in these models a decomposition of predictive uncertainty into its epistemic and aleatoric components. First, we show how such a decomposition arises naturally in a Bayesian active learning scenario by following an information-theoretic approach. Second, we use a similar decomposition to develop a novel risk-sensitive objective for safe reinforcement learning (RL). This objective minimizes the effect of model bias in environments whose stochastic dynamics are described by BNNs with latent variables. Our experiments illustrate the usefulness of the resulting decomposition in active learning and safe RL settings.", "revisions": [ { "version": "v1", "updated": "2017-06-26T17:36:28.000Z" } ], "analyses": { "keywords": [ "bayesian neural networks", "latent variables", "uncertainty decomposition", "safe rl settings", "information-theoretic approach" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }