{ "id": "1706.02690", "version": "v1", "published": "2017-06-08T17:43:56.000Z", "updated": "2017-06-08T17:43:56.000Z", "title": "Principled Detection of Out-of-Distribution Examples in Neural Networks", "authors": [ "Shiyu Liang", "Yixuan Li", "R. Srikant" ], "categories": [ "cs.LG", "stat.ML" ], "abstract": "We consider the problem of detecting out-of-distribution examples in neural networks. We propose ODIN, a simple and effective out-of-distribution detector for neural networks, that does not require any change to a pre-trained model. Our method is based on the observation that using temperature scaling and adding small perturbations to the input can separate the softmax score distributions of in- and out-of-distribution samples, allowing for more effective detection. We show in a series of experiments that our approach is compatible with diverse network architectures and datasets. It consistently outperforms the baseline approach [1] by a large margin, establishing a new state-of-the-art performance on this task. For example, ODIN reduces the false positive rate from the baseline 34.7% to 4.3% on the DenseNet (applied to CIFAR-10) when the true positive rate is 95%. We theoretically analyze the method and prove that performance improvement is guaranteed under mild conditions on the image distributions.", "revisions": [ { "version": "v1", "updated": "2017-06-08T17:43:56.000Z" } ], "analyses": { "keywords": [ "neural networks", "out-of-distribution examples", "principled detection", "diverse network architectures", "softmax score distributions" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }