{ "id": "1812.01804", "version": "v1", "published": "2018-12-05T03:31:07.000Z", "updated": "2018-12-05T03:31:07.000Z", "title": "Random Spiking and Systematic Evaluation of Defenses Against Adversarial Examples", "authors": [ "Huangyi Ge", "Sze Yiu Chau", "Ninghui Li" ], "categories": [ "cs.LG", "cs.AI", "cs.CR", "stat.ML" ], "abstract": "Image classifiers often suffer from adversarial examples, which are generated by adding a small amount of noise to input images to trick classifiers into misclassification. Over the years, many defense mechanisms have been proposed, and different researchers have made seemingly contradictory claims on their effectiveness. We argue that such discrepancies are primarily due to inconsistent assumptions on the attacker's knowledge. To this end, we present an analysis of possible adversarial models, and propose an evaluation framework for comparing different defense mechanisms. As part of the framework, we introduce a more powerful and realistic adversary strategy. We propose a new defense mechanism called Random Spiking (RS), which generalizes dropout and introduces random noise in the training process in a controlled manner. With a carefully chosen placement, RS incurs negligible negative impact on prediction accuracy. Evaluations under our proposed framework suggest RS delivers better protection against adversarial examples than many existing schemes.", "revisions": [ { "version": "v1", "updated": "2018-12-05T03:31:07.000Z" } ], "analyses": { "keywords": [ "adversarial examples", "random spiking", "systematic evaluation", "defense mechanism", "rs delivers better protection" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }