{ "id": "2107.12657", "version": "v1", "published": "2021-07-27T08:09:32.000Z", "updated": "2021-07-27T08:09:32.000Z", "title": "Continual Learning with Neuron Activation Importance", "authors": [ "Sohee Kim", "Seungkyu Lee" ], "categories": [ "cs.LG", "cs.CV" ], "abstract": "Continual learning is a concept of online learning with multiple sequential tasks. One of the critical barriers of continual learning is that a network should learn a new task keeping the knowledge of old tasks without access to any data of the old tasks. In this paper, we propose a neuron activation importance-based regularization method for stable continual learning regardless of the order of tasks. We conduct comprehensive experiments on existing benchmark data sets to evaluate not just the stability and plasticity of our method with improved classification accuracy also the robustness of the performance along the changes of task order.", "revisions": [ { "version": "v1", "updated": "2021-07-27T08:09:32.000Z" } ], "analyses": { "keywords": [ "continual learning", "old tasks", "neuron activation importance-based regularization method", "existing benchmark data sets", "multiple sequential tasks" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }