{ "id": "2205.09357", "version": "v1", "published": "2022-05-19T07:27:12.000Z", "updated": "2022-05-19T07:27:12.000Z", "title": "Continual Pre-Training Mitigates Forgetting in Language and Vision", "authors": [ "Andrea Cossu", "Tinne Tuytelaars", "Antonio Carta", "Lucia Passaro", "Vincenzo Lomonaco", "Davide Bacciu" ], "comment": "under review", "categories": [ "cs.LG", "cs.AI" ], "abstract": "Pre-trained models are nowadays a fundamental component of machine learning research. In continual learning, they are commonly used to initialize the model before training on the stream of non-stationary data. However, pre-training is rarely applied during continual learning. We formalize and investigate the characteristics of the continual pre-training scenario in both language and vision environments, where a model is continually pre-trained on a stream of incoming data and only later fine-tuned to different downstream tasks. We show that continually pre-trained models are robust against catastrophic forgetting and we provide strong empirical evidence supporting the fact that self-supervised pre-training is more effective in retaining previous knowledge than supervised protocols. Code is provided at https://github.com/AndreaCossu/continual-pretraining-nlp-vision .", "revisions": [ { "version": "v1", "updated": "2022-05-19T07:27:12.000Z" } ], "analyses": { "keywords": [ "continual pre-training mitigates forgetting", "downstream tasks", "fundamental component", "vision environments", "continual pre-training scenario" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }