{ "id": "2407.00176", "version": "v1", "published": "2024-06-28T18:29:51.000Z", "updated": "2024-06-28T18:29:51.000Z", "title": "The impact of model size on catastrophic forgetting in Online Continual Learning", "authors": [ "Eunhae Lee" ], "categories": [ "cs.LG", "cs.CV" ], "abstract": "This study investigates the impact of model size on Online Continual Learning performance, with a focus on catastrophic forgetting. Employing ResNet architectures of varying sizes, the research examines how network depth and width affect model performance in class-incremental learning using the SplitCIFAR-10 dataset. Key findings reveal that larger models do not guarantee better Continual Learning performance; in fact, they often struggle more in adapting to new tasks, particularly in online settings. These results challenge the notion that larger models inherently mitigate catastrophic forgetting, highlighting the nuanced relationship between model size and Continual Learning efficacy. This study contributes to a deeper understanding of model scalability and its practical implications in Continual Learning scenarios.", "revisions": [ { "version": "v1", "updated": "2024-06-28T18:29:51.000Z" } ], "analyses": { "keywords": [ "online continual learning", "model size", "better continual learning performance", "inherently mitigate catastrophic forgetting", "larger models inherently mitigate catastrophic" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }