{ "id": "1811.12273", "version": "v1", "published": "2018-11-29T16:06:57.000Z", "updated": "2018-11-29T16:06:57.000Z", "title": "On the Transferability of Representations in Neural Networks Between Datasets and Tasks", "authors": [ "Haytham M. Fayek", "Lawrence Cavedon", "Hong Ren Wu" ], "comment": "Accepted Paper in the Continual Learning Workshop, NeurIPS 2018", "journal": "Continual Learning Workshop, 32nd Neural Information Processing Systems (NeurIPS 2018), Montreal, Canada", "categories": [ "cs.LG", "stat.ML" ], "abstract": "Deep networks, composed of multiple layers of hierarchical distributed representations, tend to learn low-level features in initial layers and transition to high-level features towards final layers. Paradigms such as transfer learning, multi-task learning, and continual learning leverage this notion of generic hierarchical distributed representations to share knowledge across datasets and tasks. Herein, we study the layer-wise transferability of representations in deep networks across a few datasets and tasks and note some interesting empirical observations.", "revisions": [ { "version": "v1", "updated": "2018-11-29T16:06:57.000Z" } ], "analyses": { "keywords": [ "neural networks", "transferability", "deep networks", "learn low-level features", "share knowledge" ], "tags": [ "journal article" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }