{ "id": "1506.03379", "version": "v1", "published": "2015-06-10T16:23:29.000Z", "updated": "2015-06-10T16:23:29.000Z", "title": "The Online Discovery Problem and Its Application to Lifelong Reinforcement Learning", "authors": [ "Emma Brunskill", "Lihong Li" ], "comment": "17 pages", "categories": [ "cs.LG", "cs.AI" ], "abstract": "Transferring knowledge across a sequence of related tasks is an important challenge in reinforcement learning. Despite much encouraging empirical evidence that shows benefits of transfer, there has been very little theoretical analysis. In this paper, we study a class of lifelong reinforcement-learning problems: the agent solves a sequence of tasks modeled as finite Markov decision processes (MDPs), each of which is from a finite set of MDPs with the same state/action spaces and different transition/reward functions. Inspired by the need for cross-task exploration in lifelong learning, we formulate a novel online discovery problem and give an optimal learning algorithm to solve it. Such results allow us to develop a new lifelong reinforcement-learning algorithm, whose overall sample complexity in a sequence of tasks is much smaller than that of single-task learning, with high probability, even if the sequence of tasks is generated by an adversary. Benefits of the algorithm are demonstrated in a simulated problem.", "revisions": [ { "version": "v1", "updated": "2015-06-10T16:23:29.000Z" } ], "analyses": { "keywords": [ "lifelong reinforcement learning", "application", "novel online discovery problem", "finite markov decision processes", "overall sample complexity" ], "note": { "typesetting": "TeX", "pages": 17, "language": "en", "license": "arXiv", "status": "editable", "adsabs": "2015arXiv150603379B" } } }