{ "id": "1801.09624", "version": "v1", "published": "2018-01-29T16:56:49.000Z", "updated": "2018-01-29T16:56:49.000Z", "title": "Learning the Reward Function for a Misspecified Model", "authors": [ "Erik Talvitie" ], "comment": "Currently under review", "categories": [ "cs.LG" ], "abstract": "In model-based reinforcement learning it is typical to treat the problems of learning the dynamics model and learning the reward function separately. However, when the dynamics model is flawed, it may generate erroneous states that would never occur in the true environment. A reward function trained only to map environment states to rewards (as is typical) would have little guidance in such states. This paper presents a novel error bound that accounts for the reward model's behavior in states sampled from the model. This bound is used to extend the existing Hallucinated DAgger-MC algorithm, which offers theoretical performance guarantees in deterministic MDPs that do not assume a perfect model can be learned. Empirically, this approach to reward learning can yield dramatic improvements in control performance when the dynamics model is flawed.", "revisions": [ { "version": "v1", "updated": "2018-01-29T16:56:49.000Z" } ], "analyses": { "subjects": [ "I.2.6", "I.2.8" ], "keywords": [ "reward function", "misspecified model", "dynamics model", "yield dramatic improvements", "offers theoretical performance guarantees" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }