{ "id": "2106.03498", "version": "v1", "published": "2021-06-07T10:35:52.000Z", "updated": "2021-06-07T10:35:52.000Z", "title": "Identifiability in inverse reinforcement learning", "authors": [ "Haoyang Cao", "Samuel N. Cohen", "Lukasz Szpruch" ], "categories": [ "cs.LG", "math.OC" ], "abstract": "Inverse reinforcement learning attempts to reconstruct the reward function in a Markov decision problem, using observations of agent actions. As already observed by Russell the problem is ill-posed, and the reward function is not identifiable, even under the presence of perfect information about optimal behavior. We provide a resolution to this non-identifiability for problems with entropy regularization. For a given environment, we fully characterize the reward functions leading to a given policy and demonstrate that, given demonstrations of actions for the same reward under two distinct discount factors, or under sufficiently different environments, the unobserved reward can be recovered up to a constant. Through a simple numerical experiment, we demonstrate the accurate reconstruction of the reward function through our proposed resolution.", "revisions": [ { "version": "v1", "updated": "2021-06-07T10:35:52.000Z" } ], "analyses": { "subjects": [ "49N45", "93B30", "93E12", "93B15", "49N10", "90C40", "60J10", "62M05" ], "keywords": [ "reward function", "identifiability", "inverse reinforcement learning attempts", "distinct discount factors", "markov decision problem" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }