{ "id": "1801.06176", "version": "v1", "published": "2018-01-18T18:57:33.000Z", "updated": "2018-01-18T18:57:33.000Z", "title": "Integrating planning for task-completion dialogue policy learning", "authors": [ "Baolin Peng", "Xiujun Li", "Jianfeng Gao", "Jingjing Liu", "Kam-Fai Wong" ], "comment": "11 pages, 6 figures", "categories": [ "cs.CL", "cs.AI", "cs.LG", "cs.NE" ], "abstract": "Training a task-completion dialogue agent with real users via reinforcement learning (RL) could be prohibitively expensive, because it requires many interactions with users. One alternative is to resort to a user simulator, while the discrepancy of between simulated and real users makes the learned policy unreliable in practice. This paper addresses these challenges by integrating planning into the dialogue policy learning based on Dyna-Q framework, and provides a more sample-efficient approach to learn the dialogue polices. The proposed agent consists of a planner trained on-line with limited real user experience that can generate large amounts of simulated experience to supplement with limited real user experience, and a policy model trained on these hybrid experiences. The effectiveness of our approach is validated on a movie-booking task in both a simulation setting and a human-in-the-loop setting.", "revisions": [ { "version": "v1", "updated": "2018-01-18T18:57:33.000Z" } ], "analyses": { "keywords": [ "task-completion dialogue policy learning", "limited real user experience", "integrating planning", "generate large amounts", "task-completion dialogue agent" ], "note": { "typesetting": "TeX", "pages": 11, "language": "en", "license": "arXiv", "status": "editable" } } }