{ "id": "1902.07286", "version": "v1", "published": "2019-02-19T21:07:10.000Z", "updated": "2019-02-19T21:07:10.000Z", "title": "Online Learning with Continuous Variations: Dynamic Regret and Reductions", "authors": [ "Ching-An Cheng", "Jonathan Lee", "Ken Goldberg", "Byron Boots" ], "categories": [ "cs.LG", "math.OC", "stat.ML" ], "abstract": "We study the dynamic regret of a new class of online learning problems, in which the gradient of the loss function changes continuously across rounds with respect to the learner's decisions. This setup is motivated by the use of online learning as a tool to analyze the performance of iterative algorithms. Our goal is to identify interpretable dynamic regret rates that explicitly consider the loss variations as consequences of the learner's decisions as opposed to external constraints. We show that achieving sublinear dynamic regret in general is equivalent to solving certain variational inequalities, equilibrium problems, and fixed-point problems. Leveraging this identification, we present necessary and sufficient conditions for the existence of efficient algorithms that achieve sublinear dynamic regret. Furthermore, we show a reduction from dynamic regret to both static regret and convergence rate to equilibria in the aforementioned problems, which allows us to analyze the dynamic regret of many existing learning algorithms in a few steps.", "revisions": [ { "version": "v1", "updated": "2019-02-19T21:07:10.000Z" } ], "analyses": { "keywords": [ "online learning", "continuous variations", "learners decisions", "achieve sublinear dynamic regret", "loss function changes" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }