{ "id": "2211.01053", "version": "v1", "published": "2022-11-02T11:37:06.000Z", "updated": "2022-11-02T11:37:06.000Z", "title": "Fantasizing with Dual GPs in Bayesian Optimization and Active Learning", "authors": [ "Paul E. Chang", "Prakhar Verma", "ST John", "Victor Picheny", "Henry Moss", "Arno Solin" ], "comment": "In the 2022 NeurIPS Workshop on Gaussian Processes, Spatiotemporal Modeling, and Decision-making Systems", "categories": [ "cs.LG", "stat.ML" ], "abstract": "Gaussian processes (GPs) are the main surrogate functions used for sequential modelling such as Bayesian Optimization and Active Learning. Their drawbacks are poor scaling with data and the need to run an optimization loop when using a non-Gaussian likelihood. In this paper, we focus on `fantasizing' batch acquisition functions that need the ability to condition on new fantasized data computationally efficiently. By using a sparse Dual GP parameterization, we gain linear scaling with batch size as well as one-step updates for non-Gaussian likelihoods, thus extending sparse models to greedy batch fantasizing acquisition functions.", "revisions": [ { "version": "v1", "updated": "2022-11-02T11:37:06.000Z" } ], "analyses": { "keywords": [ "bayesian optimization", "active learning", "greedy batch fantasizing acquisition functions", "sparse dual gp parameterization", "non-gaussian likelihood" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }