{ "id": "2007.15645", "version": "v1", "published": "2020-07-30T17:58:05.000Z", "updated": "2020-07-30T17:58:05.000Z", "title": "Approximation of Smoothness Classes by Deep ReLU Networks", "authors": [ "Mazen Ali", "Anthony Nouy" ], "categories": [ "math.FA", "cs.LG", "cs.NA", "math.NA" ], "abstract": "We consider approximation rates of sparsely connected deep rectified linear unit (ReLU) and rectified power unit (RePU) neural networks for functions in Besov spaces $B^\\alpha_{q}(L^p)$ in arbitrary dimension $d$, on bounded or unbounded domains. We show that RePU networks with a fixed activation function attain optimal approximation rates for functions in the Besov space $B^\\alpha_{\\tau}(L^\\tau)$ on the critical embedding line $1/\\tau=\\alpha/d+1/p$ for arbitrary smoothness order $\\alpha>0$. Moreover, we show that ReLU networks attain near to optimal rates for any Besov space strictly above the critical line. Using interpolation theory, this implies that the entire range of smoothness classes at or above the critical line is (near to) optimally approximated by deep ReLU/RePU networks.", "revisions": [ { "version": "v1", "updated": "2020-07-30T17:58:05.000Z" } ], "analyses": { "subjects": [ "41A65", "41A15", "68T05", "42C40", "65D99" ], "keywords": [ "deep relu networks", "smoothness classes", "attain optimal approximation rates", "function attain optimal approximation", "activation function attain optimal" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }