{ "id": "2410.00242", "version": "v1", "published": "2024-09-30T21:22:41.000Z", "updated": "2024-09-30T21:22:41.000Z", "title": "Quantized and Asynchronous Federated Learning", "authors": [ "Tomas Ortega", "Hamid Jafarkhani" ], "categories": [ "cs.LG", "eess.SP", "math.OC" ], "abstract": "Recent advances in federated learning have shown that asynchronous variants can be faster and more scalable than their synchronous counterparts. However, their design does not include quantization, which is necessary in practice to deal with the communication bottleneck. To bridge this gap, we develop a novel algorithm, Quantized Asynchronous Federated Learning (QAFeL), which introduces a hidden-state quantization scheme to avoid the error propagation caused by direct quantization. QAFeL also includes a buffer to aggregate client updates, ensuring scalability and compatibility with techniques such as secure aggregation. Furthermore, we prove that QAFeL achieves an $\\mathcal{O}(1/\\sqrt{T})$ ergodic convergence rate for stochastic gradient descent on non-convex objectives, which is the optimal order of complexity, without requiring bounded gradients or uniform client arrivals. We also prove that the cross-term error between staleness and quantization only affects the higher-order error terms. We validate our theoretical findings on standard benchmarks.", "revisions": [ { "version": "v1", "updated": "2024-09-30T21:22:41.000Z" } ], "analyses": { "subjects": [ "68W10", "68W15", "68W40", "90C06", "90C35", "90C26", "G.1.6", "F.2.1", "E.4" ], "keywords": [ "asynchronous federated learning", "higher-order error terms", "hidden-state quantization scheme", "ergodic convergence rate", "stochastic gradient descent" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }