{ "id": "2106.12576", "version": "v1", "published": "2021-06-22T20:37:12.000Z", "updated": "2021-06-22T20:37:12.000Z", "title": "DP-SGD vs PATE: Which Has Less Disparate Impact on Model Accuracy?", "authors": [ "Archit Uniyal", "Rakshit Naidu", "Sasikanth Kotti", "Sahib Singh", "Patrik Joslin Kenfack", "Fatemehsadat Mireshghallah", "Andrew Trask" ], "comment": "4 pages, 3 images", "categories": [ "cs.LG", "cs.AI", "cs.CR" ], "abstract": "Recent advances in differentially private deep learning have demonstrated that application of differential privacy, specifically the DP-SGD algorithm, has a disparate impact on different sub-groups in the population, which leads to a significantly high drop-in model utility for sub-populations that are under-represented (minorities), compared to well-represented ones. In this work, we aim to compare PATE, another mechanism for training deep learning models using differential privacy, with DP-SGD in terms of fairness. We show that PATE does have a disparate impact too, however, it is much less severe than DP-SGD. We draw insights from this observation on what might be promising directions in achieving better fairness-privacy trade-offs.", "revisions": [ { "version": "v1", "updated": "2021-06-22T20:37:12.000Z" } ], "analyses": { "keywords": [ "disparate impact", "model accuracy", "differential privacy", "significantly high drop-in model utility", "achieving better fairness-privacy trade-offs" ], "note": { "typesetting": "TeX", "pages": 4, "language": "en", "license": "arXiv", "status": "editable" } } }