{ "id": "2008.13122", "version": "v1", "published": "2020-08-30T09:06:03.000Z", "updated": "2020-08-30T09:06:03.000Z", "title": "Adversarial Learning for Counterfactual Fairness", "authors": [ "Vincent Grari", "Sylvain Lamprier", "Marcin Detyniecki" ], "comment": "11 pages, 5 figures", "categories": [ "cs.LG", "cs.AI", "cs.CY", "stat.ML" ], "abstract": "In recent years, fairness has become an important topic in the machine learning research community. In particular, counterfactual fairness aims at building prediction models which ensure fairness at the most individual level. Rather than globally considering equity over the entire population, the idea is to imagine what any individual would look like with a variation of a given attribute of interest, such as a different gender or race for instance. Existing approaches rely on Variational Auto-encoding of individuals, using Maximum Mean Discrepancy (MMD) penalization to limit the statistical dependence of inferred representations with their corresponding sensitive attributes. This enables the simulation of counterfactual samples used for training the target fair model, the goal being to produce similar outcomes for every alternate version of any individual. In this work, we propose to rely on an adversarial neural learning approach, that enables more powerful inference than with MMD penalties, and is particularly better fitted for the continuous setting, where values of sensitive attributes cannot be exhaustively enumerated. Experiments show significant improvements in term of counterfactual fairness for both the discrete and the continuous settings.", "revisions": [ { "version": "v1", "updated": "2020-08-30T09:06:03.000Z" } ], "analyses": { "keywords": [ "adversarial learning", "individual", "machine learning research community", "target fair model", "sensitive attributes" ], "note": { "typesetting": "TeX", "pages": 11, "language": "en", "license": "arXiv", "status": "editable" } } }