@inproceedings{sharma2023on,
  author       = {Sharma, Mohit and Deshpande, Amit and Anand, Avinash},
  title        = {On Comparing Fair Classifiers under Data Bias},
  booktitle    = {{NeurIPS} Workshop on Algorithmic Fairness through the Lens of Time},
  organization = {Microsoft},
  month        = nov,
  year         = {2023},
  abstract     = {In this paper, we consider a theoretical model for injecting data bias, namely, under-representation and label bias (Blum \& Stangl, 2019). We empirically study the effect of varying data biases on the accuracy and fairness of fair classifiers. Through extensive experiments on both synthetic and real-world datasets (e.g., Adult, German Credit, Bank Marketing, COMPAS), we empirically audit pre-, in-, and post-processing fair classifiers from standard fairness toolkits for their fairness and accuracy by injecting varying amounts of under-representation and label bias in their training data (but not the test data). Our main observations are: 1. The fairness and accuracy of many standard fair classifiers degrade severely as the bias injected in their training data increases; 2. A simple logistic regression model trained on the right data can often outperform, in both accuracy and fairness, most fair classifiers trained on biased training data, and 3. A few simple fairness techniques (e.g., reweighing, exponentiated gradients) seem to offer stable accuracy and fairness guarantees even when their training data is injected with under-representation and label bias. Our experiments also show how to integrate a measure of data bias risk in the existing fairness dashboards for real-world deployments.},
  url          = {http://approjects.co.za/?big=en-us/research/publication/on-comparing-fair-classifiers-under-data-bias/},
}