@techreport{bird2020fairlearn,
  author      = {Bird, Sarah and Dudík, Miro and Edgar, Richard and Horn, Brandon and Lutz, Roman and Milan, Vanessa and Sameki, Mehrnoosh and Wallach, Hanna and Walker, Kathleen},
  title       = {Fairlearn: A toolkit for assessing and improving fairness in {AI}},
  institution = {Microsoft},
  year        = {2020},
  month       = {May},
  number      = {MSR-TR-2020-32},
  url         = {https://www.microsoft.com/en-us/research/publication/fairlearn-a-toolkit-for-assessing-and-improving-fairness-in-ai/},
  abstract    = {We introduce Fairlearn, an open source toolkit that empowers data scientists and developers to assess and improve the fairness of their AI systems. Fairlearn has two components: an interactive visualization dashboard and unfairness mitigation algorithms. These components are designed to help with navigating trade-offs between fairness and model performance. We emphasize that prioritizing fairness in AI systems is a sociotechnical challenge. Because there are many complex sources of unfairness—some societal and some technical—it is not possible to fully “debias” a system or to guarantee fairness; the goal is to mitigate fairness-related harms as much as possible. As Fairlearn grows to include additional fairness metrics, unfairness mitigation algorithms, and visualization capabilities, we hope that it will be shaped by a diverse community of stakeholders, ranging from data scientists, developers, and business decision makers to the people whose lives may be affected by the predictions of AI systems.},
}