@misc{nori2019interpretml,
  author       = {Nori, Harsha and Jenkins, Samuel and Koch, Paul and Caruana, Rich},
  title        = {InterpretML: A Unified Framework for Machine Learning Interpretability},
  howpublished = {arXiv},
  year         = {2019},
  month        = {September},
  abstract     = {InterpretML is an open-source Python package which exposes machine learning interpretability algorithms to practitioners and researchers. InterpretML exposes two types of interpretability - glassbox models, which are machine learning models designed for interpretability (ex: linear models, rule lists, generalized additive models), and blackbox explainability techniques for explaining existing systems (ex: Partial Dependence, LIME). The package enables practitioners to easily compare interpretability algorithms by exposing multiple methods under a unified API, and by having a built-in, extensible visualization platform. InterpretML also includes the first implementation of the Explainable Boosting Machine, a powerful, interpretable, glassbox model that can be as accurate as many blackbox models. The MIT licensed source code can be downloaded from this http URL.},
  url          = {https://www.microsoft.com/en-us/research/publication/interpretml-a-unified-framework-for-machine-learning-interpretability/},
}