@inproceedings{namaki2020vamsa,
  author    = {Namaki, Mohammad Hossein and Floratou, Avrilia and Psallidas, Fotis and Krishnan, Subru and Agrawal, Ashvin and Zhu, Yiwen and Weimer, Markus and Wu, Yinghui},
  title     = {Vamsa: Automated Provenance Tracking in Data Science Scripts},
  booktitle = {KDD},
  year      = {2020},
  month     = {August},
  abstract  = {There has recently been a great deal of research on the fairness, bias, and explainability of machine learning (ML) models, motivated by the self-evident or regulatory requirements of various ML applications. We make the following observation: all of these approaches require a robust understanding of the relationship between ML models and the data used to train them. In this work, we introduce the ML provenance tracking problem: the fundamental idea is to automatically track which columns in a dataset have been used to derive the features/labels of an ML model. We discuss the challenges of capturing such information in the context of Python, the most common language used by data scientists. We then present Vamsa, a modular system that extracts provenance from Python scripts without requiring any changes to the users' code. Using 26K real data science scripts, we verify the effectiveness of Vamsa in terms of coverage and performance. We also evaluate Vamsa's accuracy on a smaller subset of manually labeled data. Our analysis shows that Vamsa's precision and recall range from 90.4% to 99.1%, and that its latency is on the order of milliseconds for average-size scripts. Drawing from our experience deploying ML models in production, we also present an example in which Vamsa helps automatically identify models that are affected by data corruption issues.},
  url       = {https://www.microsoft.com/en-us/research/publication/vamsa-automated-provenance-tracking-in-data-science-scripts/},
}