@inproceedings{mahajan2020themis,
  author    = {Mahajan, Kshiteej and Balasubramanian, Arjun and Singhvi, Arjun and Venkataraman, Shivaram and Akella, Aditya and Phanishayee, Amar and Chawla, Shuchi},
  title     = {{THEMIS}: Fair and Efficient {GPU} Cluster Scheduling},
  booktitle = {Proceedings of the 17th {USENIX} Symposium on Networked Systems Design and Implementation ({NSDI} 20)},
  year      = {2020},
  month     = feb,
  abstract  = {Modern distributed machine learning (ML) training workloads benefit significantly from leveraging GPUs. However, significant contention ensues when multiple such workloads are run atop a shared cluster of GPUs. A key question is how to fairly apportion GPUs across workloads. We find that established cluster scheduling disciplines are a poor fit because of ML workloads' unique attributes: ML jobs have long-running tasks that need to be gang-scheduled, and their performance is sensitive to tasks' relative placement. We propose THEMIS, a new scheduling framework for ML training workloads. It's GPU allocation policy enforces that ML workloads complete in a finish-time fair manner, a new notion we introduce. To capture placement sensitivity and ensure efficiency, THEMIS uses a two-level scheduling architecture where ML workloads bid on available resources that are offered in an auction run by a central arbiter. Our auction design allocates GPUs to winning bids by trading off efficiency for fairness in the short term, but ensuring finish-time fairness in the long term. Our evaluation on a production trace shows that THEMIS can improve fairness by more than 2.25X and is 5% to 250% more cluster efficient in comparison to state-of-the-art schedulers.},
  url       = {https://www.microsoft.com/en-us/research/publication/themis-fair-and-efficient-gpu-cluster-scheduling/},
}