@inproceedings{gao2020estimating,
  author    = {Gao, Yanjie and Liu, Yu and Zhang, Hongyu and Li, Zhengxian and Zhu, Yonghao and Lin, Haoxiang and Yang, Mao},
  title     = {Estimating {GPU} Memory Consumption of Deep Learning Models},
  booktitle = {Proceedings of the 28th {ACM} Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering ({ESEC/FSE} 2020)},
  year      = {2020},
  month     = nov,
  pages     = {1342--1352},
  publisher = {ACM},
  doi       = {10.1145/3368089.3417050},
  url       = {https://www.microsoft.com/en-us/research/publication/estimating-gpu-memory-consumption-of-deep-learning-models/},
  note      = {Industry Track},
  abstract  = {Deep learning (DL) has been increasingly adopted by a variety of software-intensive systems. Developers mainly use GPUs to accelerate the training, testing, and deployment of DL models. However, the GPU memory consumed by a DL model is often unknown to them before a DL job is executed. Therefore, an improper choice of neural network architecture or hyperparameters can cause the DL job to run out of the limited GPU memory and fail. Our empirical study has found that many DL job failures are due to the exhaustion of GPU memory. This leads to a horrendous waste of computing resources and a significant reduction in development productivity. In this paper, we propose DNNMem, an accurate estimation tool for GPU memory consumption of DL models. DNNMem employs an analytic estimation approach to systematically calculate the memory consumption of both the computation graph and the DL framework runtime. We have evaluated DNNMem on 5 real-world representative models with different hyperparameters under 3 mainstream frameworks (TensorFlow, PyTorch, and MXNet). Our extensive experiments show that DNNMem is effective in estimating GPU memory consumption.},
}