@inproceedings{yi2023boosting,
  author    = {Yi, Rongjie and Cao, Ting and Zhou, Ao and Ma, Xiao and Wang, Shangguang and Xu, Mengwei},
  title     = {Boosting DNN Cold Inference on Devices},
  booktitle = {The 21st Annual International Conference on Mobile Systems, Applications and Services (MobiSys ’23)},
  year      = {2023},
  month     = {June},
  abstract  = {DNNs are ubiquitous on edge devices nowadays. Given their growing importance and number of use cases, it is unlikely that all DNNs can be packed into device memory so that every inference is already warmed up. Therefore, cold inference, the process of reading, initializing, and executing a DNN model, is becoming commonplace, and its performance urgently needs to be optimized. To this end, we present NNV12, the first on-device inference engine that optimizes cold inference. NNV12 is built atop three novel optimization knobs: selecting a proper kernel (i.e., operator implementation) for each DNN operator, bypassing the weights transformation process by caching the post-transformed weights on disk, and pipelining the execution of many kernels on asymmetric processors. To tackle the huge search space, NNV12 employs a heuristic-based scheme to obtain a near-optimal kernel scheduling plan. We fully implement a prototype of NNV12 and evaluate its performance through extensive experiments. The results show that NNV12 achieves up to 15.2× speedup on edge CPUs and up to 401.5× speedup on edge GPUs compared to state-of-the-art DNN engines.},
  publisher = {ACM},
  url       = {http://approjects.co.za/?big=en-us/research/publication/boosting-dnn-cold-inference-on-devices/},
}