@article{nie2023vspim,
  author   = {Nie, Chen and Tang, Chenyu and Lin, Jie and Hu, Huan and Lv, Chenyang and Cao, Ting and Zhang, Weifeng and Jiang, Li and Liang, Xiaoyao and Qian, Weikang and Sun, Yanan and He, Zhezhi},
  title    = {VSPIM: SRAM Processing-in-Memory DNN Acceleration via Vector-Scalar Operations},
  year     = {2023},
  month    = {June},
  abstract = {Processing-in-Memory (PIM) has been widely explored for accelerating data-intensive machine learning computation, which mainly consists of general matrix multiplication (GEMM), by mitigating the burden of data movement and exploiting the ultra-high parallelism of memory. Both mainstream types of PIM, analog and digital, have been exploited to accelerate machine learning workloads in numerous prior works. Currently, digital PIM is increasingly favored due to its broader computing support and its avoidance of errors caused by intrinsic non-idealities, e.g., process variation. Nevertheless, it still lacks optimizations tailored to the characteristics of GEMM computation, including more efficient data layout and scheduling, and the ability to handle activation sparsity at the bit level. To boost the performance and efficiency of digital SRAM PIM, we propose VSPIM, an architecture that performs computation in a bit-serial fashion with unique support for a vector-scalar computing pattern. The novelties of VSPIM can be summarized as follows: 1) it supports bit-serial scalar-vector computing via parallel bit-broadcasting; 2) it refines the GEMM mapping strategy and computing pattern to enhance performance and efficiency; 3) powered by the introduced scalar-vector operation, it leverages the bit-level sparsity of activations to halt unnecessary computation, maximizing efficiency and throughput. Our comprehensive evaluation shows that, compared to the state-of-the-art SRAM-based digital PIM design (Neural Cache), VSPIM boosts performance and energy efficiency by up to 8.87x and 4.81x, respectively, with negligible area overhead, across multiple representative neural networks.},
  url      = {http://approjects.co.za/?big=en-us/research/publication/vspim-sram-processing-in-memory-dnn-acceleration-via-vector-scalar-operations/},
  journal  = {IEEE Transactions on Computers},
}