@misc{wang2024q-sparse,
  author       = {Wang, Hongyu and Ma, Shuming and Wang, Ruiping and Wei, Furu},
  title        = {Q-Sparse: All Large Language Models can be Fully Sparsely-Activated},
  howpublished = {arXiv},
  year         = {2024},
  month        = {July},
  abstract     = {We introduce Q-Sparse, a simple yet effective approach to training sparsely-activated large language models (LLMs). Q-Sparse enables full sparsity of activations in LLMs, which can bring significant efficiency gains at inference time. This is achieved by applying top-K sparsification to the activations and the straight-through estimator to the training. We also introduce Block Q-Sparse for batch training and inference. The key results from this work are: (1) Q-Sparse can achieve results comparable to those of baseline LLMs while being much more efficient at inference time; (2) we present an inference-optimal scaling law for sparsely-activated LLMs; (3) Q-Sparse is effective in different settings, including training from scratch, continue-training of off-the-shelf LLMs, and finetuning; (4) Q-Sparse works for both full-precision and 1-bit LLMs (e.g., BitNet b1.58). In particular, the synergy of BitNet b1.58 and Q-Sparse (optionally equipped with MoE) provides the cornerstone and a clear path to revolutionizing the efficiency, including cost and energy consumption, of future LLMs.},
  url          = {http://approjects.co.za/?big=en-us/research/publication/q-sparse-all-large-language-models-can-be-fully-sparsely-activated/},
}
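
The abstract's core mechanism, top-K sparsification of activations combined with a straight-through estimator for gradients, can be illustrated with a minimal PyTorch-style sketch. This is not the authors' released code; the function name, tensor shapes, and per-row top-K choice below are illustrative assumptions.

```python
import torch

def topk_sparsify_ste(x: torch.Tensor, k: int) -> torch.Tensor:
    """Keep only the k largest-magnitude entries along the last dim.

    Forward pass: hard top-K mask (fully sparse activations).
    Backward pass: straight-through estimator, so gradients flow to all
    entries as if the masking were the identity function.
    """
    # Build a 0/1 mask selecting the top-k magnitudes per row.
    _, idx = torch.topk(x.abs(), k, dim=-1)
    mask = torch.zeros_like(x).scatter_(-1, idx, 1.0)
    sparse = x * mask
    # Straight-through trick: the forward value is `sparse`, but because
    # the difference term is detached, d(out)/dx = 1 in the backward pass.
    return x + (sparse - x).detach()

# Toy usage: sparsify a batch of hidden activations, keeping 2 of 8 entries.
h = torch.randn(2, 8, requires_grad=True)
out = topk_sparsify_ste(h, k=2)
out.sum().backward()
print(out)      # mostly zeros in the forward output
print(h.grad)   # dense gradients thanks to the STE
```

The sketch applies top-K along the last dimension of an arbitrary tensor as a simplification; see the paper for how and where sparsification is applied in the actual model, and for the Block Q-Sparse variant used in batch training and inference.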