@inproceedings{zuo2022taming,
  author    = {Zuo, Simiao and Liu, Xiaodong and Jiao, Jian and Kim, Young Jin and Hassan Awadalla, Hany and Zhang, Ruofei and Zhao, Tuo and Gao, Jianfeng},
  title     = {Taming Sparsely Activated Transformer with Stochastic Experts},
  booktitle = {ICLR 2022},
  year      = {2022},
  month     = {April},
  abstract  = {Sparsely activated models (SAMs), such as Mixture-of-Experts (MoE), can easily scale to an outrageously large number of parameters without a significant increase in computational cost. However, SAMs are reported to be parameter inefficient, in that larger models do not always lead to better performance. While most ongoing research focuses on improving SAMs by exploring methods for routing inputs to experts, our analysis reveals that such research may not lead to the solution we expect: the commonly used routing methods based on gating mechanisms do not work better than randomly routing inputs to experts. In this paper, we propose a new expert-based model, THOR (Transformer witH StOchastic ExpeRts). Unlike classic expert-based models, such as the Switch Transformer, experts in THOR are randomly activated for each input during training and inference. THOR models are trained with a consistency-regularized loss, where experts learn not only from the training data but also from other experts acting as teachers, so that all experts make consistent predictions. We validate the effectiveness of THOR on machine translation tasks. Results show that THOR models are more parameter efficient: they significantly outperform Transformer and MoE models across various settings. For example, in multilingual translation, THOR outperforms the Switch Transformer by 2 BLEU points and matches the BLEU score of a state-of-the-art MoE model that is 18 times larger. Our code is publicly available on GitHub.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/taming-sparsely-activated-transformer-with-stochastic-experts-2/},
}
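
The abstract describes two ingredients: experts that are activated uniformly at random for each input (no learned gate), and a consistency term that pushes the randomly chosen experts toward agreeing predictions. The sketch below is only an illustration of that idea, not the authors' released code; the module name, the choice of two experts per forward pass, and the symmetric-KL form of the consistency term are assumptions for the example.

    # Minimal PyTorch sketch of stochastic expert activation with a consistency
    # loss, based on the abstract above. Hypothetical names and hyperparameters;
    # not the official THOR implementation.
    import torch
    import torch.nn as nn
    import torch.nn.functional as F


    class StochasticExpertFFN(nn.Module):
        """Feed-forward sublayer whose experts are picked at random (no gating network)."""

        def __init__(self, d_model, d_ff, num_experts=4):
            super().__init__()
            self.experts = nn.ModuleList(
                nn.Sequential(nn.Linear(d_model, d_ff), nn.ReLU(), nn.Linear(d_ff, d_model))
                for _ in range(num_experts)
            )

        def forward(self, x):
            # Sample two experts uniformly at random and return both outputs,
            # so a consistency term can be computed between them.
            i, j = torch.randperm(len(self.experts))[:2].tolist()
            return self.experts[i](x), self.experts[j](x)


    def consistency_loss(logits_a, logits_b):
        """Symmetric KL divergence between the two experts' predictive distributions."""
        p = F.log_softmax(logits_a, dim=-1)
        q = F.log_softmax(logits_b, dim=-1)
        return 0.5 * (
            F.kl_div(p, q, log_target=True, reduction="batchmean")
            + F.kl_div(q, p, log_target=True, reduction="batchmean")
        )


    if __name__ == "__main__":
        # Toy usage: total loss = task loss for each sampled expert + consistency term.
        d_model, vocab = 16, 10
        layer = StochasticExpertFFN(d_model, d_ff=32)
        head = nn.Linear(d_model, vocab)          # stand-in output projection
        x = torch.randn(8, d_model)               # a batch of token representations
        y = torch.randint(0, vocab, (8,))
        out_a, out_b = layer(x)
        logits_a, logits_b = head(out_a), head(out_b)
        task = F.cross_entropy(logits_a, y) + F.cross_entropy(logits_b, y)
        total = task + consistency_loss(logits_a, logits_b)
        total.backward()

Because routing is uniform and independent of the input, this sketch needs no load-balancing loss; the consistency term is what ties the experts together so that any randomly activated expert can be used at inference time.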