@inproceedings{liu2023tinygsm,
  author    = {Liu, Bingbin and Bubeck, Sébastien and Eldan, Ronen and Kulkarni, Janardhan (Jana) and Li, Yuanzhi and Nguyen, Anh and Ward, Rachel and Zhang, Yi},
  title     = {TinyGSM: achieving >80% on GSM8k with small language models},
  booktitle = {3rd MATH-AI Workshop at NeurIPS'23},
  year      = {2023},
  month     = {December},
  abstract  = {Small-scale models offer various computational advantages, and yet to what extent size is critical for problem-solving abilities remains an open question. Specifically for solving grade school math, the smallest model size so far required to break the 80% barrier on the GSM8K benchmark remains 34B. Our work studies how high-quality datasets may be the key for small language models to acquire mathematical reasoning. We introduce TinyGSM, a synthetic dataset of 12.3M grade school math problems paired with Python solutions, generated fully by GPT-3.5. After finetuning on TinyGSM, we find that a duo of a 1.3B generation model and a 1.3B verifier model can achieve 81.5% accuracy, outperforming existing models that are orders of magnitude larger. This also rivals the performance of the GPT-3.5 ``teacher'' model (77.4%), from which our model's training data is generated. Our approach is simple and has two key components: 1) the high-quality dataset TinyGSM, and 2) the use of a verifier, which selects the final outputs from multiple candidate generations.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/tinygsm-achieving-80-on-gsm8k-with-small-language-models/},
}
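
% Note: the verifier component described in the abstract amounts to best-of-N reranking:
% sample several candidate solutions and keep the one the verifier scores highest. The
% Python sketch below is illustrative only, not the authors' code; `generate_solutions`
% and `score_solution` are hypothetical stand-ins for the 1.3B generation and verifier models.
%
%   from typing import Callable, List
%
%   def select_with_verifier(
%       question: str,
%       generate_solutions: Callable[[str, int], List[str]],  # hypothetical: N candidate solutions
%       score_solution: Callable[[str, str], float],          # hypothetical: verifier score
%       num_candidates: int = 16,
%   ) -> str:
%       """Sample multiple candidates and return the one ranked highest by the verifier."""
%       candidates = generate_solutions(question, num_candidates)
%       return max(candidates, key=lambda sol: score_solution(question, sol))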