@inproceedings{zou2024promptintern,
  author    = {Zou, Jiaru and Zhou, Mengyu and Li, Tao and Han, Shi and Zhang, Dongmei},
  title     = {PromptIntern: Saving Inference Costs by Internalizing Recurrent Prompt during Large Language Model Fine-tuning},
  booktitle = {The 2024 Conference on Empirical Methods in Natural Language Processing (EMNLP '24)},
  year      = {2024},
  month     = {November},
  abstract  = {Recent advances in fine-tuning large language models (LLMs) have greatly enhanced their usage in domain-specific tasks. Despite the success, fine-tuning continues to rely on repeated and lengthy prompts, which escalate computational expenses, require more resources, and lead to slower inference. In this paper, we present a novel approach, PromptIntern, which internalizes prompt knowledge during model fine-tuning to achieve efficient inference and save costs. Instead of compressing the prompts for a vanilla model, PromptIntern aims to embed the recurrent prompt directly into the model parameters. We design a fine-tuning pipeline that includes instruction template compression, few-shot example absorption, and a progressive internalization strategy, effectively diminishing the need for intricate prompts during inference. Comprehensive experiments on challenging NL2Code tasks demonstrate that our method reduces input tokens by more than 90%, accelerates inference by 4.2 times, and reduces monetary inference costs by 88.3%.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/promptintern/},
}