@inproceedings{ge2023extensible,
  author    = {Ge, Tao and Hu, Jing and Dong, Li and Mao, Shaoguang and Xia, Yan and Wang, Xun and Chen, Si-Qing and Wei, Furu},
  title     = {Extensible Prompts for Language Models on Zero-shot Language Style Customization},
  booktitle = {Advances in Neural Information Processing Systems 36 (NeurIPS 2023)},
  year      = {2023},
  month     = {October},
  abstract  = {We propose eXtensible Prompt (X-Prompt) for prompting a large language model (LLM) beyond natural language (NL). X-Prompt instructs an LLM with not only NL but also an extensible vocabulary of imaginary words. Imaginary words can help represent what NL words can hardly describe, allowing a prompt to be more descriptive; they are also designed to be out-of-distribution (OOD) robust so that they can be used like NL words in a variety of prompts, distinguishing X-Prompt from soft prompts, which are fit to in-distribution data. To this end, we propose context-augmented learning (CAL) to learn imaginary words for general usability, enabling them to work properly in OOD (unseen) prompts. We conduct experiments using X-Prompt for zero-shot language style customization as a case study. The promising results of X-Prompt demonstrate its potential for enabling advanced interaction between humans and LLMs and bridging their communication gap.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/extensible-prompts-for-language-models-on-zero-shot-language-style-customization/},
}