@inproceedings{hao2022optimizing,
  author    = {Hao, Yaru and Chi, Zewen and Dong, Li and Wei, Furu},
  title     = {Optimizing Prompts for Text-to-Image Generation},
  booktitle = {NeurIPS 2023},
  year      = {2023},
  month     = {December},
  abstract  = {Well-designed prompts can guide text-to-image models to generate amazing images. However, performant prompts are often model-specific and misaligned with user input. Instead of laborious human engineering, we propose prompt adaptation, a general framework that automatically adapts original user input to model-preferred prompts. Specifically, we first perform supervised fine-tuning with a pretrained language model on a small collection of manually engineered prompts. Then we use reinforcement learning to explore better prompts. We define a reward function that encourages the policy to generate more aesthetically pleasing images while preserving the original user intentions. Experimental results on Stable Diffusion show that our method outperforms manual prompt engineering in terms of both automatic metrics and human preference ratings. Moreover, reinforcement learning further boosts performance, especially on out-of-domain prompts. The pretrained checkpoints are available at this https URL. The demo can be found at this https URL.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/optimizing-prompts-for-text-to-image-generation/},
}