@misc{wu2025collabllm,
  author   = {Wu, Shirley and Galley, Michel and Peng, Baolin and Cheng, Hao and Li, Gavin and Dou, Yao and Cai, Weixin and Zou, James and Leskovec, Jure and Gao, Jianfeng},
  title    = {CollabLLM: From Passive Responders to Active Collaborators},
  year     = {2025},
  month    = {February},
  abstract = {Large Language Models are typically trained with next-turn rewards, limiting their ability to optimize for long-term interaction. As a result, they often respond passively to ambiguous or open-ended user requests, failing to help users reach their ultimate intents and leading to inefficient conversations. To address these limitations, we introduce CollabLLM, a novel and general training framework that enhances multiturn human-LLM collaboration. Its key innovation is a collaborative simulation that estimates the long-term contribution of responses using Multiturn-aware Rewards. By reinforcement fine-tuning with these rewards, CollabLLM goes beyond responding to user requests and actively uncovers user intent and offers insightful suggestions, a key step towards more human-centered AI. We also devise a multiturn interaction benchmark with three challenging tasks, such as document creation. CollabLLM significantly outperforms our baselines, achieving on average 18.5% higher task performance and 46.3% better interactivity as rated by LLM judges. Finally, we conduct a large user study with 201 judges, in which CollabLLM increases user satisfaction by 17.6% and reduces the time users spend by 10.4%.},
  url      = {http://approjects.co.za/?big=en-us/research/publication/collabllm-from-passive-responders-to-active-collaborators/},
}