@misc{jhamtani2025lm,
  author       = {Jhamtani, Harsh and Andreas, Jacob and Van Durme, Ben},
  title        = {LM Agents for Coordinating Multi-User Information Gathering},
  howpublished = {arXiv: Computation and Language},
  year         = {2025},
  month        = {February},
  abstract     = {This paper introduces PeopleJoin, a benchmark for evaluating LM-mediated collaborative problem solving. Given a user request, PeopleJoin agents must identify teammates who might be able to assist, converse with these teammates to gather information, and finally compile a useful answer or summary for the original user. PeopleJoin comprises two evaluation domains: PeopleJoin-QA, focused on questions about tabular data, and PeopleJoin-DocCreation, focused on document creation tasks. The two domains are adapted from existing NLP benchmarks for database question answering and multi-document summarization; here, however, the information needed to complete these tasks is distributed across synthetic ``organizations'' of 2--20 users, simulating natural multi-user collaboration scenarios. We implement several popular LM agent architectures, evaluate their accuracy and efficiency at completing tasks, and highlight new research questions that can be studied using PeopleJoin.},
  url          = {https://www.microsoft.com/en-us/research/publication/lm-agents-for-coordinating-multi-user-information-gathering/},
}