@inproceedings{gupta2026chow-liu,
  author    = {Gupta, Naman and Singh, Vaibhav and Iyer, Arun and Shiragur, Kirankumar and Grover, Pratham and Bairi, Ramakrishna and Maiti, Ritabrata and Damle, Sankarshan and Gupta, Shachee Mishra and Maurya, Rishikesh and D C, Vageesh},
  title     = {{Chow-Liu} Ordering for Long-Context Reasoning in {Chain-of-Agents}},
  booktitle = {International Conference on Learning Representations Workshop on Memory for {LLM}-Based Agentic Systems ({MemAgents-ICLR})},
  year      = {2026},
  month     = apr,
  abstract  = {Sequential multi-agent reasoning frameworks such as Chain-of-Agents (CoA) handle long-context queries by decomposing inputs into chunks and processing them sequentially using LLM-based worker agents that read from and update a bounded shared memory. From a probabilistic perspective, CoA aims to approximate the conditional distribution corresponding to a model capable of jointly reasoning over the entire long context. CoA achieves this through a latent-state factorization in which only bounded summaries of previously processed evidence are passed between agents. The resulting bounded-memory approximation introduces a lossy information bottleneck, making the final evidence state inherently dependent on the order in which chunks are processed. In this work, we study the problem of chunk ordering for long-context reasoning. We use the well-known Chow-Liu trees to learn a dependency structure that prioritizes strongly related chunks. Empirically, we show that a breadth-first traversal of the resulting tree yields chunk orderings that reduce information loss across agents and consistently outperform both default document-chunk ordering and semantic score-based ordering in answer relevance and exact-match accuracy across three long-context benchmarks.},
  url       = {https://www.microsoft.com/en-us/research/publication/chow-liu-ordering-for-long-context-reasoning-in-chain-of-agents/},
}