@inproceedings{lee2025gistify,
  author    = {Lee, Hyunji and Kim, Minseon and Singh, Chinmay and Pereira, Matheus and Sonwane, Atharv and White, Isadora and Stengel-Eskin, Elias and Bansal, Mohit and Shi, Zhengyan and Sordoni, Alessandro and C{\^o}t{\'e}, Marc-Alexandre and Yuan, Xingdi and Caccia, Lucas},
  title     = {{Gistify}! Codebase-Level Understanding via Runtime Execution},
  booktitle = {{ICLR} 2026},
  year      = {2025},
  month     = oct,
  abstract  = {As coding agents are increasingly deployed in large codebases, the need to automatically design challenging, codebase-level evaluation is central. We propose Gistify, a task where a coding LLM must create a single, minimal, self-contained file that can reproduce a specific functionality of a codebase. The coding LLM is given full access to a codebase along with a specific entrypoint (e.g., a python command), and the generated file must replicate the output of the same command run under the full codebase, while containing only the essential components necessary to execute the provided command. Success on Gistify requires both structural understanding of the codebase, accurate modeling of its execution flow as well as the ability to produce potentially large code patches. Our findings show that current state-of-the-art models struggle to reliably solve Gistify tasks, especially ones with long execution traces.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/gistify-codebase-level-understanding-via-runtime-execution/},
}