@article{ye2026online,
  author        = {Ye, Tianzhu and Dong, Li and Dong, Qingxiu and Wu, Xun and Huang, Shaohan and Wei, Furu},
  title         = {Online Experiential Learning for Language Models},
  journal       = {arXiv: Computation and Language},
  year          = {2026},
  month         = mar,
  eprint        = {2603.16856},
  archiveprefix = {arXiv},
  primaryclass  = {cs.CL},
  url           = {http://approjects.co.za/?big=en-us/research/publication/online-experiential-learning-for-language-models/},
  abstract      = {The prevailing paradigm for improving large language models relies on offline training with human annotations or simulated environments, leaving the rich experience accumulated during real-world deployment entirely unexploited. We propose Online Experiential Learning (OEL), a framework that enables language models to continuously improve from their own deployment experience. OEL operates in two stages: first, transferable experiential knowledge is extracted and accumulated from interaction trajectories collected on the user side; second, this knowledge is consolidated into model parameters via on-policy context distillation, requiring no access to the user-side environment. The two stages are iterated to form an online learning loop, where the improved model collects higher-quality trajectories that yield richer experiential knowledge for subsequent rounds. We evaluate OEL on text-based game environments across multiple model scales and both thinking and non-thinking variants. OEL achieves consistent improvements over successive iterations, enhancing both task accuracy and token efficiency while preserving out-of-distribution performance. Our analysis further shows that extracted experiential knowledge is significantly more effective than raw trajectories, and that on-policy consistency between the knowledge source and the policy model is critical for effective learning.},
}