@inproceedings{chen2025the,
  author    = {Chen, Fan and Huang, Audrey and Golowich, Noah and Malladi, Sadhika and Block, Adam and Ash, Jordan and Krishnamurthy, Akshay and Foster, Dylan},
  title     = {The Coverage Principle: How Pre-Training Enables Post-Training},
  booktitle = {ICLR 2026},
  year      = {2025},
  month     = {October},
  abstract  = {Language models demonstrate remarkable abilities when pre-trained on large text corpora and fine-tuned for specific tasks, but how and why pre-training shapes the success of the final model remains poorly understood. Notably, although pre-training success is often quantified by cross-entropy loss, cross-entropy can be a poor predictor of downstream performance. Instead, we provide a theoretical perspective on this relationship through the lens of \emph{coverage}, which quantifies the probability mass the pre-trained model places on high-quality responses and which is necessary and sufficient for post-training and test-time scaling methods such as Best-of-N to succeed. Our main results develop an understanding of \emph{the coverage principle}, a phenomenon whereby next-token prediction (more generally, maximum likelihood) implicitly optimizes toward a model with good coverage. In particular, we uncover a mechanism that explains the power of coverage in predicting downstream performance: \emph{coverage generalizes faster than cross-entropy}, avoiding spurious dependence on problem-dependent parameters such as the sequence length. We also study practical algorithmic interventions with provable benefits for improving coverage, including (i) model/checkpoint selection procedures, (ii) gradient normalization schemes, and (iii) test-time decoding strategies.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/the-coverage-principle-how-pre-training-enables-post-training/},
}