% arXiv preprint: use @misc + eprint fields rather than @article with
% journal={ArXiv}/volume={abs/...} (auto-export anti-pattern). Citation key
% kept as-is so existing \cite{wang2024bootstrap} calls are unaffected.
@misc{wang2024bootstrap,
  author        = {Wang, Liang and Yang, Nan and Zhang, Xingxing and Huang, Xiaolong and Wei, Furu},
  title         = {Bootstrap Your Own Context Length},
  year          = {2024},
  month         = dec,
  eprint        = {2412.18860},
  eprinttype    = {arXiv},
  eprintclass   = {cs.CL},
  url           = {http://approjects.co.za/?big=en-us/research/publication/bootstrap-your-own-context-length/},
  abstract      = {We introduce a bootstrapping approach to train long-context language models by exploiting their short-context capabilities only. Our method utilizes a simple agent workflow to synthesize diverse long-context instruction tuning data, thereby eliminating the necessity for manual data collection and annotation. The proposed data synthesis workflow requires only a short-context language model, a text retriever, and a document collection, all of which are readily accessible within the open-source ecosystem. Subsequently, language models are fine-tuned using the synthesized data to extend their context lengths. In this manner, we effectively transfer the short-context capabilities of language models to long-context scenarios through a bootstrapping process. We conduct experiments with the open-source Llama-3 family of models and demonstrate that our method can successfully extend the context length to up to 1M tokens, achieving superior performance across various benchmarks.},
}