@misc{zhang2024video,
  author       = {Zhang, Wentao and Guo, Junliang and He, Tianyu and Zhao, Li and Xu, Linli and Bian, Jiang},
  title        = {Video In-context Learning},
  howpublished = {arXiv},
  year         = {2024},
  month        = {July},
  abstract     = {In-context learning for vision data has been underexplored compared with that in natural language. Previous works studied image in-context learning, urging models to generate a single image guided by demonstrations. In this paper, we propose and study video in-context learning, where the model starts from an existing video clip and generates diverse potential future sequences, each semantically guided by the prompted video demonstrations. To achieve this, we provide a clear definition of the task, and train an autoregressive Transformer on video datasets. We thoroughly analyze the effect of different datasets and represent frames as discrete tokens, and then model them by next token predictions. We design various evaluation metrics, including both objective and subjective measures, to demonstrate the visual quality and semantic accuracy of generation results. Our model follows the scaling law and generates high-quality video clips that accurately align with the semantic guidance provided by in-context examples.},
  url          = {http://approjects.co.za/?big=en-us/research/publication/video-in-context-learning/},
}