@inproceedings{yang2023i-code,
  author    = {Yang, Ziyi and Fang, Yuwei and Zhu, Chenguang and Pryzant, Reid and Chen, Dongdong and Shi, Yu and Xu, Yichong and Qian, Yao and Gao, Mei and Chen, Yi-Ling and Lu, Liyang and Xie, Yujia and Gmyr, Robert and Codella, Noel and Kanda, Naoyuki and Xiao, Bin and Yuan, Lu and Yoshioka, Takuya and Zeng, Michael and Huang, Xuedong},
  title     = {i-Code: An Integrative and Composable Multimodal Learning Framework},
  booktitle = {AAAI 2023},
  year      = {2023},
  month     = {February},
  abstract  = {Human intelligence is multimodal; we integrate visual, linguistic, and acoustic signals to maintain a holistic worldview. Most current pretraining methods, however, are limited to one or two modalities. We present i-Code, a self-supervised pretraining framework where users may flexibly combine the modalities of vision, speech, and language into unified and general-purpose vector representations. In this framework, data from each modality are first given to pretrained single-modality encoders. The encoder outputs are then integrated with a multimodal fusion network, which uses novel attention mechanisms and other architectural innovations to effectively combine information from the different modalities. The entire system is pretrained end-to-end with new objectives including masked modality unit modeling and cross-modality contrastive learning. Unlike previous research using only video for pretraining, the i-Code framework can dynamically process single, dual, and triple-modality data during training and inference, flexibly projecting different combinations of modalities into a single representation space. Experimental results demonstrate how i-Code can outperform state-of-the-art techniques on five video understanding tasks and the GLUE NLP benchmark, improving by as much as 11% and demonstrating the power of integrative multimodal pretraining.},
  url       = {https://www.microsoft.com/en-us/research/publication/i-code-an-integrative-and-composable-multimodal-learning-framework/},
}