@inproceedings{durante2024an,
  author    = {Durante, Zane and Sarkar, Bidipta and Gong, Ran and Taori, Rohan and Noda, Yusuke and Tang, Paul and Adeli, Ehsan and Lakshmikanth, Shrinidhi Kowshika and Schulman, Kevin and Milstein, Arnold and Terzopoulos, Demetri and Famoti, Ade and Kuno, Noboru and Llorens, Ashley J. and Vo, Hoi and Ikeuchi, Katsushi and Li, Fei-Fei and Gao, Jianfeng and Wake, Naoki and Huang, Qiuyuan},
  title     = {An Interactive Agent Foundation Model},
  year      = {2024},
  month     = {February},
  abstract  = {The development of artificial intelligence systems is transitioning from creating static, task-specific models to dynamic, agent-based systems capable of performing well in a wide range of applications. We propose an Agent Foundation Model that uses a novel multi-task agent training paradigm for training AI agents across a wide range of domains, datasets, and tasks. Our training paradigm unifies diverse pretraining strategies, including visual masked auto-encoders, language modeling, and next-action prediction, enabling a versatile and adaptable AI framework. We demonstrate the performance of our framework across three separate domains: Robotics, Gaming AI, and Healthcare. Our model demonstrates its ability to generate meaningful and contextually relevant outputs in each area. The strength of our approach lies in its generality, leveraging a variety of data sources such as robotics sequences, gameplay data, large-scale video datasets, and textual information for effective multimodal and multi-task learning. Our approach provides a promising avenue for developing generalist, action-taking, multimodal systems.},
  publisher = {arXiv},
  url       = {https://www.microsoft.com/en-us/research/publication/interactive-agent-foundation-model/},
}