@inproceedings{ma2019livebot,
  author    = {Ma, Shuming and Cui, Lei and Dai, Damai and Wei, Furu and Sun, Xu},
  title     = {LiveBot: Generating Live Video Comments Based on Visual and Textual Contexts},
  booktitle = {AAAI 2019},
  year      = {2019},
  month     = {July},
  abstract  = {We introduce the task of automatic live commenting. Live commenting, also called “video barrage”, is an emerging feature on online video sites that allows real-time comments from viewers to fly across the screen like bullets or roll down the right side of the screen. The live comments are a mixture of opinions about the video and chit-chat with other commenters. Automatic live commenting requires an AI agent to comprehend the video and interact with the human viewers who also post comments, making it a good testbed for an AI agent’s ability to deal with both dynamic vision and language. In this work, we construct a large-scale live comment dataset with 2,361 videos and 895,929 live comments. We then introduce two neural models that generate live comments based on the visual and textual contexts and outperform previous neural baselines such as the sequence-to-sequence model. Finally, we provide a retrieval-based evaluation protocol for automatic live commenting, in which the model is asked to rank a set of candidate comments by log-likelihood score and is evaluated with metrics such as mean reciprocal rank. Putting it all together, we demonstrate the first “LiveBot”. The dataset and code can be found at https://github.com/lancopku/livebot.},
  url       = {https://www.microsoft.com/en-us/research/publication/livebot-generating-live-video-comments-based-on-visual-and-textual-contexts/},
}
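
The retrieval-based evaluation mentioned in the abstract boils down to ranking a candidate set of comments by the model's log-likelihood score and measuring mean reciprocal rank (MRR) against the ground-truth comment. The sketch below illustrates that computation only; the scoring callable and the (context, candidates, gold) test-case layout are hypothetical stand-ins, not the authors' released interface.

def mean_reciprocal_rank(score_fn, test_cases):
    """Sketch of MRR for retrieval-based comment evaluation.

    score_fn(context, comment) -> log-likelihood under the model (assumed interface).
    test_cases yields (context, candidate_comments, gold_comment) triples.
    """
    reciprocal_ranks = []
    for context, candidates, gold in test_cases:
        # Higher log-likelihood means the model prefers the comment more,
        # so sort candidates from most to least likely.
        ranked = sorted(candidates, key=lambda c: score_fn(context, c), reverse=True)
        rank = ranked.index(gold) + 1  # 1-based rank of the ground-truth comment
        reciprocal_ranks.append(1.0 / rank)
    return sum(reciprocal_ranks) / len(reciprocal_ranks)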