% Conference paper: Yu et al., CVPR 2017.
% Cleaned from auto-export form: month macro instead of {July}, redundant
% `edition` field (verbatim duplicate of booktitle, invalid for
% @inproceedings) dropped, {CVPR} braced against style recasing.
@inproceedings{yu2017multi-level,
  author    = {Yu, Dongfei and Fu, Jianlong and Rui, Yong and Mei, Tao},
  title     = {Multi-level Attention Networks for Visual Question Answering},
  booktitle = {IEEE Conference on Computer Vision and Pattern Recognition ({CVPR})},
  year      = {2017},
  month     = jul,
  abstract  = {Inspired by the recent success of text-based question answering, visual question answering (VQA) is proposed to automatically answer natural language questions with the reference to a given image. Compared with text-based QA, VQA is more challenging because the reasoning process on visual domain needs both effective semantic embedding and fine-grained visual understanding. Existing approaches predominantly infer answers from the abstract low-level visual features, while neglecting the modeling of high-level image semantics and the rich spatial context of regions. To solve the challenges, we propose a multi-level attention network for visual question answering that can simultaneously reduce the semantic gap by semantic attention and benefit fine-grained spatial inference by visual attention. First, we generate semantic concepts from high-level semantics in convolutional neural networks (CNN) and select those question-related concepts as semantic attention. Second, we encode region-based middle-level outputs from CNN into spatially-embedded representation by a bidirectional recurrent neural network, and further pinpoint the answer-related regions by multiple layer perceptron as visual attention. Third, we jointly optimize semantic attention, visual attention and question embedding by a softmax classifier to infer the final answer. Extensive experiments show the proposed approach outperforms the-state-of-arts on two challenging VQA datasets.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/multi-level-attention-networks-visual-question-answering/},
}