@techreport{liu2019learning,
  author      = {Liu, Bei and Huang, Zhicheng and Zeng, Zhaoyang and Chen, Zheyu and Fu, Jianlong},
  title       = {Learning Rich Image Region Representation for {Visual Question Answering}},
  institution = {Microsoft},
  year        = {2019},
  month       = jun,
  number      = {MSR-TR-2020-1},
  note        = {CVPR 2019, VQA Workshop},
  abstract    = {We propose to boost VQA by leveraging more powerful feature extractors by improving the representation ability of both visual and text features and the ensemble of models. For visual feature, some detection techniques are used to improve the detector. For text feature, we adopt BERT as the language model and find that it can significantly improve VQA performance. Our solution won second place in the VQA Challenge 2019.},
  url         = {https://www.microsoft.com/en-us/research/publication/learning-rich-image-region-representation-for-visual-question-answering/},
}