@inproceedings{yim2024dermavqa,
  author    = {Yim, Wen-wai and Fu, Yujuan and Sun, Zhaoyi and Ben Abacha, Asma and Yetisgen, Meliha and Xia, Fei},
  title     = {DermaVQA: A Multilingual Visual Question Answering Dataset for Dermatology},
  booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},
  year      = {2024},
  month     = {October},
  abstract  = {Remote medical care has become commonplace with the establishment of patient portals, the maturation of web technologies, and the proliferation of personal devices. However, though on-demand care provides convenience and expands patient access, this same phenomenon may lead to an increased workload for healthcare providers. Drafting candidate responses may help speed up physician workflows when answering electronic messages. One specialty that may benefit from the latest multi-modal vision-language foundation models is dermatology. However, no existing dataset incorporates dermatological health queries along with user-generated images. In this work, we contribute a new dataset, DermaVQA (https://osf.io/72rp3/), for the task of dermatology question answering, and we benchmark the performance of state-of-the-art multi-modal models on multilingual response generation using relevant multi-reference metrics. The dataset and corresponding code are available on our project's GitHub repository (https://github.com/velvinnn/DermaVQA).},
}