@comment{Reviewed entry: month macro fixed, acronyms braced, non-ASCII
  characters converted to LaTeX escapes for classic-BibTeX safety, and the
  url field repaired -- the original pointed at a link-hijack rewrite
  (approjects.co.za) of the Microsoft Research publication page; verify the
  restored microsoft.com URL resolves.}
@inproceedings{yildirim2024multimodal,
  author    = {Yildirim, Nur and Richardson (n{\'e}e Murfet), Hannah and
               Wetscherek, Maria T and Bajwa, Junaid and Jacob, Joseph and
               Pinnock, Mark A and Harris, Stephen and
               Coelho de Castro, Daniel and Bannur, Shruthi and
               Hyland, Stephanie and Ghosh, Pratik and Ranjit, Mercy and
               Bouzid, Kenza and Schwaighofer, Anton and
               P{\'e}rez-Garc{\'\i}a, Fernando and Sharma, Harshita and
               Oktay, Ozan and Lungren, Matthew P and
               Alvarez-Valle, Javier and Nori, Aditya and Thieme, Anja},
  title     = {Multimodal Healthcare {AI}: Identifying and Designing
               Clinically Relevant Vision-Language Applications for
               Radiology},
  booktitle = {Proceedings of the {CHI} Conference on Human Factors in
               Computing Systems ({CHI} '24)},
  year      = {2024},
  month     = may,
  publisher = {ACM},
  abstract  = {Recent advances in AI combine large language models (LLMs)
               with vision encoders that bring forward unprecedented
               technical capabilities to leverage for a wide range of
               healthcare applications. Focusing on the domain of
               radiology, vision-language models (VLMs) achieve good
               performance results for tasks such as generating radiology
               findings based on a patient's medical image, or answering
               visual questions (e.g., ``Where are the nodules in this
               chest X-ray?''). However, the clinical utility of potential
               applications of these capabilities is currently
               underexplored. We engaged in an iterative,
               multidisciplinary design process to envision clinically
               relevant VLM interactions, and co-designed four VLM use
               concepts: Draft Report Generation, Augmented Report Review,
               Visual Search and Querying, and Patient Imaging History
               Highlights. We studied these concepts with 13 radiologists
               and clinicians who assessed the VLM concepts as valuable,
               yet articulated many design considerations. Reflecting on
               our findings, we discuss implications for integrating VLM
               capabilities in radiology, and for healthcare AI more
               generally.},
  url       = {https://www.microsoft.com/en-us/research/publication/multimodal-healthcare-ai-identifying-and-designing-clinically-relevant-vision-language-applications-for-radiology/},
}