@inproceedings{sun2024trustllm,
  author    = {Sun, Lichao and Huang, Yue and Wang, Haoran and Wu, Siyuan and Zhang, Qihui and Gao, Chujie and Huang, Yixin and Lyu, Wenhan and Zhang, Yixuan and Li, Xiner and Liu, Zheng and Liu, Yixin and Wang, Yijue and Zhang, Zhikun and Kailkhura, Bhavya and Xiong, Caiming and Xiao, Chaowei and Li, Chunyuan and Xing, Eric P. and Huang, Furong and Liu, Haodong and Ji, Heng and Wang, Hongyi and Zhang, Huan and Yao, Huaxiu and Kellis, Manolis and Zitnik, Marinka and Jiang, Meng and Bansal, Mohit and Zou, James and Pei, Jian and Liu, Jian and Gao, Jianfeng and Han, Jiawei and Zhao, Jieyu and Tang, Jiliang and Wang, Jindong and Mitchell, John and Shu, Kai and Xu, Kaidi and Chang, Kai-Wei and He, Lifang and Huang, Lifu and Backes, Michael and Gong, Neil Zhenqiang and Yu, Philip S. and Chen, Pin-Yu and Gu, Quanquan and Xu, Ran and Ying, Rex and Ji, Shuiwang and Jana, Suman and Chen, Tian-Xiang and Liu, Tianming and Zhou, Tianying and Wang, William and Li, Xiang and Zhang, Xiang-Yu and Wang, Xiao and Xie, Xing and Chen, Xun and Wang, Xuyu and Liu, Yan and Ye, Yanfang and Cao, Yinzhi and Zhao, Yue},
  title     = {TrustLLM: Trustworthiness in Large Language Models},
  booktitle = {Proceedings of the 41st International Conference on Machine Learning (ICML 2024)},
  year      = {2024},
  month     = {January},
  abstract  = {Large language models (LLMs), exemplified by ChatGPT, have gained considerable attention for their excellent natural language processing capabilities. Nonetheless, these LLMs present many challenges, particularly in the realm of trustworthiness; ensuring the trustworthiness of LLMs therefore emerges as an important topic. This paper introduces TrustLLM, a comprehensive study of trustworthiness in LLMs, including principles for different dimensions of trustworthiness, an established benchmark, an evaluation and analysis of trustworthiness for mainstream LLMs, and a discussion of open challenges and future directions. Specifically, we first propose a set of principles for trustworthy LLMs that span eight different dimensions. Based on these principles, we then establish a benchmark across six dimensions: truthfulness, safety, fairness, robustness, privacy, and machine ethics. Using this benchmark, which comprises over 30 datasets, we present a study evaluating 16 mainstream LLMs. Our findings first show that, in general, trustworthiness and utility (i.e., functional effectiveness) are positively related. Second, our observations reveal that proprietary LLMs generally outperform most open-source counterparts in terms of trustworthiness, raising concerns about the potential risks of widely accessible open-source LLMs; a few open-source LLMs, however, come very close to proprietary ones. Third, some LLMs may be overly calibrated towards exhibiting trustworthiness, to the extent that they compromise their utility by mistakenly treating benign prompts as harmful and consequently not responding. Finally, we emphasize the importance of ensuring transparency not only in the models themselves but also in the technologies that underpin trustworthiness: knowing which specific trustworthy technologies have been employed is crucial for analyzing their effectiveness.},
  url       = {https://www.microsoft.com/en-us/research/publication/trustllm-trustworthiness-in-large-language-models/},
}