@techreport{wang2023evaluating,
  author      = {Wang, Xiting and Jiang, Liming and Hern{\'a}ndez-Orallo, Jos{\'e} and Sun, Luning and Stillwell, David and Luo, Fang and Xie, Xing},
  title       = {Evaluating General-Purpose {AI} with Psychometrics},
  institution = {Microsoft},
  type        = {Technical Report},
  number      = {MSR-TR-2023-41},
  year        = {2023},
  month       = oct,
  abstract    = {Comprehensive and accurate evaluation of general-purpose AI systems such as large language models allows for effective mitigation of their risks and deepened understanding of their capabilities. Current evaluation methodology, mostly based on benchmarks of specific tasks, falls short of adequately assessing these versatile AI systems, as present techniques lack a scientific foundation for predicting their performance on unforeseen tasks and explaining their varying performance on specific task items or user inputs. Moreover, existing benchmarks of specific tasks raise growing concerns about their reliability and validity. To tackle these challenges, we suggest transitioning from task-oriented evaluation to construct-oriented evaluation. Psychometrics, the science of psychological measurement, provides a rigorous methodology for identifying and measuring the latent constructs that underlie performance across multiple tasks. We discuss its merits, warn against potential pitfalls, and propose a framework to put it into practice. Finally, we explore future opportunities of integrating psychometrics with the evaluation of general-purpose AI systems.},
  url         = {https://www.microsoft.com/en-us/research/publication/evaluating-general-purpose-ai-with-psychometrics/},
}