@inproceedings{li2011lasso,
  author    = {Li, Jinyu and Yuan, Ming and Lee, Chin-Hui},
  title     = {{LASSO} Model Adaptation for Automatic Speech Recognition},
  booktitle = {ICML Workshop on Learning Architectures, Representations, and Optimization for Speech and Visual Information Processing},
  year      = {2011},
  month     = jan,
  abstract  = {Inspired by the success of least absolute shrinkage and selection operator (LASSO) in statistical learning, we propose an {$\ell_1$}-regularized maximum likelihood linear regression (MLLR) to estimate models with only a limited set of adaptation data to improve accuracy for automatic speech recognition, by regularizing the standard MLLR objective function with an {$\ell_1$} constraint. The so-called LASSO MLLR is a natural solution to the data insufficiency problem because the {$\ell_1$} constraint regularizes some parameters to exactly 0 and reduces the number of free parameters to estimate. Tested on the 5k-WSJ0 task, the proposed LASSO MLLR gives significant word error rate reduction from the errors obtained with the standard MLLR in an utterance-by-utterance unsupervised adaptation scenario.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/lasso-model-adaptation-for-automatic-speech-recognition/},
}