@inproceedings{yao2014recurrent,
  author    = {Yao, Kaisheng and Peng, Baolin and Zweig, Geoffrey and Yu, Dong and Li, Xiaolong (Shiao-Long) and Gao, Feng},
  title     = {Recurrent Conditional Random Field for Language Understanding},
  booktitle = {IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
  publisher = {IEEE},
  year      = {2014},
  month     = {May},
  url       = {https://www.microsoft.com/en-us/research/publication/recurrent-conditional-random-field-for-language-understanding/},
  abstract  = {Recurrent neural networks (RNNs) have recently produced record-setting performance in language modeling and word-labeling tasks. In the word-labeling task, the RNN is used analogously to the more traditional conditional random field (CRF) to assign a label to each word in an input sequence, and has been shown to significantly outperform CRFs. In contrast to CRFs, RNNs operate in an online fashion to assign labels as soon as a word is seen, rather than after seeing the whole word sequence. In this paper, we show that the performance of an RNN tagger can be significantly improved by incorporating elements of the CRF model; specifically, the explicit modeling of output-label dependencies with transition features, its global sequence-level objective function, and offline decoding. We term the resulting model a ``recurrent conditional random field'' and demonstrate its effectiveness on the ATIS travel domain dataset and a variety of web-search language understanding datasets.},
}
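% The abstract describes augmenting an RNN tagger with CRF-style transition features
% and a global sequence-level objective. As a minimal sketch in standard RNN-CRF
% notation (the symbols f, A, and T are my own, not necessarily the paper's), the
% score of a label sequence y for a word sequence w is
%
%   \[ s(\mathbf{y}, \mathbf{w}) = \sum_{t=1}^{T} \left( A_{y_{t-1}, y_t} + f(y_t, \mathbf{w}, t) \right), \]
%
% where f(y_t, w, t) is the RNN's output score for label y_t at position t and A is
% a learned label-transition matrix. Training maximizes the sequence-level log-likelihood
%
%   \[ \log P(\mathbf{y} \mid \mathbf{w}) = s(\mathbf{y}, \mathbf{w}) - \log \sum_{\mathbf{y}'} \exp s(\mathbf{y}', \mathbf{w}), \]
%
% and decoding is performed offline (e.g., by Viterbi search) over the whole sequence,
% in contrast to the online, per-word decisions of a plain RNN tagger.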