@techreport{yu2014an,
  author      = {Yu, Dong and Eversole, Adam and Seltzer, Mike and Yao, Kaisheng and Kuchaiev, Oleksii and Zhang, Yu and Seide, Frank and Huang, Zhiheng and Guenter, Brian and Wang, Huaming and Droppo, Jasha and Zweig, Geoffrey and Rossbach, Chris and Gao, Jie and Stolcke, Andreas and Currey, Jon and Slaney, Malcolm and Chen, Guoguo and Agarwal, Amit and Basoglu, Chris and Padmilac, Marko and Kamenev, Alexey and Ivanov, Vladimir and Cypher, Scott and Parthasarathi, Hari and Mitra, Bhaskar and Peng, Baolin and Huang, Xuedong},
  title       = {An Introduction to Computational Networks and the {Computational Network Toolkit}},
  institution = {Microsoft Research},
  year        = {2014},
  month       = oct,
  number      = {MSR-TR-2014-112},
  type        = {Technical Report},
  url         = {https://www.microsoft.com/en-us/research/publication/an-introduction-to-computational-networks-and-the-computational-network-toolkit/},
  abstract    = {We introduce computational network (CN), a unified framework for describing arbitrary learning machines, such as deep neural networks (DNNs), convolutional neural networks (CNNs), recurrent neural networks (RNNs), long short term memory (LSTM), logistic regression, and maximum entropy model, that can be illustrated as a series of computational steps. A CN is a directed graph in which each leaf node represents an input value or a parameter and each non-leaf node represents a matrix operation upon its children. We describe algorithms to carry out forward computation and gradient calculation in CN and introduce most popular computation node types used in a typical CN. We further introduce the computational network toolkit (CNTK), an implementation of CN that supports both GPU and CPU. We describe the architecture and the key components of the CNTK, the command line options to use CNTK, and the network definition and model editing language, and provide sample setups for acoustic model, language model, and spoken language understanding. We also describe the Argon speech recognition decoder as an example to integrate with CNTK.},
}