@inproceedings{palangi2017grammatically-interpretable,
  author    = {Palangi, Hamid and Huang, Qiuyuan and Smolensky, Paul and He, Xiaodong and Deng, Li},
  title     = {Grammatically-Interpretable Learned Representations in Deep {NLP} Models},
  booktitle = {{NIPS} 2017 Workshop},
  year      = {2017},
  month     = dec,
  abstract  = {We introduce two architectures, the Tensor Product Recurrent Network (TPRN) and the Tensor Product Generation Network (TPGN). In the application of TPRN, internal representations --- learned by end-to-end optimization in a deep neural network performing a textual QA task --- are interpretable using basic concepts from linguistic theory. This interpretability is achieved without paying a performance penalty. In another application, image-to-text generation or image captioning, TPGN gives better results than the state-of-the-art long short-term memory (LSTM) based approaches. Learned internal representations in the TPGN can also be interpreted as containing grammatical-role information.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/grammatically-interpretable-learned-representations-in-deep-nlp-models/},
}