@inproceedings{yuan2017machine,
  author    = {Yuan, Xingdi and Wang, Tong and Gulcehre, Caglar and Sordoni, Alessandro and Bachman, Philip and Subramanian, Sandeep and Zhang, Saizheng and Trischler, Adam},
  title     = {Machine Comprehension by Text-to-Text Neural Question Generation},
  booktitle = {RepL4NLP workshop, ACL 2017},
  year      = {2017},
  month     = jul,
  abstract  = {We propose a recurrent neural model that generates natural-language questions from documents, conditioned on answers. We show how to train the model using a combination of supervised and reinforcement learning. After teacher forcing for standard maximum likelihood training, we fine-tune the model using policy gradient techniques to maximize several rewards that measure question quality. Most notably, one of these rewards is the performance of a question-answering system. We motivate question generation as a means to improve the performance of question answering systems. Our model is trained and evaluated on the recent question-answering dataset SQuAD.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/machine-comprehension-by-text-to-text-neural-question-generation-2/},
}