@inproceedings{wu2021a,
  author    = {Wu, Zeqiu and Galley, Michel and Brockett, Chris and Zhang, Yizhe and Gao, Xiang and Quirk, Chris and Koncel-Kedziorski, Rik and Gao, Jianfeng and Hajishirzi, Hannaneh and Ostendorf, Mari and Dolan, Bill},
  title     = {A Controllable Model of Grounded Response Generation},
  booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence (AAAI 2021)},
  year      = {2021},
  month     = {January},
  abstract  = {Current end-to-end neural conversation models inherently lack the flexibility to impose semantic control in the response generation process. This control is essential to ensure that users' semantic intents are satisfied and to impose a degree of specificity on generated outputs. Attempts to boost informativeness alone come at the expense of factual accuracy, as attested by GPT-2's propensity to "hallucinate" facts. While this may be mitigated by access to background knowledge, there is scant guarantee of relevance and informativeness in generated responses. We propose a framework that we call controllable grounded response generation (CGRG), in which lexical control phrases are either provided by a user or automatically extracted by a content planner from dialogue context and grounding knowledge. Quantitative and qualitative results show that, using this framework, a GPT-2-based model trained on a conversation-like Reddit dataset outperforms strong generation baselines.},
  url       = {https://www.microsoft.com/en-us/research/publication/a-controllable-model-of-grounded-response-generation/},
}