@inproceedings{xu2022narrate,
  author    = {Xu, Ruochen and Zhu, Chenguang and Zeng, Michael},
  title     = {Narrate Dialogues for Better Summarization},
  booktitle = {Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing ({EMNLP})},
  year      = {2022},
  month     = dec,
  pages     = {3565--3575},
  publisher = {Association for Computational Linguistics},
  url       = {http://approjects.co.za/?big=en-us/research/publication/narrate-dialogues-for-better-summarization/},
  abstract  = {Dialogue summarization models aim to generate a concise and accurate summary for multi-party dialogue. The complexity of dialogue, including coreference, dialogue acts, and inter-speaker interactions bring unique challenges to dialogue summarization. Most recent neural models achieve state-of-art performance following the pretrain-then-finetune recipe, where the large-scale language model (LLM) is pretrained on large-scale single-speaker written text, but later finetuned on multi-speaker dialogue text. To mitigate the gap between pretraining and finetuning, we propose several approaches to convert the dialogue into a third-person narrative style and show that the narration serves as a valuable annotation for LLMs. Empirical results on three benchmark datasets show our simple approach achieves higher scores on the ROUGE and a factual correctness metric.},
}