@inproceedings{zhong2022unsupervised,
  author    = {Zhong, Ming and Liu, Yang and Ge, Suyu and Mao, Yuning and Jiao, Yizhu and Zhang, Xingxing and Xu, Yichong and Zhu, Chenguang and Zeng, Michael and Han, Jiawei},
  title     = {Unsupervised Multi-Granularity Summarization},
  booktitle = {Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing ({EMNLP})},
  year      = {2022},
  month     = oct,
  abstract  = {Text summarization is a user-preference based task, i.e., for one document, users often have different priorities for summary. As a key aspect of customization in summarization, granularity is used to measure the semantic coverage between the summary and source document. However, developing systems that can generate summaries with customizable semantic coverage is still an under-explored topic. In this paper, we propose the first unsupervised multi-granularity summarization framework, GranuSum. We take events as the basic semantic units of the source documents and propose to rank these events by their salience. We also develop a model to summarize input documents with given events as anchors and hints. By inputting different numbers of events, GranuSum is capable of producing multi-granular summaries in an unsupervised manner. Meanwhile, we annotate a new benchmark GranuDUC that contains multiple summaries at different granularities for each document cluster. Experimental results confirm the substantial superiority of GranuSum on multi-granularity summarization over strong baselines. Further, by exploiting the event information, GranuSum also exhibits state-of-the-art performance under the conventional unsupervised abstractive setting. Dataset for this paper can be found at: this https URL.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/unsupervised-multi-granularity-summarization/},
}