@techreport{yoshioka2019meeting,
  author      = {Yoshioka, Takuya and Chen, Zhuo and Dimitriadis, Dimitrios and Hinthorn, William and Huang, Xuedong and Stolcke, Andreas and Zeng, Michael},
  title       = {Meeting Transcription Using Virtual Microphone Arrays},
  institution = {Microsoft},
  number      = {MSR-TR-2019-11},
  year        = {2019},
  month       = {July},
  note        = {Revised version},
  url         = {https://www.microsoft.com/en-us/research/publication/meeting-transcriptions-using-virtual-microphone-arrays/},
  abstract    = {We describe a system that generates speaker-annotated transcripts of meetings by using a virtual microphone array, a set of spatially distributed asynchronous recording devices such as laptops and mobile phones. The system is composed of continuous audio stream alignment, blind beamforming, speech recognition, speaker diarization using prior speaker information, and system combination. With seven input audio streams, our system achieves a word error rate (WER) of 22.3% and comes within 3% of the close-talking microphone WER on the non-overlapping speech segments. The speaker-attributed WER (SAWER) is 26.7%. The relative gains in SAWER over a single-device system are 14.8%, 20.3%, and 22.4% for three, five, and seven microphones, respectively. The presented system achieves a 13.6% diarization error rate when 10% of the speech duration contains more than one speaker. The contribution of each component to the overall performance is also investigated, and we validate the system with experiments on the NIST RT-07 conference meeting test set.},
}