@inproceedings{otani2022lite,
  author    = {Otani, Naoki and Gamon, Michael and Jauhar, Sujay Kumar and Yang, Mei and Malireddi, Sri and Riva, Oriana},
  title     = {{LITE}: Intent-based Task Representation Learning Using Weak Supervision},
  booktitle = {Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT)},
  year      = {2022},
  month     = {July},
  abstract  = {Users write to-dos as personal notes to themselves about things they need to complete, remember, or organize. To-do texts are usually short and under-specified, which poses a challenge for current text representation models. Yet understanding and representing their meaning is the first step towards providing intelligent assistance for to-do management. We address this problem by proposing a neural multi-task learning framework, LITE, which extracts representations of English to-do tasks with a multi-head attention mechanism on top of a pre-trained text encoder. To adapt representation models to to-do texts, we collect weak-supervision labels from semantically rich external resources (e.g., a dynamic common-sense knowledge base), following the principle that to-do tasks with similar intents have similar labels. We then jointly train the model on multiple generative and predictive objectives. We evaluate our representation model on five downstream tasks and show that our approach consistently improves performance over baseline models, achieving an error reduction of up to 38.7%.},
  url       = {https://www.microsoft.com/en-us/research/publication/lite-intent-based-task-representation-learning-using-weak-supervision/},
}