@unpublished{vogelstein2020a,
  author   = {Vogelstein, Joshua T. and Helm, Hayden S. and Mehta, Ronak D. and Dey, Jayanta and Yang, Weiwei and Tower, Bryan and LeVine, Will and Larson, Jonathan and White, Chris and Priebe, Carey},
  title    = {A general approach to progressive learning},
  year     = {2020},
  month    = {April},
  abstract = {In biological learning, data is used to improve performance on the task at hand, while simultaneously improving performance on both previously encountered tasks and as yet unconsidered future tasks. In contrast, classical machine learning starts from a blank slate, or tabula rasa, using data only for the single task at hand. While typical transfer learning algorithms can improve performance on future tasks, their performance degrades upon learning new tasks. Many recent approaches have attempted to mitigate this issue, called catastrophic forgetting, so as to maintain performance given new tasks. But striving to avoid forgetting sets the goal unnecessarily low: the goal of progressive learning, whether biological or artificial, is to improve performance on all tasks (including past and future) with any new data. We propose a general approach to progressive learning that ensembles representations, rather than learners. We show that ensembling representations---including representations learned by decision forests or neural networks---enables both forward and backward transfer on a variety of simulated and real data tasks, including vision, language, and adversarial tasks. This work suggests that further improvements in progressive learning may follow from a deeper understanding of how biological learning achieves such high degrees of efficiency.},
  url      = {https://www.microsoft.com/en-us/research/publication/a-general-approach-to-progressive-learning/},
}