@inproceedings{mazoure2020deep,
  author    = {Mazoure, Bogdan and Tachet des Combes, Remi and Doan, Thang Long and Bachman, Philip and Hjelm, Devon},
  title     = {Deep Reinforcement and InfoMax Learning},
  booktitle = {Advances in Neural Information Processing Systems 33 (NeurIPS 2020)},
  year      = {2020},
  month     = {December},
  abstract  = {We begin with the hypothesis that a model-free agent whose representations are predictive of properties of future states (beyond expected rewards) will be more capable of solving and adapting to new RL problems. To test that hypothesis, we introduce an objective based on Deep InfoMax (DIM) which trains the agent to predict the future by maximizing the mutual information between its internal representation of successive timesteps. We test our approach in several synthetic settings, where it successfully learns representations that are predictive of the future. Finally, we augment C51, a strong RL baseline, with our temporal DIM objective and demonstrate improved performance on a continual learning task and on the recently introduced Procgen environment.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/deep-reinforcement-and-infomax-learning/},
}