@inproceedings{zhang2021distributional,
  author    = {Zhang, Pushi and Chen, Xiaoyu and Zhao, Li and Xiong, Wei and Qin, Tao and Liu, Tie-Yan},
  title     = {Distributional Reinforcement Learning for Multi-Dimensional Reward Functions},
  booktitle = {Proceedings of the 35th Conference on Neural Information Processing Systems (NeurIPS)},
  year      = {2021},
  month     = {December},
  abstract  = {A growing trend for value-based reinforcement learning (RL) algorithms is to capture more information than scalar value functions in the value network. One of the most well-known methods in this branch is distributional RL, which models the return distribution instead of a scalar value. In another line of work, hybrid reward architectures (HRA) in RL have been studied to model source-specific value functions for each source of reward, which has also been shown to be beneficial for performance. To fully inherit the benefits of distributional RL and hybrid reward architectures, we introduce Multi-Dimensional Distributional DQN (MD3QN), which extends distributional RL to model the joint return distribution from multiple reward sources. As a by-product of joint distribution modeling, MD3QN can capture not only the randomness in returns for each source of reward, but also the rich reward correlations between different sources. We prove the convergence of the joint distributional Bellman operator and build our empirical algorithm by minimizing the Maximum Mean Discrepancy between the joint return distribution and its Bellman target. In experiments, our method accurately models the joint return distribution in environments with richly correlated reward functions, and outperforms previous RL methods utilizing multi-dimensional reward functions in the control setting.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/distributional-reinforcement-learning-for-multi-dimensional-reward-functions/},
}