@inproceedings{vanseijen2017hybrid,
  author    = {van Seijen, Harm and Fatemi, Mehdi and Romoff, Joshua and Laroche, Romain and Barnes, Tavian and Tsang, Jeffrey},
  title     = {Hybrid Reward Architecture for Reinforcement Learning},
  booktitle = {Neural Information Processing Systems (NIPS)},
  year      = {2017},
  month     = {June},
  abstract  = {One of the main challenges in reinforcement learning (RL) is generalisation. In typical deep RL methods this is achieved by approximating the optimal value function with a low-dimensional representation using a deep network. While this approach works well in many domains, in domains where the optimal value function cannot easily be reduced to a low-dimensional representation, learning can be very slow and unstable. This paper contributes towards tackling such challenging domains by proposing a new method, called Hybrid Reward Architecture (HRA). HRA takes as input a decomposed reward function and learns a separate value function for each component reward function. Because each component typically depends on only a subset of all features, the overall value function is much smoother and can be more easily approximated by a low-dimensional representation, enabling more effective learning. We demonstrate HRA on a toy problem and the Atari game Ms. Pac-Man, where HRA achieves above-human performance.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/hybrid-reward-architecture-reinforcement-learning/},
}
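
The abstract describes HRA's core mechanism: split the reward into components, learn a separate value function per component, and act on the sum. Below is a minimal, illustrative Python sketch of that decomposition in a tabular setting. The toy environment, head count, and hyperparameters are assumptions rather than the paper's setup, and it shows only one HRA variant, in which each head runs an independent Q-learning update on its own reward component while actions are chosen greedily with respect to the summed values.

# Minimal tabular sketch of the Hybrid Reward Architecture (HRA) idea from
# the entry above: the reward is split into n_heads component rewards, a
# separate Q-table is learned per component, and actions are selected using
# the sum of the component values. Environment and hyperparameters are
# illustrative assumptions, not the paper's setup.
import random

n_states, n_actions = 10, 2
n_heads = 3                       # one head per reward component (assumed)
alpha, gamma, eps = 0.1, 0.95, 0.1

# One Q-table per reward component; Q_total(s, a) = sum_k Q_k(s, a).
Q = [[[0.0] * n_actions for _ in range(n_states)] for _ in range(n_heads)]

def q_total(s):
    # Aggregate value used for action selection.
    return [sum(Q[k][s][a] for k in range(n_heads)) for a in range(n_actions)]

def step(s, a):
    # Toy transition on a ring of states: head k pays reward 1 when the
    # agent reaches its own "goal" state (purely illustrative).
    s2 = (s + (1 if a == 1 else -1)) % n_states
    rewards = [1.0 if s2 == k * 3 else 0.0 for k in range(n_heads)]
    return s2, rewards

s = 0
for t in range(10000):
    # Epsilon-greedy on the aggregated value: the agent acts on the sum.
    if random.random() < eps:
        a = random.randrange(n_actions)
    else:
        totals = q_total(s)
        a = totals.index(max(totals))
    s2, rewards = step(s, a)
    # Each head performs an independent Q-learning update on its own
    # component reward; each component depends on fewer features, so each
    # head's value function is simpler than the monolithic one.
    for k in range(n_heads):
        target = rewards[k] + gamma * max(Q[k][s2])
        Q[k][s][a] += alpha * (target - Q[k][s][a])
    s = s2

print("aggregated Q(s=0, a):", q_total(0))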