@unpublished{laroche2017multi-advisor,
  author   = {Laroche, Romain and Fatemi, Mehdi and van Seijen, Harm and Romoff, Joshua},
  title    = {Multi-Advisor Reinforcement Learning},
  year     = {2017},
  month    = {April},
  abstract = {This article deals with a novel branch of Separation of Concerns, called Multi-Advisor Reinforcement Learning (MAd-RL), where a single-agent RL problem is distributed to n learners, called advisors. Each advisor tries to solve the problem with a different focus. Their advice is then communicated to an aggregator, which is in control of the system. For the local training, three off-policy bootstrapping methods are proposed and analysed: local-max bootstraps with the local greedy action, rand-policy bootstraps with respect to the random policy, and agg-policy bootstraps with respect to the aggregator's greedy policy. MAd-RL is positioned as a generalisation of Reinforcement Learning with Ensemble methods. An experiment is run on a simplified version of the Ms. Pac-Man Atari game. The results confirm the theoretical relative strengths and weaknesses of each method.},
  url      = {https://www.microsoft.com/en-us/research/publication/multi-advisor-reinforcement-learning/},
  note     = {arXiv:1704.00756},
}