@article{macua2013distributed,
  author   = {Valcarcel Macua, Sergio and Chen, Jianshu and Zazo, Santiago and Sayed, Ali H.},
  title    = {Distributed Policy Evaluation Under Multiple Behavior Strategies},
  journal  = {IEEE Transactions on Automatic Control},
  year     = {2015},
  month    = {May},
  volume   = {60},
  number   = {5},
  pages    = {1260--1274},
  abstract = {We apply diffusion strategies to develop a fully-distributed cooperative reinforcement learning algorithm in which agents in a network communicate only with their immediate neighbors to improve predictions about their environment. The algorithm can also be applied to off-policy learning, meaning that the agents can predict the response to a behavior different from the actual policies they are following. The proposed distributed strategy is efficient, with linear complexity in both computation time and memory footprint. We provide a mean-square-error performance analysis and establish convergence under constant step-size updates, which endow the network with continuous learning capabilities. The results show a clear gain from cooperation: when the individual agents can estimate the solution, cooperation increases stability and reduces bias and variance of the prediction error; but, more importantly, the network is able to approach the optimal solution even when none of the individual agents can (e.g., when the individual behavior policies restrict each agent to sample a small portion of the state space).},
  url      = {https://www.microsoft.com/en-us/research/publication/distributed-policy-evaluation-under-multiple-behavior-strategies/},
}