@inproceedings{liu2019trust,
  author    = {Liu, Guoqing and Zhao, Li and Yang, Feidiao and Bian, Jiang and Qin, Tao and Yu, Nenghai and Liu, Tie-Yan},
  title     = {Trust Region Evolution Strategies},
  booktitle = {Proceedings of the Thirty-Third AAAI Conference on Artificial Intelligence},
  year      = {2019},
  month     = {February},
  abstract  = {Evolution Strategies (ES), a class of black-box optimization algorithms, has recently been demonstrated to be a viable alternative to popular MDP-based RL techniques such as Q-learning and Policy Gradients. ES achieves fairly good performance on challenging reinforcement learning problems and is easier to scale in a distributed setting. However, standard ES algorithms perform one gradient update per data sample, which is not very efficient. In this paper, to make more efficient use of sampled data, we propose a novel iterative procedure that optimizes a surrogate objective function, enabling data samples to be reused across multiple epochs of updates. We prove a monotonic improvement guarantee for this procedure. By making several approximations to the theoretically justified procedure, we further develop a practical algorithm called Trust Region Evolution Strategies (TRES). Our experiments demonstrate the effectiveness of TRES on a range of popular MuJoCo locomotion tasks in the OpenAI Gym, where it achieves better performance than the standard ES algorithm.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/trust-region-evolution-strategies/},
}