@article{laroche2017safe,
  author   = {Laroche, Romain and Trichelair, Paul},
  title    = {Safe Policy Improvement with Baseline Bootstrapping},
  year     = {2017},
  month    = {December},
  abstract = {A common goal in Reinforcement Learning is to derive a good strategy from a limited batch of data. In this paper, we adopt the safe policy improvement (SPI) approach: we compute a target policy guaranteed to perform at least as well as a given baseline policy. Our SPI strategy, inspired by the knows-what-it-knows paradigm, consists in bootstrapping the target policy with the baseline policy when it does not know. We develop two computationally efficient bootstrapping algorithms, one value-based and one policy-based, both accompanied by theoretical SPI bounds. Three algorithm variants are proposed. We empirically show the limitations of existing algorithms on a small stochastic gridworld problem, and then demonstrate that our five algorithms improve not only the worst-case scenarios but also the mean performance.},
  url      = {https://www.microsoft.com/en-us/research/publication/safe-policy-improvement-baseline-bootstrapping/},
  pages    = {8+1+8},
  journal  = {arXiv preprint arXiv:1712.06924},
}