@inproceedings{cheng2020a,
  author    = {Cheng, Ching-An and Tachet des Combes, Remi and Boots, Byron and Gordon, Geoff},
  title     = {A Reduction from Reinforcement Learning to No-Regret Online Learning},
  booktitle = {International Conference on Artificial Intelligence and Statistics},
  year      = {2020},
  month     = {June},
  abstract  = {We present a reduction from reinforcement learning (RL) to no-regret online learning based on the saddle-point formulation of RL, by which "any" online algorithm with sublinear regret can generate policies with provable performance guarantees. This new perspective decouples the RL problem into two parts: regret minimization and function approximation. The first part admits a standard online-learning analysis, and the second part can be quantified independently of the learning algorithm. Therefore, the proposed reduction can be used as a tool to systematically design new RL algorithms. We demonstrate this idea by devising a simple RL algorithm based on mirror descent and the generative-model oracle. For any γ-discounted tabular RL problem, with probability at least 1−δ, it learns an ϵ-optimal policy using at most O(|S||A| log(1/δ) / ((1−γ)^4 ϵ^2)) samples. Furthermore, this algorithm admits a direct extension to linearly parameterized function approximators for large-scale applications, with computation and sample complexities independent of |S| and |A|, though at the cost of potential approximation bias.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/a-reduction-from-reinforcement-learning-to-no-regret-online-learning/},
}