@inproceedings{koul2023pclast,
  author    = {Koul, Anurag and Sujit, Shivakanth and Chen, Shaoru and Evans, Ben and Wu, Lili and Xu, Byron and Chari, Rajan and Islam, Riashat and Seraj, Raihan and Efroni, Yonathan and Molu, Lekan and Dudík, Miro and Langford, John and Lamb, Alex},
  title     = {PcLast: Discovering Plannable Continuous Latent States},
  booktitle = {International Conference on Machine Learning (ICML 2024)},
  year      = {2024},
  abstract  = {Goal-conditioned planning benefits from learned low-dimensional representations of rich, high-dimensional observations. While compact latent representations, typically learned from variational autoencoders or inverse dynamics, enable goal-conditioned planning, they ignore state affordances, hampering sample-efficient planning. In this paper, we learn a representation that associates reachable states together for effective onward planning. We first learn a latent representation with multi-step inverse dynamics (to remove distracting information), and then transform this representation so that reachable states are close together in $\ell_2$ space. Our proposals are rigorously tested in various simulation testbeds. Numerical results in reward-based and reward-free settings show significant improvements in sample efficiency and yield layered state abstractions that enable computationally efficient hierarchical planning.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/pclast-discovering-plannable-continuous-latent-states/},
}
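
For a concrete picture of the two-stage recipe the abstract describes (multi-step inverse dynamics first, then a transform that pulls reachable states together in $\ell_2$ space), here is a minimal PyTorch sketch. Everything below (module names, network sizes, and the exact contrastive form of the reachability loss) is an illustrative assumption, not the paper's architecture or hyperparameters.

# Minimal sketch of the two-stage idea in the abstract (all names and
# hyperparameters below are illustrative assumptions, not the paper's):
#   stage 1: train an encoder phi with a multi-step inverse dynamics loss,
#            predicting the action a_t from phi(o_t) and phi(o_{t+k});
#   stage 2: train a transform psi on top of phi so that states reachable
#            within k steps land close together in l2 distance.

import torch
import torch.nn as nn
import torch.nn.functional as F

OBS_DIM, ACT_DIM, LATENT_DIM = 32, 4, 8  # toy sizes, not from the paper

phi = nn.Sequential(  # encoder: observation -> latent
    nn.Linear(OBS_DIM, 64), nn.ReLU(), nn.Linear(64, LATENT_DIM))
inv = nn.Sequential(  # inverse model: (z_t, z_{t+k}) -> action logits
    nn.Linear(2 * LATENT_DIM, 64), nn.ReLU(), nn.Linear(64, ACT_DIM))
psi = nn.Linear(LATENT_DIM, LATENT_DIM)  # reachability transform on phi's output
opt = torch.optim.Adam(
    [*phi.parameters(), *inv.parameters(), *psi.parameters()], lr=1e-3)

def training_step(obs_t, act_t, obs_tk):
    """One update on a batch of observation pairs sampled k steps apart
    (k is chosen by the data pipeline; act_t is the first action taken)."""
    z_t, z_tk = phi(obs_t), phi(obs_tk)

    # Stage 1: multi-step inverse dynamics. Recovering the action taken
    # between o_t and o_{t+k} forces phi to keep controllable state and
    # discard action-irrelevant distractors.
    inv_loss = F.cross_entropy(inv(torch.cat([z_t, z_tk], dim=-1)), act_t)

    # Stage 2 (assumed contrastive form): train psi on top of a notionally
    # frozen phi (hence the detach), pulling psi-embeddings of reachable
    # pairs together and pushing shuffled in-batch negatives apart with a
    # hinge so the embedding cannot collapse to a point.
    zd_t, zd_tk = z_t.detach(), z_tk.detach()
    pos = (psi(zd_t) - psi(zd_tk)).pow(2).sum(-1)
    neg = (psi(zd_t) - psi(zd_tk[torch.randperm(zd_tk.shape[0])])).pow(2).sum(-1)
    reach_loss = (pos + F.relu(1.0 - neg)).mean()

    loss = inv_loss + reach_loss
    opt.zero_grad()
    loss.backward()
    opt.step()
    return loss.item()

# Toy usage with random tensors standing in for logged trajectory pairs:
B = 16
print(training_step(torch.randn(B, OBS_DIM),
                    torch.randint(0, ACT_DIM, (B,)),
                    torch.randn(B, OBS_DIM)))

The hinge on negatives is one common way to keep a learned metric embedding from collapsing; the paper's actual second-stage objective may take a different form.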