@inproceedings{zeng2021learning,
  author    = {Zeng, Ailing and Sun, Xiao and Yang, Lei and Zhao, Nanxuan and Liu, Minhao and Xu, Qiang},
  title     = {Learning Skeletal Graph Neural Networks for Hard 3D Pose Estimation},
  booktitle = {ICCV 2021},
  year      = {2021},
  month     = {October},
  abstract  = {Various deep learning techniques have been proposed to solve the single-view 2D-to-3D pose estimation problem. While average prediction accuracy has improved significantly over the years, performance on hard poses involving depth ambiguity, self-occlusion, and complex or rare poses remains far from satisfactory. In this work, we target these hard poses and present a novel skeletal GNN learning solution. Specifically, we propose a hop-aware hierarchical channel-squeezing fusion layer that effectively extracts relevant information from neighboring nodes while suppressing undesired noise in GNN learning. In addition, we propose a temporal-aware dynamic graph construction procedure that is robust and effective for 3D pose estimation. Experimental results on the Human3.6M dataset show that our solution achieves a 10.3\% improvement in average prediction accuracy over state-of-the-art techniques and performs substantially better on hard poses. We further apply the proposed technique to the skeleton-based action recognition task and also achieve state-of-the-art performance. Our code is available on GitHub.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/learning-skeletal-graph-neural-networks-for-hard-3d-pose-estimation/},
}