@inproceedings{geng2021bottom-up,
  author    = {Geng, Zigang and Sun, Ke and Xiao, Bin and Zhang, Zhaoxiang and Wang, Jingdong},
  title     = {Bottom-Up Human Pose Estimation via Disentangled Keypoint Regression},
  booktitle = {Proceedings of the {IEEE}/{CVF} Conference on Computer Vision and Pattern Recognition ({CVPR})},
  year      = {2021},
  month     = jun,
  abstract  = {We present a simple yet effective approach, named disentangled keypoint regression (DEKR). We adopt adaptive convolutions through pixel-wise spatial transformer to activate the pixels in the keypoint regions and accordingly learn representations from them. We use a multi-branch structure for separate regression: each branch learns a representation with dedicated adaptive convolutions and regresses one keypoint. The resulting disentangled representations are able to attend to the keypoint regions, respectively, and thus the keypoint regression is spatially more accurate. We empirically show that the proposed direct regression method outperforms keypoint detection and grouping methods and achieves superior bottom-up pose estimation results on two benchmark datasets, COCO and CrowdPose. The code and models are available on GitHub.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/bottom-up-human-pose-estimation-via-disentangled-keypoint-regression/},
}