@inproceedings{wang2022debiased,
  author    = {Wang, Xudong and Wu, Zhirong and Lian, Long and Yu, Stella X.},
  title     = {Debiased Learning from Naturally Imbalanced Pseudo-Labels},
  booktitle = {2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
  year      = {2022},
  month     = {April},
  abstract  = {Pseudo-labels are confident predictions made on unlabeled target data by a classifier trained on labeled source data. They are widely used for adapting a model to unlabeled data, e.g., in a semi-supervised learning setting. Our key insight is that pseudo-labels are naturally imbalanced due to intrinsic data similarity, even when a model is trained on balanced source data and evaluated on balanced target data. If we address this previously unknown imbalanced classification problem arising from pseudo-labels instead of ground-truth training labels, we could remove model biases towards false majorities created by pseudo-labels. We propose a novel and effective debiased learning method with pseudo-labels, based on counterfactual reasoning and adaptive margins: the former removes the classifier response bias, whereas the latter adjusts the margin of each class according to the imbalance of pseudo-labels. Validated by extensive experimentation, our simple debiased learning delivers significant accuracy gains over the state-of-the-art on ImageNet-1K: 26% for semi-supervised learning with 0.2% annotations and 9% for zero-shot learning. Our code is available at: https://github.com/frank-xwang/debiased-pseudo-labeling.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/debiased-learning-from-naturally-imbalanced-pseudo-labels/},
}
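The abstract describes adjusting each class's margin according to the imbalance of the pseudo-labels. The snippet below is a minimal, hypothetical sketch of that general idea, not the authors' implementation (see the linked repository for that): it keeps a running estimate of the pseudo-label class distribution and uses its log as a per-class logit offset, in the spirit of margin or logit adjustment from class priors. All names (`PseudoLabelMarginLoss`, `margin_scale`, `momentum`) are illustrative assumptions.

```python
# Hypothetical sketch of an adaptive-margin loss driven by pseudo-label
# frequencies; not the method from the paper, only an illustration.
import torch
import torch.nn.functional as F


class PseudoLabelMarginLoss(torch.nn.Module):
    def __init__(self, num_classes: int, margin_scale: float = 1.0, momentum: float = 0.99):
        super().__init__()
        self.margin_scale = margin_scale
        self.momentum = momentum
        # Running estimate of the pseudo-label distribution (starts uniform).
        self.register_buffer("class_freq", torch.full((num_classes,), 1.0 / num_classes))

    @torch.no_grad()
    def update(self, pseudo_labels: torch.Tensor) -> None:
        # Exponential moving average of the observed pseudo-label frequencies.
        counts = torch.bincount(pseudo_labels, minlength=self.class_freq.numel()).float()
        batch_freq = counts / counts.sum().clamp(min=1.0)
        self.class_freq.mul_(self.momentum).add_(batch_freq, alpha=1.0 - self.momentum)

    def forward(self, logits: torch.Tensor, pseudo_labels: torch.Tensor) -> torch.Tensor:
        # Classes over-represented in the pseudo-labels get a larger (less
        # negative) offset, which effectively enlarges the margin required to
        # predict them, countering the bias towards false majorities.
        margins = self.margin_scale * torch.log(self.class_freq.clamp(min=1e-8))
        return F.cross_entropy(logits + margins.unsqueeze(0), pseudo_labels)


# Illustrative usage on a batch of unlabeled images with model-generated
# pseudo-labels (both assumed to exist in the surrounding training loop):
#   loss_fn = PseudoLabelMarginLoss(num_classes=1000)
#   loss_fn.update(pseudo_labels)
#   loss = loss_fn(model(unlabeled_images), pseudo_labels)
```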