@inproceedings{dong2020self-robust,
  author    = {Dong, Xiaoyi and Zhou, Hang and Hua, Gang and Zhang, Weiming and Yu, Nenghai and Chen, Dongdong},
  title     = {Self-Robust {3D} Point Recognition via Gather-Vector Guidance},
  booktitle = {Proceedings of the {IEEE/CVF} Conference on Computer Vision and Pattern Recognition ({CVPR})},
  year      = {2020},
  month     = jun,
  abstract  = {In this paper, we look into the problem of 3D adversary attack, and propose to leverage the internal properties of the point clouds and the adversarial examples to design a new self-robust deep neural network (DNN) based 3D recognition systems. As a matter of fact, on one hand, point clouds are highly structured. Hence for each local part of clean point clouds, it is possible to learn what is it (``part of a bottle'') and its relative position (``upper part of a bottle'') to the global object center. On the other hand, with the visual quality constraint, 3D adversarial samples often only produce small local perturbations, thus they will roughly keep the original global center but may cause incorrect local relative position estimation. Motivated by these two properties, we use relative position (dubbed as ``gather-vector'') as the adversarial indicator and propose a new robust gather module. Equipped with this module, we further propose a new self-robust 3D point recognition network. Through extensive experiments, we demonstrate that the proposed method can improve the robustness of the target attack under the white-box setting significantly. For I-FGSM based attack, our method reduces the attack success rate from 94.37\% to 75.69\%. For C\&W based attack, our method reduces the attack success rate more than 40.00\%. Moreover, our method is complementary to other types of defense methods to achieve better defense results.},
  publisher = {IEEE},
  url       = {http://approjects.co.za/?big=en-us/research/publication/self-robust-3d-point-recognition-via-gather-vector-guidance/},
  pages     = {11513--11521},
}