@inproceedings{zhang2022boosting,
  author    = {Zhang, Bohang and Jiang, Du and He, Di and Wang, Liwei},
  title     = {Boosting the Certified Robustness of L-infinity Distance Nets},
  booktitle = {ICLR 2022},
  year      = {2022},
  month     = {March},
  abstract  = {Recently, Zhang et al. (2021) developed a new neural network architecture based on $\ell_\infty$-distance functions, which naturally possesses certified robustness by its construction. Despite the novel design and theoretical foundation, so far the model has only achieved performance comparable to conventional networks. In this paper, we make the following two contributions: (i) we demonstrate that $\ell_\infty$-distance nets enjoy a fundamental advantage in certified robustness over conventional networks (under typical certification approaches); (ii) with an improved training process, we are able to significantly boost the certified accuracy of $\ell_\infty$-distance nets. Our training approach largely alleviates the optimization problem that arose in the previous training scheme, in particular the unexpectedly large Lipschitz constant caused by a crucial trick called $\ell_p$-relaxation. The core of our training approach is a novel objective function that combines a scaled cross-entropy loss and a clipped hinge loss with a decaying mixing coefficient. Experiments show that with the proposed training strategy, the certified accuracy of the $\ell_\infty$-distance net can be dramatically improved from 33.30% to 40.06% on CIFAR-10 ($\epsilon = 8/255$), outperforming other approaches in this area by a large margin. Our results clearly demonstrate the effectiveness and potential of $\ell_\infty$-distance nets for certified robustness. Code is available on GitHub.},
  url       = {https://www.microsoft.com/en-us/research/publication/boosting-the-certified-robustness-of-l-infinity-distance-nets/},
}
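
The abstract's central technical ingredient is the training objective: a scaled cross-entropy loss mixed with a clipped hinge loss under a mixing coefficient that decays over training. The sketch below (PyTorch) shows one plausible form of such an objective, assuming a linear decay schedule; the names and values of scale, margin, lam0, and the decay shape are illustrative assumptions, not the paper's exact formulation or hyperparameters.

import torch
import torch.nn.functional as F

def combined_loss(logits, targets, step, total_steps,
                  scale=8.0, margin=0.35, lam0=1.0):
    """Sketch of a combined objective: scaled cross-entropy plus a
    clipped hinge loss, with a decaying mixing coefficient.
    Hyperparameters here are illustrative assumptions."""
    # Scaled cross-entropy: sharpen the logits by a temperature-like factor.
    ce = F.cross_entropy(scale * logits, targets)

    # Clipped hinge loss on the margin between the true-class logit and
    # the largest competing logit; clipping caps each sample's penalty.
    correct = logits.gather(1, targets.unsqueeze(1)).squeeze(1)
    others = logits.clone()
    others.scatter_(1, targets.unsqueeze(1), float('-inf'))
    runner_up = others.max(dim=1).values
    hinge = torch.clamp(margin - (correct - runner_up), min=0.0, max=margin)

    # Mixing coefficient decays linearly from lam0 to 0 across training,
    # shifting the objective from cross-entropy toward pure hinge loss.
    lam = lam0 * max(0.0, 1.0 - step / total_steps)
    return lam * ce + hinge.mean()

The decay schedule reflects the idea stated in the abstract: cross-entropy dominates early training for easier optimization, while the clipped hinge term, which directly rewards certified margins, takes over as training proceeds.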