@inproceedings{wang2023geometric,
  author    = {Wang, Yusong and Li, Shaoning and Wang, Tong and Shao, Bin and Zheng, Nanning and Liu, Tie-Yan},
  title     = {Geometric Transformer with Interatomic Positional Encoding},
  booktitle = {NeurIPS 2023},
  year      = {2023},
  month     = {October},
  abstract  = {The widespread adoption of Transformer architectures across data modalities has opened new avenues for applications in molecular modeling. Nevertheless, it remains unclear whether Transformer-based architectures can model molecules as well as equivariant GNNs. In this paper, by designing Interatomic Positional Encoding (IPE), which parameterizes atomic environments as the Transformer's positional encodings, we propose Geoformer, a novel geometric Transformer that effectively models molecular structures for various molecular property prediction tasks. We evaluate Geoformer on several benchmarks, including the QM9 dataset and the recently proposed Molecule3D dataset. Compared with both Transformers and equivariant GNN models, Geoformer outperforms the state-of-the-art (SoTA) algorithms on QM9 and achieves the best performance on Molecule3D for both random and scaffold splits. By introducing IPE, Geoformer paves the way for molecular geometric modeling based on the Transformer architecture. The source code of Geoformer is available at https://github.com/microsoft/AI2BMD/tree/Geoformer.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/geometric-transformer-with-interatomic-positional-encoding/},
}