@inproceedings{kumar2022how,
  author    = {Kumar, Ananya and Shen, Ruoqi and Bubeck, Sébastien and Gunasekar, Suriya},
  title     = {How to Fine-Tune Vision Models with SGD},
  booktitle = {ICLR 2024},
  year      = {2024},
  abstract  = {SGD and AdamW are the two most commonly used optimizers for fine-tuning large neural networks in computer vision. When the two methods perform equally well, SGD is preferable because it uses less memory (12 bytes/parameter with momentum, 8 bytes/parameter without) than AdamW (16 bytes/parameter). However, on a suite of downstream tasks, especially those with distribution shifts, we find that fine-tuning with AdamW performs substantially better than SGD on modern Vision Transformer and ConvNeXt models. We find that large gaps in performance between SGD and AdamW occur when the fine-tuning gradients in the first "embedding" layer are much larger than in the rest of the model. Our analysis suggests an easy fix that works consistently across datasets and models: freezing the embedding layer (less than 1% of the parameters) leads to SGD with or without momentum performing slightly better than AdamW while using less memory (e.g., on ViT-L, SGD uses 33% less GPU memory). Our insights result in state-of-the-art accuracies on five popular distribution shift benchmarks: WILDS-FMoW, WILDS-Camelyon, BREEDS-Living-17, Waterbirds, and DomainNet.},
  url       = {https://www.microsoft.com/en-us/research/publication/how-to-fine-tune-vision-models-with-sgd/},
}
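
The fix described in the abstract (freeze the embedding layer, then fine-tune the remaining parameters with SGD) can be sketched in a few lines. This is not the authors' code: it assumes a timm Vision Transformer, whose attribute names (patch_embed, cls_token, pos_embed) and the chosen learning rate are illustrative.

```python
# Minimal sketch, assuming the timm library and PyTorch: freeze the ViT
# "embedding" parameters (<1% of the model) and fine-tune the rest with SGD.
import timm
import torch

model = timm.create_model("vit_base_patch16_224", pretrained=True, num_classes=10)

# Freeze the patch projection, class token, and position embeddings.
for p in model.patch_embed.parameters():
    p.requires_grad = False
model.cls_token.requires_grad = False
model.pos_embed.requires_grad = False

# Plain SGD (momentum optional) over the remaining trainable parameters;
# the learning rate here is a placeholder, not a value from the paper.
optimizer = torch.optim.SGD(
    (p for p in model.parameters() if p.requires_grad),
    lr=1e-3,
    momentum=0.9,
)
```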