@inproceedings{gupta2025refa,
  author    = {Gupta, Taneesh and Madhavan, Rahul and Zhang, Xuchao and Bansal, Chetan and Rajmohan, Saravan},
  title     = {REFA: Reference Free Alignment for Multi-Preference Optimization},
  booktitle = {Conference on Language Modeling (COLM)},
  year      = {2025},
  month     = {July},
  abstract  = {To mitigate reward hacking driven by response verbosity, modern preference optimization methods increasingly adopt length normalization (e.g., SimPO, ORPO, LN-DPO). While length normalization is effective against this bias, we demonstrate that it introduces a failure mode of its own: the URSLA shortcut, in which models learn to satisfy the alignment objective by prematurely truncating low-quality responses rather than learning from their semantic content. To address this, we introduce REFA, an alignment framework built on probabilistic control of the structural token that governs termination. Our core innovation is a new class of regularizers that operate directly on the probability of the End-of-Sequence (EOS) token, a previously unexploited control lever. This token-level intervention provides a principled solution to the URSLA shortcut, ensuring genuine quality improvements. Furthermore, it unlocks a versatile mechanism for managing the alignment-efficiency tradeoff, enabling practitioners to fine-tune models that adhere to specific token budgets. Empirically, REFA achieves a 60.29% win rate and a 52.17% length-controlled win rate on AlpacaEval2 with Llama-3-8B-Instruct, demonstrating the power of our token-level control paradigm.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/refa-reference-free-alignment-for-multi-preference-optimization/},
}
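
For a concrete sense of the EOS-token regularizer the abstract describes, the sketch below (PyTorch) shows one plausible form: a penalty on the model's EOS probability at non-final response positions, added on top of a length-normalized preference loss. This is a minimal sketch under stated assumptions; the function name, arguments, and quadratic penalty form are illustrative, not the authors' implementation.

  # Hypothetical sketch of an EOS-probability regularizer; the names and the
  # quadratic penalty form are illustrative assumptions, not the paper's code.
  import torch
  import torch.nn.functional as F

  def eos_probability_penalty(logits, eos_token_id, response_mask,
                              target_eos_prob=0.0):
      """Penalize probability mass on the EOS token at non-final response
      positions, discouraging the premature-truncation (URSLA) shortcut.

      logits:        (batch, seq_len, vocab) model outputs
      response_mask: (batch, seq_len) 1.0 at response positions to regularize
                     (the caller excludes the true final token), 0.0 elsewhere
      """
      probs = F.softmax(logits, dim=-1)        # (batch, seq_len, vocab)
      eos_prob = probs[..., eos_token_id]      # (batch, seq_len)
      # Mean squared deviation from the target EOS probability, averaged
      # over the masked positions only.
      penalty = ((eos_prob - target_eos_prob) ** 2) * response_mask
      return penalty.sum() / response_mask.sum().clamp(min=1.0)

  # Illustrative usage: add the penalty to a length-normalized preference
  # loss, with lambda_eos as a hypothetical weight. In this framing, the
  # alignment-efficiency tradeoff mentioned in the abstract would be steered
  # by how target_eos_prob and lambda_eos are set.
  # total_loss = preference_loss + lambda_eos * eos_probability_penalty(
  #     logits, tokenizer.eos_token_id, response_mask)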