@inproceedings{dereziski2018batch-expansion,
  author    = {Derezi{\'n}ski, Micha{\l} and Mahajan, Dhruv and Keerthi, S. Sathiya and Vishwanathan, S. V. N. and Weimer, Markus},
  title     = {Batch-Expansion Training: An Efficient Optimization Framework},
  booktitle = {International Conference on Artificial Intelligence and Statistics},
  year      = {2018},
  month     = feb,
  abstract  = {We propose Batch-Expansion Training (BET), a framework for running a batch optimizer on a gradually expanding dataset. As opposed to stochastic approaches, batches do not need to be resampled i.i.d. at every iteration, thus making BET more resource efficient in a distributed setting, and when disk-access is constrained. Moreover, BET can be easily paired with most batch optimizers, does not require any parameter-tuning, and compares favorably to existing stochastic and batch methods. We show that when the batch size grows exponentially with the number of outer iterations, BET achieves optimal {$\tilde{O}(1/\epsilon)$} data-access convergence rate for strongly convex objectives. Experiments in parallel and distributed settings show that BET performs better than standard batch and stochastic approaches.},
  publisher = {arXiv},
  url       = {http://approjects.co.za/?big=en-us/research/publication/batch-expansion-training-an-efficient-optimization-framework/},
  pages     = {736--744},
}