@inproceedings{7479,
  abstract = {Multi-exit architectures, in which a stack of processing layers is interleaved with early output layers, allow the processing of a test example to stop early and thus save computation time and/or energy. In this work, we propose a new training procedure for multi-exit architectures based on the principle of knowledge distillation. The method encourages early exits to mimic later, more accurate exits by matching their output probabilities. Experiments on CIFAR100 and ImageNet show that distillation-based training significantly improves the accuracy of early exits while maintaining state-of-the-art accuracy for late ones. The method is particularly beneficial when training data is limited, and it allows a straightforward extension to semi-supervised learning, i.e., making use of unlabeled data at training time. Moreover, it takes only a few lines to implement and incurs almost no computational overhead at training time, and none at all at test time.},
  author = {Bui Thi Mai, Phuong and Lampert, Christoph},
  booktitle = {IEEE International Conference on Computer Vision},
  isbn = {9781728148038},
  issn = {15505499},
  location = {Seoul, Korea},
  pages = {1355--1364},
  publisher = {IEEE},
  title = {{Distillation-based training for multi-exit architectures}},
  doi = {10.1109/ICCV.2019.00144},
  volume = {2019-October},
  year = {2019},
}