@inproceedings{1706, abstract = {We consider a problem of learning kernels for use in SVM classification in the multi-task and lifelong scenarios and provide generalization bounds on the error of a large margin classifier. Our results show that, under mild conditions on the family of kernels used for learning, solving several related tasks simultaneously is beneficial over single task learning. In particular, as the number of observed tasks grows, assuming that in the considered family of kernels there exists one that yields low approximation error on all tasks, the overhead associated with learning such a kernel vanishes and the complexity converges to that of learning when this good kernel is given to the learner.}, author = {Pentina, Anastasia and Ben-David, Shai}, location = {Banff, AB, Canada}, pages = {194 -- 208}, publisher = {Springer}, title = {{Multi-task and lifelong learning of kernels}}, doi = {10.1007/978-3-319-24486-0_13}, volume = {9355}, year = {2015}, }
@inproceedings{1859, abstract = {Structural support vector machines (SSVMs) are amongst the best performing models for structured computer vision tasks, such as semantic image segmentation or human pose estimation. Training SSVMs, however, is computationally costly, because it requires repeated calls to a structured prediction subroutine (called \emph{max-oracle}), which has to solve an optimization problem itself, e.g. a graph cut. In this work, we introduce a new algorithm for SSVM training that is more efficient than earlier techniques when the max-oracle is computationally expensive, as is frequently the case in computer vision tasks. The main idea is to (i) combine the recent stochastic Block-Coordinate Frank-Wolfe algorithm with efficient hyperplane caching, and (ii) use an automatic selection rule for deciding whether to call the exact max-oracle or to rely on an approximate one based on the cached hyperplanes. We show experimentally that this strategy leads to faster convergence to the optimum with respect to the number of required oracle calls, and that this translates into faster convergence with respect to the total runtime when the max-oracle is slow compared to the other steps of the algorithm.}, author = {Shah, Neel and Kolmogorov, Vladimir and Lampert, Christoph}, location = {Boston, MA, USA}, pages = {2737 -- 2745}, publisher = {IEEE}, title = {{A multi-plane block-coordinate Frank-Wolfe algorithm for training structural SVMs with a costly max-oracle}}, doi = {10.1109/CVPR.2015.7298890}, year = {2015}, }
@inproceedings{1860, abstract = {Classifiers for object categorization are usually evaluated by their accuracy on a set of i.i.d. test examples. This provides us with an estimate of the expected error when applying the classifiers to a single new image. In real applications, however, classifiers are rarely only used for a single image and then discarded. Instead, they are applied sequentially to many images, and these are typically not i.i.d. samples from a fixed data distribution, but they carry dependencies and their class distribution varies over time. In this work, we argue that the phenomenon of correlated data at prediction time is not a nuisance, but a blessing in disguise. We describe a probabilistic method for adapting classifiers at prediction time without having to retrain them. We also introduce a framework for creating realistically distributed image sequences, which offers a way to benchmark classifier adaptation methods, such as the one we propose.
Experiments on the ILSVRC2010 and ILSVRC2012 datasets show that adapting object classification systems at prediction time can significantly reduce their error rate, even with no additional human feedback.}, author = {Royer, Amélie and Lampert, Christoph}, location = {Boston, MA, United States}, pages = {1401 -- 1409}, publisher = {IEEE}, title = {{Classifier adaptation at prediction time}}, doi = {10.1109/CVPR.2015.7298746}, year = {2015}, }
@inproceedings{1858, abstract = {We study the problem of predicting the future, though only in the probabilistic sense of estimating a future state of a time-varying probability distribution. This is not only an interesting academic problem, but solving this extrapolation problem also has many practical applications, e.g. for training classifiers that have to operate under time-varying conditions. Our main contribution is a method for predicting the next step of the time-varying distribution from a given sequence of sample sets from earlier time steps. For this we rely on two recent machine learning techniques: embedding probability distributions into a reproducing kernel Hilbert space, and learning operators by vector-valued regression. We illustrate the working principles and the practical usefulness of our method by experiments on synthetic and real data. We also highlight an exemplary application: training a classifier in a domain adaptation setting without having access to examples from the test-time distribution at training time.}, author = {Lampert, Christoph}, location = {Boston, MA, United States}, pages = {942 -- 950}, publisher = {IEEE}, title = {{Predicting the future behavior of a time-varying probability distribution}}, doi = {10.1109/CVPR.2015.7298696}, year = {2015}, }
@inproceedings{1857, abstract = {Sharing information between multiple tasks enables algorithms to achieve good generalization performance even from small amounts of training data. However, in a realistic scenario of multi-task learning not all tasks are equally related to each other, hence it could be advantageous to transfer information only between the most related tasks. In this work we propose an approach that processes multiple tasks in a sequence, with sharing between subsequent tasks, instead of solving all tasks jointly. Subsequently, we address the question of curriculum learning of tasks, i.e. finding the best order of tasks to be learned. Our approach is based on a generalization bound criterion for choosing the task order that optimizes the average expected classification performance over all tasks. Our experimental results show that learning multiple related tasks sequentially can be more effective than learning them jointly, that the order in which tasks are solved affects the overall performance, and that our model is able to automatically discover a favourable order of tasks.
}, author = {Pentina, Anastasia and Sharmanska, Viktoriia and Lampert, Christoph}, location = {Boston, MA, United States}, pages = {5492 -- 5500}, publisher = {IEEE}, title = {{Curriculum learning of multiple tasks}}, doi = {10.1109/CVPR.2015.7299188}, year = {2015}, }
@inproceedings{12881, author = {Martius, Georg S and Olbrich, Eckehard}, booktitle = {Proceedings of the 13th European Conference on Artificial Life}, isbn = {9780262330275}, location = {York, United Kingdom}, pages = {78}, publisher = {MIT Press}, title = {{Quantifying self-organizing behavior of autonomous robots}}, doi = {10.7551/978-0-262-33027-5-ch018}, year = {2015}, }
@phdthesis{1401, abstract = {The human ability to recognize objects in complex scenes has driven research in the computer vision field over the past couple of decades. This thesis focuses on the object recognition task in images. That is, given the image, we want the computer system to be able to predict the class of the object that appears in the image. A recent successful attempt to bridge semantic understanding of the image perceived by humans and by computers uses attribute-based models. Attributes are semantic properties of the objects shared across different categories, which humans and computers can decide on. To explore the attribute-based models we take a statistical machine learning approach, and address two key learning challenges in view of the object recognition task: learning augmented attributes as a mid-level discriminative feature representation, and learning with attributes as privileged information. Our main contributions are parametric and non-parametric models and algorithms to solve these frameworks. In the parametric approach, we explore an autoencoder model combined with the large margin nearest neighbor principle for mid-level feature learning, and linear support vector machines for learning with privileged information. In the non-parametric approach, we propose a supervised Indian Buffet Process for automatic augmentation of semantic attributes, and explore the Gaussian Processes classification framework for learning with privileged information. A thorough experimental analysis shows the effectiveness of the proposed models in both parametric and non-parametric views.}, author = {Sharmanska, Viktoriia}, issn = {2663-337X}, pages = {144}, publisher = {Institute of Science and Technology Austria}, title = {{Learning with attributes for object recognition: Parametric and non-parametric views}}, doi = {10.15479/at:ista:1401}, year = {2015}, }
@article{1655, abstract = {Quantifying behaviors of robots which were generated autonomously from task-independent objective functions is an important prerequisite for objective comparisons of algorithms and movements of animals. The temporal sequence of such a behavior can be considered as a time series, and hence complexity measures developed for time series are natural candidates for its quantification. The predictive information and the excess entropy are such complexity measures. They measure the amount of information the past contains about the future and thus quantify the nonrandom structure in the temporal sequence. However, when using these measures for systems with continuous states, one has to deal with the fact that their values will depend on the resolution with which the system's states are observed. For deterministic systems both measures will diverge with increasing resolution.
We therefore propose a new decomposition of the excess entropy into resolution-dependent and resolution-independent parts and discuss how they depend on the dimensionality of the dynamics, the correlations, and the noise level. For the practical estimation we propose to use estimates based on the correlation integral instead of the direct estimation of the mutual information based on nearest-neighbor statistics, because the latter allows less control of the scale dependencies. Using our algorithm we are able to show how autonomous learning generates behavior of increasing complexity with increasing learning duration.}, author = {Martius, Georg S and Olbrich, Eckehard}, journal = {Entropy}, number = {10}, pages = {7266 -- 7297}, publisher = {MDPI}, title = {{Quantifying emergent behavior of autonomous robots}}, doi = {10.3390/e17107266}, volume = {17}, year = {2015}, }
@inbook{1829, abstract = {Hitting and batting tasks, such as tennis forehands, ping-pong strokes, or baseball batting, depend on predictions of where the ball can be intercepted and how it can properly be returned to the opponent. These predictions get more accurate over time, hence the behaviors need to be continuously modified. As a result, movement templates with a learned global shape need to be adapted during the execution so that the racket reaches a target position and velocity that will return the ball over to the other side of the net or court. This requires altering learned movements to hit a varying target with the necessary velocity at a specific instant in time. Such a task cannot be incorporated straightforwardly into most movement representations suitable for learning. For example, the standard formulation of the dynamical system based motor primitives (introduced by Ijspeert et al (2002b)) does not satisfy this property despite their flexibility, which has allowed learning tasks ranging from locomotion to kendama. In order to fulfill this requirement, we reformulate the Ijspeert framework to incorporate the possibility of specifying a desired hitting point and a desired hitting velocity while maintaining all advantages of the original formulation. We show that the proposed movement template formulation works well in two scenarios, i.e., for hitting a ball on a string with a table tennis racket at a specified velocity and for returning balls launched by a ball gun successfully over the net using forehand movements.}, author = {Muelling, Katharina and Kroemer, Oliver and Lampert, Christoph and Schölkopf, Bernhard}, booktitle = {Learning Motor Skills}, editor = {Kober, Jens and Peters, Jan}, pages = {69 -- 82}, publisher = {Springer}, title = {{Movement templates for learning of hitting and batting}}, doi = {10.1007/978-3-319-03194-1_3}, volume = {97}, year = {2014}, }
@inproceedings{2033, abstract = {The learning with privileged information setting has recently attracted a lot of attention within the machine learning community, as it allows the integration of additional knowledge into the training process of a classifier, even when this comes in the form of a data modality that is not available at test time. Here, we show that privileged information can naturally be treated as noise in the latent function of a Gaussian process classifier (GPC). That is, in contrast to the standard GPC setting, the latent function is not just a nuisance but a feature: it becomes a natural measure of confidence about the training data by modulating the slope of the GPC probit likelihood function.
Extensive experiments on public datasets show that the proposed GPC method using privileged noise, called GPC+, improves over a standard GPC without privileged knowledge, and also over the current state-of-the-art SVM-based method, SVM+. Moreover, we show that advanced neural networks and deep learning methods can be compressed as privileged information.}, author = {Hernandez Lobato, Daniel and Sharmanska, Viktoriia and Kersting, Kristian and Lampert, Christoph and Quadrianto, Novi}, booktitle = {Advances in Neural Information Processing Systems}, location = {Montreal, Canada}, number = {January}, pages = {837 -- 845}, publisher = {Neural Information Processing Systems}, title = {{Mind the nuisance: Gaussian process classification using privileged noise}}, volume = {1}, year = {2014}, }