@unpublished{10803, abstract = {Given the abundance of applications of ranking in recent years, addressing fairness concerns around automated ranking systems becomes necessary for increasing the trust among end-users. Previous work on fair ranking has mostly focused on application-specific fairness notions, often tailored to online advertising, and it rarely considers learning as part of the process. In this work, we show how to transfer numerous fairness notions from binary classification to a learning to rank setting. Our formalism allows us to design methods for incorporating fairness objectives with provable generalization guarantees. An extensive experimental evaluation shows that our method can improve ranking fairness substantially with little or no loss of model quality.}, author = {Konstantinov, Nikola H and Lampert, Christoph}, booktitle = {arXiv}, title = {{Fairness through regularization for learning to rank}}, doi = {10.48550/arXiv.2102.05996}, year = {2021}, } @phdthesis{9418, abstract = {Deep learning is best known for its empirical success across a wide range of applications spanning computer vision, natural language processing and speech. Of equal significance, though perhaps less known, are its ramifications for learning theory: deep networks have been observed to perform surprisingly well in the high-capacity regime, aka the overfitting or underspecified regime. Classically, this regime on the far right of the bias-variance curve is associated with poor generalisation; however, recent experiments with deep networks challenge this view. This thesis is devoted to investigating various aspects of underspecification in deep learning. First, we argue that deep learning models are underspecified on two levels: a) any given training dataset can be fit by many different functions, and b) any given function can be expressed by many different parameter configurations. We refer to the second kind of underspecification as parameterisation redundancy and we precisely characterise its extent. Second, we characterise the implicit criteria (the inductive bias) that guide learning in the underspecified regime. Specifically, we consider a nonlinear but tractable classification setting, and show that given the choice, neural networks learn classifiers with a large margin. Third, we consider learning scenarios where the inductive bias is not by itself sufficient to deal with underspecification. We then study different ways of ‘tightening the specification’: i) In the setting of representation learning with variational autoencoders, we propose a hand-crafted regulariser based on mutual information. ii) In the setting of binary classification, we consider soft-label (real-valued) supervision. We derive a generalisation bound for linear networks supervised in this way and verify that soft labels facilitate fast learning. Finally, we explore an application of soft-label supervision to the training of multi-exit models.}, author = {Bui Thi Mai, Phuong}, issn = {2663-337X}, pages = {125}, publisher = {Institute of Science and Technology Austria}, title = {{Underspecification in deep learning}}, doi = {10.15479/AT:ISTA:9418}, year = {2021}, } @inbook{14987, abstract = {The goal of zero-shot learning is to construct a classifier that can identify object classes for which no training examples are available. When training data for some of the object classes is available but not for others, the name generalized zero-shot learning is commonly used.
In a wider sense, the phrase zero-shot is also used to describe other machine learning-based approaches that require no training data from the problem of interest, such as zero-shot action recognition or zero-shot machine translation.}, author = {Lampert, Christoph}, booktitle = {Computer Vision}, editor = {Ikeuchi, Katsushi}, isbn = {9783030634155}, pages = {1395--1397}, publisher = {Springer}, title = {{Zero-Shot Learning}}, doi = {10.1007/978-3-030-63416-2_874}, year = {2021}, } @unpublished{8063, abstract = {We present a generative model of images that explicitly reasons over the set of objects they show. Our model learns a structured latent representation that separates objects from each other and from the background; unlike prior works, it explicitly represents the 2D position and depth of each object, as well as an embedding of its segmentation mask and appearance. The model can be trained from images alone in a purely unsupervised fashion without the need for object masks or depth information. Moreover, it always generates complete objects, even though a significant fraction of training images contain occlusions. Finally, we show that our model can infer decompositions of novel images into their constituent objects, including accurate prediction of depth ordering and segmentation of occluded parts.}, author = {Anciukevicius, Titas and Lampert, Christoph and Henderson, Paul M}, booktitle = {arXiv}, title = {{Object-centric image generation with factored depths, locations, and appearances}}, year = {2020}, } @inproceedings{8188, abstract = {A natural approach to generative modeling of videos is to represent them as a composition of moving objects. Recent works model a set of 2D sprites over a slowly-varying background, but without considering the underlying 3D scene that gives rise to them. We instead propose to model a video as the view seen while moving through a scene with multiple 3D objects and a 3D background. Our model is trained from monocular videos without any supervision, yet learns to generate coherent 3D scenes containing several moving objects. We conduct detailed experiments on two datasets, going beyond the visual complexity supported by state-of-the-art generative approaches. We evaluate our method on depth-prediction and 3D object detection---tasks which cannot be addressed by those earlier works---and show it outperforms them even on 2D instance segmentation and tracking.}, author = {Henderson, Paul M and Lampert, Christoph}, booktitle = {34th Conference on Neural Information Processing Systems}, isbn = {9781713829546}, location = {Vancouver, Canada}, pages = {3106–3117}, publisher = {Curran Associates}, title = {{Unsupervised object-centric video generation and decomposition in 3D}}, volume = {33}, year = {2020}, } @article{6952, abstract = {We present a unified framework tackling two problems: class-specific 3D reconstruction from a single image, and generation of new 3D shape samples. These tasks have received considerable attention recently; however, most existing approaches rely on 3D supervision, annotation of 2D images with keypoints or poses, and/or training with multiple views of each object instance. Our framework is very general: it can be trained in similar settings to existing approaches, while also supporting weaker supervision. Importantly, it can be trained purely from 2D images, without pose annotations, and with only a single view per instance. We employ meshes as an output representation, instead of voxels used in most prior work.
This allows us to reason over lighting parameters and exploit shading information during training, which previous 2D-supervised methods cannot. Thus, our method can learn to generate and reconstruct concave object classes. We evaluate our approach in various settings, showing that: (i) it learns to disentangle shape from pose and lighting; (ii) using shading in the loss improves performance compared to just silhouettes; (iii) when using a standard single white light, our model outperforms state-of-the-art 2D-supervised methods, both with and without pose supervision, thanks to exploiting shading cues; (iv) performance improves further when using multiple coloured lights, even approaching that of state-of-the-art 3D-supervised methods; (v) shapes produced by our model capture smooth surfaces and fine details better than voxel-based approaches; and (vi) our approach supports concave classes such as bathtubs and sofas, which methods based on silhouettes cannot learn.}, author = {Henderson, Paul M and Ferrari, Vittorio}, issn = {1573-1405}, journal = {International Journal of Computer Vision}, pages = {835--854}, publisher = {Springer Nature}, title = {{Learning single-image 3D reconstruction by generative modelling of shape, pose and shading}}, doi = {10.1007/s11263-019-01219-8}, volume = {128}, year = {2020}, } @inproceedings{7936, abstract = {State-of-the-art detection systems are generally evaluated on their ability to exhaustively retrieve objects densely distributed in the image, across a wide variety of appearances and semantic categories. Orthogonal to this, many real-life object detection applications, for example in remote sensing, instead require dealing with large images that contain only a few small objects of a single class, scattered heterogeneously across the space. In addition, they are often subject to strict computational constraints, such as limited battery capacity and computing power. To tackle these more practical scenarios, we propose a novel flexible detection scheme that efficiently adapts to variable object sizes and densities: We rely on a sequence of detection stages, each of which has the ability to predict groups of objects as well as individuals. Similar to a detection cascade, this multi-stage architecture spares computational effort by discarding large irrelevant regions of the image early during the detection process. The ability to group objects provides further computational and memory savings, as it allows working with lower image resolutions in early stages, where groups are more easily detected than individuals, as they are more salient. We report experimental results on two aerial image datasets, and show that the proposed method is as accurate as standard single-shot detectors yet computationally more efficient, consistently across three different backbone architectures.}, author = {Royer, Amélie and Lampert, Christoph}, booktitle = {IEEE Winter Conference on Applications of Computer Vision}, isbn = {9781728165530}, location = {Snowmass Village, CO, United States}, publisher = {IEEE}, title = {{Localizing grouped instances for efficient detection in low-resource scenarios}}, doi = {10.1109/WACV45572.2020.9093288}, year = {2020}, } @inproceedings{7937, abstract = {Fine-tuning is a popular way of exploiting knowledge contained in a pre-trained convolutional network for a new visual recognition task.
However, the orthogonal setting of transferring knowledge from a pretrained network to a visually different yet semantically close data source is rarely considered: this commonly happens with real-life data, which is not necessarily as clean as the training source (noise, geometric transformations, different modalities, etc.). To tackle such scenarios, we introduce a new, generalized form of fine-tuning, called flex-tuning, in which any individual unit (e.g. layer) of a network can be tuned, and the most promising one is chosen automatically. In order to make the method appealing for practical use, we propose two lightweight and faster selection procedures that prove to be good approximations in practice. We study these selection criteria empirically across a variety of domain shifts and data scarcity scenarios, and show that fine-tuning individual units, despite its simplicity, yields very good results as an adaptation technique. As it turns out, in contrast to common practice, rather than the last fully-connected unit, it is best to tune an intermediate or early one in many domain-shift scenarios, which is accurately detected by flex-tuning.}, author = {Royer, Amélie and Lampert, Christoph}, booktitle = {2020 IEEE Winter Conference on Applications of Computer Vision}, isbn = {9781728165530}, location = {Snowmass Village, CO, United States}, publisher = {IEEE}, title = {{A flexible selection scheme for minimum-effort transfer learning}}, doi = {10.1109/WACV45572.2020.9093635}, year = {2020}, } @inbook{8092, abstract = {Image translation refers to the task of mapping images from one visual domain to another. Given two unpaired collections of images, we aim to learn a mapping between the corpus-level style of each collection, while preserving semantic content shared across the two domains. We introduce XGAN, a dual adversarial auto-encoder, which captures a shared representation of the common domain semantic content in an unsupervised way, while jointly learning the domain-to-domain image translations in both directions. We exploit ideas from the domain adaptation literature and define a semantic consistency loss which encourages the learned embedding to preserve semantics shared across domains. We report promising qualitative results for the task of face-to-cartoon translation. The cartoon dataset we collected for this purpose, “CartoonSet”, is also publicly available as a new benchmark for semantic style transfer at https://google.github.io/cartoonset/index.html.}, author = {Royer, Amélie and Bousmalis, Konstantinos and Gouws, Stephan and Bertsch, Fred and Mosseri, Inbar and Cole, Forrester and Murphy, Kevin}, booktitle = {Domain Adaptation for Visual Understanding}, editor = {Singh, Richa and Vatsa, Mayank and Patel, Vishal M. and Ratha, Nalini}, isbn = {9783030306717}, pages = {33--49}, publisher = {Springer Nature}, title = {{XGAN: Unsupervised image-to-image translation for many-to-many mappings}}, doi = {10.1007/978-3-030-30671-7_3}, year = {2020}, } @inproceedings{7481, abstract = {We address the following question: How redundant is the parameterisation of ReLU networks? Specifically, we consider transformations of the weight space which leave the function implemented by the network intact. Two such transformations are known for feed-forward architectures: permutation of neurons within a layer, and positive scaling of all incoming weights of a neuron coupled with inverse scaling of its outgoing weights.
In this work, we show for architectures with non-increasing widths that permutation and scaling are in fact the only function-preserving weight transformations. For any eligible architecture, we give an explicit construction of a neural network such that any other network that implements the same function can be obtained from the original one by the application of permutations and rescaling. The proof relies on a geometric understanding of boundaries between linear regions of ReLU networks, and we hope the developed mathematical tools are of independent interest.}, author = {Bui Thi Mai, Phuong and Lampert, Christoph}, booktitle = {8th International Conference on Learning Representations}, location = {Online}, title = {{Functional vs. parametric equivalence of ReLU networks}}, year = {2020}, } @inproceedings{8724, abstract = {We study the problem of learning from multiple untrusted data sources, a scenario of increasing practical relevance given the recent emergence of crowdsourcing and collaborative learning paradigms. Specifically, we analyze the situation in which a learning system obtains datasets from multiple sources, some of which might be biased or even adversarially perturbed. It is known that in the single-source case, an adversary with the power to corrupt a fixed fraction of the training data can prevent PAC-learnability, that is, even in the limit of infinite training data, no learning system can approach the optimal test error. In this work we show that, surprisingly, the same is not true in the multi-source setting, where the adversary can arbitrarily corrupt a fixed fraction of the data sources. Our main results are a generalization bound that provides finite-sample guarantees for this learning setting, as well as corresponding lower bounds. Besides establishing PAC-learnability, our results also show that in a cooperative learning setting, sharing data with other parties has provable benefits, even if some participants are malicious. }, author = {Konstantinov, Nikola H and Frantar, Elias and Alistarh, Dan-Adrian and Lampert, Christoph}, booktitle = {Proceedings of the 37th International Conference on Machine Learning}, issn = {2640-3498}, location = {Online}, pages = {5416--5425}, publisher = {ML Research Press}, title = {{On the sample complexity of adversarial multi-source PAC learning}}, volume = {119}, year = {2020}, } @phdthesis{8390, abstract = {Deep neural networks have established a new standard for data-dependent feature extraction pipelines in the Computer Vision literature. Despite their remarkable performance in the standard supervised learning scenario, i.e. when models are trained with labeled data and tested on samples that follow a similar distribution, neural networks have been shown to struggle with more advanced generalization abilities, such as transferring knowledge across visually different domains, or generalizing to new unseen combinations of known concepts. In this thesis we argue that, in contrast to the usual black-box behavior of neural networks, leveraging more structured internal representations is a promising direction for tackling such problems. In particular, we focus on two forms of structure. First, we tackle modularity: We show that (i) compositional architectures are a natural tool for modeling reasoning tasks, in that they efficiently capture their combinatorial nature, which is key for generalizing beyond the compositions seen during training.
We investigate how to learn such models, both formally and experimentally, for the task of abstract visual reasoning. Then, we show that (ii) in some settings, modularity allows us to efficiently break down complex tasks into smaller, easier modules, thereby improving computational efficiency. We study this behavior in the context of generative models for colorization, as well as for small object detection. Second, we investigate the inherently layered structure of representations learned by neural networks, and analyze its role in the context of transfer learning and domain adaptation across visually dissimilar domains. }, author = {Royer, Amélie}, isbn = {978-3-99078-007-7}, issn = {2663-337X}, pages = {197}, publisher = {Institute of Science and Technology Austria}, title = {{Leveraging structure in Computer Vision tasks for flexible Deep Learning models}}, doi = {10.15479/AT:ISTA:8390}, year = {2020}, } @inproceedings{8186, abstract = {Numerous methods have been proposed for probabilistic generative modelling of 3D objects. However, none of these is able to produce textured objects, which renders them of limited use for practical tasks. In this work, we present the first generative model of textured 3D meshes. Training such a model would traditionally require a large dataset of textured meshes, but unfortunately, existing datasets of meshes lack detailed textures. We instead propose a new training methodology that allows learning from collections of 2D images without any 3D information. To do so, we train our model to explain a distribution of images by modelling each image as a 3D foreground object placed in front of a 2D background. Thus, it learns to generate meshes that, when rendered, produce images similar to those in its training set. A well-known problem when generating meshes with deep networks is the emergence of self-intersections, which are problematic for many use-cases. As a second contribution we therefore introduce a new generation process for 3D meshes that guarantees no self-intersections arise, based on the physical intuition that faces should push one another out of the way as they move. We conduct extensive experiments on our approach, reporting quantitative and qualitative results on both synthetic data and natural images. These show our method successfully learns to generate plausible and diverse textured 3D samples for five challenging object classes.}, author = {Henderson, Paul M and Tsiminaki, Vagia and Lampert, Christoph}, booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, issn = {2575-7075}, location = {Virtual}, pages = {7498--7507}, publisher = {IEEE}, title = {{Leveraging 2D data to learn textured 3D mesh generation}}, doi = {10.1109/CVPR42600.2020.00752}, year = {2020}, } @article{6944, abstract = {We study the problem of automatically detecting if a given multi-class classifier operates outside of its specifications (out-of-specs), i.e. on input data from a different distribution than what it was trained for. This is an important problem to solve on the road towards creating reliable computer vision systems for real-world applications, because the quality of a classifier’s predictions cannot be guaranteed if it operates out-of-specs. Previously proposed methods for out-of-specs detection make decisions on the level of single inputs. This, however, is insufficient to achieve a low false positive rate and a high true positive rate at the same time.
In this work, we describe a new procedure named KS(conf), based on statistical reasoning. Its main component is a classical Kolmogorov–Smirnov test that is applied to the set of predicted confidence values for batches of samples. Working with batches instead of single samples allows increasing the true positive rate without negatively affecting the false positive rate, thereby overcoming a crucial limitation of single sample tests. We show by extensive experiments using a variety of convolutional network architectures and datasets that KS(conf) reliably detects out-of-specs situations even under conditions where other tests fail. It furthermore has a number of properties that make it an excellent candidate for practical deployment: it is easy to implement, adds almost no overhead to the system, works with any classifier that outputs confidence scores, and requires no a priori knowledge about how the data distribution could change.}, author = {Sun, Rémy and Lampert, Christoph}, issn = {1573-1405}, journal = {International Journal of Computer Vision}, number = {4}, pages = {970--995}, publisher = {Springer Nature}, title = {{KS(conf): A light-weight test if a multiclass classifier operates outside of its specifications}}, doi = {10.1007/s11263-019-01232-x}, volume = {128}, year = {2020}, } @book{7171, abstract = {Do you know what lies behind artificial intelligence and machine learning? This non-fiction book explains the fundamental methods and procedures of machine learning in an easily understandable way and without complicated formulas. No prior mathematical knowledge is required. In an entertaining and informative manner, Lisa, the protagonist of the book, illustrates these methods through everyday situations. A book for everyone who wants to score points with factual knowledge in discussions about the opportunities and risks of the current development of artificial intelligence and machine learning. Also suitable for school students!}, editor = {Kersting, Kristian and Lampert, Christoph and Rothkopf, Constantin}, isbn = {978-3-658-26762-9}, pages = {XIV, 245}, publisher = {Springer Nature}, title = {{Wie Maschinen Lernen: Künstliche Intelligenz Verständlich Erklärt}}, doi = {10.1007/978-3-658-26763-6}, year = {2019}, } @inproceedings{6942, abstract = {Graph games and Markov decision processes (MDPs) are standard models in reactive synthesis and verification of probabilistic systems with nondeterminism. The class of ω-regular winning conditions (e.g., safety, reachability, liveness, parity conditions) provides a robust and expressive specification formalism for properties that arise in the analysis of reactive systems. The resolutions of nondeterminism in games and MDPs are represented as strategies, and we consider succinct representation of such strategies. The decision-tree data structure from machine learning retains the flavor of decisions of strategies and allows entropy-based minimization to obtain succinct trees. However, in contrast to traditional machine-learning problems where small errors are allowed, for winning strategies in graph games and MDPs no error is allowed, and the decision tree must represent the entire strategy. In this work we propose decision trees with linear classifiers for representation of strategies in graph games and MDPs.
We have implemented strategy representation using this data structure and we present experimental results for problems on graph games and MDPs, which show that this new data structure yields a much more efficient strategy representation than standard decision trees.}, author = {Ashok, Pranav and Brázdil, Tomáš and Chatterjee, Krishnendu and Křetínský, Jan and Lampert, Christoph and Toman, Viktor}, booktitle = {16th International Conference on Quantitative Evaluation of Systems}, isbn = {9783030302801}, issn = {0302-9743}, location = {Glasgow, United Kingdom}, pages = {109--128}, publisher = {Springer Nature}, title = {{Strategy representation by decision trees with linear classifiers}}, doi = {10.1007/978-3-030-30281-8_7}, volume = {11785}, year = {2019}, } @article{6554, abstract = {Due to the importance of zero-shot learning, i.e. classifying images where there is a lack of labeled training data, the number of proposed approaches has recently increased steadily. We argue that it is time to take a step back and to analyze the status quo of the area. The purpose of this paper is three-fold. First, given the fact that there is no agreed-upon zero-shot learning benchmark, we define a new benchmark by unifying both the evaluation protocols and data splits of publicly available datasets used for this task. This is an important contribution as published results are often not comparable and sometimes even flawed due to, e.g., pre-training on zero-shot test classes. Moreover, we propose a new zero-shot learning dataset, the Animals with Attributes 2 (AWA2) dataset, which we make publicly available both in terms of image features and the images themselves. Second, we compare and analyze a significant number of the state-of-the-art methods in depth, both in the classic zero-shot setting and in the more realistic generalized zero-shot setting. Finally, we discuss in detail the limitations of the current status of the area which can be taken as a basis for advancing it.}, author = {Xian, Yongqin and Lampert, Christoph and Schiele, Bernt and Akata, Zeynep}, issn = {1939-3539}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, number = {9}, pages = {2251 -- 2265}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, title = {{Zero-shot learning - A comprehensive evaluation of the good, the bad and the ugly}}, doi = {10.1109/tpami.2018.2857768}, volume = {41}, year = {2019}, } @inproceedings{7479, abstract = {Multi-exit architectures, in which a stack of processing layers is interleaved with early output layers, allow the processing of a test example to stop early and thus save computation time and/or energy. In this work, we propose a new training procedure for multi-exit architectures based on the principle of knowledge distillation. The method encourages early exits to mimic later, more accurate exits by matching their output probabilities. Experiments on CIFAR100 and ImageNet show that distillation-based training significantly improves the accuracy of early exits while maintaining state-of-the-art accuracy for late ones. The method is particularly beneficial when training data is limited and it allows a straightforward extension to semi-supervised learning, i.e. making use of unlabeled data at training time.
Moreover, it takes only a few lines to implement and incurs almost no computational overhead at training time, and none at all at test time.}, author = {Bui Thi Mai, Phuong and Lampert, Christoph}, booktitle = {IEEE International Conference on Computer Vision}, isbn = {9781728148038}, issn = {15505499}, location = {Seoul, Korea}, pages = {1355--1364}, publisher = {IEEE}, title = {{Distillation-based training for multi-exit architectures}}, doi = {10.1109/ICCV.2019.00144}, volume = {2019-October}, year = {2019}, } @inproceedings{7640, abstract = {We propose a new model for detecting visual relationships, such as "person riding motorcycle" or "bottle on table". This task is an important step towards comprehensive structured image understanding, going beyond detecting individual objects. Our main novelty is a Box Attention mechanism that allows modeling pairwise interactions between objects using standard object detection pipelines. The resulting model is conceptually clean and expressive, and relies on well-justified training and prediction procedures. Moreover, unlike previously proposed approaches, our model does not introduce any additional complex components or hyperparameters on top of those already required by the underlying detection model. We conduct an experimental evaluation on two datasets, V-COCO and Open Images, demonstrating strong quantitative and qualitative results.}, author = {Kolesnikov, Alexander and Kuznetsova, Alina and Lampert, Christoph and Ferrari, Vittorio}, booktitle = {Proceedings of the 2019 International Conference on Computer Vision Workshop}, isbn = {9781728150239}, location = {Seoul, South Korea}, publisher = {IEEE}, title = {{Detecting visual relationships using box attention}}, doi = {10.1109/ICCVW.2019.00217}, year = {2019}, } @inproceedings{6569, abstract = {Knowledge distillation, i.e. one classifier being trained on the outputs of another classifier, is an empirically very successful technique for knowledge transfer between classifiers. It has even been observed that classifiers learn much faster and more reliably if trained with the outputs of another classifier as soft labels, instead of from ground truth data. So far, however, there is no satisfactory theoretical explanation of this phenomenon. In this work, we provide the first insights into the working mechanisms of distillation by studying the special case of linear and deep linear classifiers. Specifically, we prove a generalization bound that establishes fast convergence of the expected risk of a distillation-trained linear classifier. From the bound and its proof we extract three key factors that determine the success of distillation: data geometry – geometric properties of the data distribution, in particular class separation, have an immediate influence on the convergence speed of the risk; optimization bias – gradient descent optimization finds a very favorable minimum of the distillation objective; and strong monotonicity – the expected risk of the student classifier always decreases when the size of the training set grows.}, author = {Bui Thi Mai, Phuong and Lampert, Christoph}, booktitle = {Proceedings of the 36th International Conference on Machine Learning}, location = {Long Beach, CA, United States}, pages = {5142--5151}, publisher = {ML Research Press}, title = {{Towards understanding knowledge distillation}}, volume = {97}, year = {2019}, } @inproceedings{6590, abstract = {Modern machine learning methods often require more data for training than a single expert can provide.
Therefore, it has become a standard procedure to collect data from external sources, e.g. via crowdsourcing. Unfortunately, the quality of these sources is not always guaranteed. As additional complications, the data might be stored in a distributed way, or might even have to remain private. In this work, we address the question of how to learn robustly in such scenarios. Studying the problem through the lens of statistical learning theory, we derive a procedure that allows for learning from all available sources, yet automatically suppresses irrelevant or corrupted data. We show by extensive experiments that our method provides significant improvements over alternative approaches from robust statistics and distributed optimization. }, author = {Konstantinov, Nikola H and Lampert, Christoph}, booktitle = {Proceedings of the 36th International Conference on Machine Learning}, location = {Long Beach, CA, USA}, pages = {3488--3498}, publisher = {ML Research Press}, title = {{Robust learning from untrusted sources}}, volume = {97}, year = {2019}, } @inproceedings{6482, abstract = {Computer vision systems for automatic image categorization have become accurate and reliable enough that they can run continuously for days or even years as components of real-world commercial applications. A major open problem in this context, however, is quality control. Good classification performance can only be expected if systems run under the specific conditions, in particular data distributions, that they were trained for. Surprisingly, none of the currently used deep network architectures have a built-in functionality that could detect if a network operates on data from a distribution it was not trained for, such that a warning to the human users could potentially be triggered. In this work, we describe KS(conf), a procedure for detecting such outside-of-specifications (out-of-specs) operation, based on statistical testing of the network outputs. We show by extensive experiments using the ImageNet, AwA2 and DAVIS datasets on a variety of ConvNet architectures that KS(conf) reliably detects out-of-specs situations. It furthermore has a number of properties that make it a promising candidate for practical deployment: it is easy to implement, adds almost no overhead to the system, works with all networks, including pretrained ones, and requires no a priori knowledge of how the data distribution could change. }, author = {Sun, Rémy and Lampert, Christoph}, isbn = {9783030129385}, issn = {1611-3349}, location = {Stuttgart, Germany}, pages = {244--259}, publisher = {Springer Nature}, title = {{KS(conf): A light-weight test if a ConvNet operates outside of its specifications}}, doi = {10.1007/978-3-030-12939-2_18}, volume = {11269}, year = {2019}, } @phdthesis{68, abstract = {The most common assumption made in statistical learning theory is the assumption of independent and identically distributed (i.i.d.) data. While being very convenient mathematically, it is often very clearly violated in practice. This disparity between machine learning theory and applications underlies a growing demand for the development of algorithms that learn from dependent data and for theory that can provide generalization guarantees similar to those in the independent situation. This thesis is dedicated to two variants of dependencies that can arise in practice. One is a dependence at the level of samples within a single learning task.
Another dependency type arises in the multi-task setting when the tasks are dependent on each other even though the data for them can be i.i.d. In both cases we model the data (samples or tasks) as stochastic processes and introduce new algorithms for both settings that take into account and exploit the resulting dependencies. We prove theoretical guarantees on the performance of the introduced algorithms under different evaluation criteria and, in addition, we complement the theoretical study with an empirical one, where we evaluate some of the algorithms on two real-world datasets to highlight their practical applicability.}, author = {Zimin, Alexander}, issn = {2663-337X}, pages = {92}, publisher = {Institute of Science and Technology Austria}, title = {{Learning from dependent data}}, doi = {10.15479/AT:ISTA:TH1048}, year = {2018}, } @phdthesis{197, abstract = {Modern computer vision systems heavily rely on statistical machine learning models, which typically require large amounts of labeled data to be learned reliably. Moreover, very recently computer vision research widely adopted techniques for representation learning, which further increase the demand for labeled data. However, for many important practical problems there is a relatively small amount of labeled data available, so it is problematic to leverage the full potential of the representation learning methods. One way to overcome this obstacle is to invest substantial resources into producing large labelled datasets. Unfortunately, this can be prohibitively expensive in practice. In this thesis we focus on the alternative way of tackling the aforementioned issue. We concentrate on methods that make use of weakly-labeled or even unlabeled data. Specifically, the first half of the thesis is dedicated to the semantic image segmentation task. We develop a technique that achieves competitive segmentation performance and only requires annotations in the form of global image-level labels instead of dense segmentation masks. Subsequently, we present a new methodology that further improves segmentation performance by leveraging tiny additional feedback from a human annotator. By using our methods practitioners can greatly reduce the amount of data annotation effort that is required to learn modern image segmentation models. In the second half of the thesis we focus on methods for learning from unlabeled visual data. We study a family of autoregressive models for modeling the structure of natural images and discuss potential applications of these models. Moreover, we conduct an in-depth study of one of these applications, where we develop the state-of-the-art model for the probabilistic image colorization task.}, author = {Kolesnikov, Alexander}, issn = {2663-337X}, pages = {113}, publisher = {Institute of Science and Technology Austria}, title = {{Weakly-Supervised Segmentation and Unsupervised Modeling of Natural Images}}, doi = {10.15479/AT:ISTA:th_1021}, year = {2018}, } @article{563, abstract = {In continuous populations with local migration, nearby pairs of individuals have on average more similar genotypes than geographically well separated pairs. A barrier to gene flow distorts this classical pattern of isolation by distance. Genetic similarity is decreased for sample pairs on different sides of the barrier and increased for pairs on the same side near the barrier. Here, we introduce an inference scheme that utilizes this signal to detect and estimate the strength of a linear barrier to gene flow in two dimensions.
We use a diffusion approximation to model the effects of a barrier on the geographical spread of ancestry backwards in time. This approach allows us to calculate the chance of recent coalescence and the probability of identity by descent. We introduce an inference scheme that fits these theoretical results to the geographical covariance structure of biallelic genetic markers. It can estimate the strength of the barrier as well as several demographic parameters. We investigate the power of our inference scheme to detect barriers by applying it to a wide range of simulated data. We also showcase an example application to an Antirrhinum majus (snapdragon) flower color hybrid zone, where we do not detect any signal of a strong genome-wide barrier to gene flow.}, author = {Ringbauer, Harald and Kolesnikov, Alexander and Field, David and Barton, Nicholas H}, journal = {Genetics}, number = {3}, pages = {1231--1245}, publisher = {Genetics Society of America}, title = {{Estimating barriers to gene flow from distorted isolation-by-distance patterns}}, doi = {10.1534/genetics.117.300638}, volume = {208}, year = {2018}, } @article{321, abstract = {The twelve papers in this special section focus on learning systems with shared information for computer vision and multimedia communication analysis. In the real world, a realistic setting for computer vision or multimedia recognition problems is that we have some classes containing lots of training data and many classes containing a small amount of training data. Therefore, how to use frequent classes to help learn rare classes, for which it is harder to collect training data, is an open question. Learning with shared information is an emerging topic in machine learning, computer vision and multimedia analysis. There are different levels of components that can be shared during concept modeling and machine learning stages, such as sharing generic object parts, sharing attributes, sharing transformations, sharing regularization parameters and sharing training examples. Regarding the specific methods, multi-task learning, transfer learning and deep learning can be seen as using different strategies to share information. These learning-with-shared-information methods are very effective in solving real-world large-scale problems.}, author = {Darrell, Trevor and Lampert, Christoph and Sebe, Nico and Wu, Ying and Yan, Yan}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, number = {5}, pages = {1029 -- 1031}, publisher = {IEEE}, title = {{Guest editors' introduction to the special section on learning with shared information for computer vision and multimedia analysis}}, doi = {10.1109/TPAMI.2018.2804998}, volume = {40}, year = {2018}, } @inproceedings{10882, abstract = {We introduce Intelligent Annotation Dialogs for bounding box annotation. We train an agent to automatically choose a sequence of actions for a human annotator to produce a bounding box in a minimal amount of time. Specifically, we consider two actions: box verification [34], where the annotator verifies a box generated by an object detector, and manual box drawing. We explore two kinds of agents, one based on predicting the probability that a box will be positively verified, and the other based on reinforcement learning.
We demonstrate that (1) our agents are able to learn efficient annotation strategies in several scenarios, automatically adapting to the image difficulty, the desired quality of the boxes, and the detector strength; (2) in all scenarios the resulting annotation dialogs speed up annotation compared to manual box drawing alone and box verification alone, while also outperforming any fixed combination of verification and drawing in most scenarios; (3) in a realistic scenario where the detector is iteratively re-trained, our agents evolve a series of strategies that reflect the shifting trade-off between verification and drawing as the detector grows stronger.}, author = {Uijlings, Jasper and Konyushkova, Ksenia and Lampert, Christoph and Ferrari, Vittorio}, booktitle = {2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition}, isbn = {9781538664209}, issn = {2575-7075}, location = {Salt Lake City, UT, United States}, pages = {9175--9184}, publisher = {IEEE}, title = {{Learning intelligent dialogs for bounding box annotation}}, doi = {10.1109/cvpr.2018.00956}, year = {2018}, } @inproceedings{6012, abstract = {We present a shallow neural network approach to identify concise equations from data. In contrast to ordinary black-box regression, this approach allows understanding functional relations and generalizing them from observed data to unseen parts of the parameter space. We show how to extend the class of learnable equations for a recently proposed equation learning network to include divisions, and we improve the learning and model selection strategy to be useful for challenging real-world data. For systems governed by analytical expressions, our method can in many cases identify the true underlying equation and extrapolate to unseen domains. We demonstrate its effectiveness by experiments on a cart-pendulum system, where only 2 random rollouts are required to learn the forward dynamics and successfully achieve the swing-up task.}, author = {Sahoo, Subham and Lampert, Christoph and Martius, Georg S}, booktitle = {Proceedings of the 35th International Conference on Machine Learning}, location = {Stockholm, Sweden}, pages = {4442--4450}, publisher = {ML Research Press}, title = {{Learning equations for extrapolation and control}}, volume = {80}, year = {2018}, } @inproceedings{6011, abstract = {We establish a data-dependent notion of algorithmic stability for Stochastic Gradient Descent (SGD), and employ it to develop novel generalization bounds. This is in contrast to previous distribution-free algorithmic stability results for SGD, which depend on worst-case constants. By virtue of the data-dependent argument, our bounds provide new insights into learning with SGD on convex and non-convex problems. In the convex case, we show that the bound on the generalization error depends on the risk at the initialization point. In the non-convex case, we prove that the expected curvature of the objective function around the initialization point has a crucial influence on the generalization error. In both cases, our results suggest a simple data-driven strategy to stabilize SGD by pre-screening its initialization. As a corollary, our results allow us to show optimistic generalization bounds that exhibit fast convergence rates for SGD subject to a vanishing empirical risk and low noise of the stochastic gradients.
}, author = {Kuzborskij, Ilja and Lampert, Christoph}, booktitle = {Proceedings of the 35th International Conference on Machine Learning}, location = {Stockholm, Sweden}, pages = {2815--2824}, publisher = {ML Research Press}, title = {{Data-dependent stability of stochastic gradient descent}}, volume = {80}, year = {2018}, } @inproceedings{6589, abstract = {Distributed training of massive machine learning models, in particular deep neural networks, via Stochastic Gradient Descent (SGD) is becoming commonplace. Several families of communication-reduction methods, such as quantization, large-batch methods, and gradient sparsification, have been proposed. To date, gradient sparsification methods (where each node sorts gradients by magnitude and only communicates a subset of the components, accumulating the rest locally) are known to yield some of the largest practical gains. Such methods can reduce the amount of communication per step by up to three orders of magnitude, while preserving model accuracy. Yet, this family of methods currently has no theoretical justification. This is the question we address in this paper. We prove that, under analytic assumptions, sparsifying gradients by magnitude with local error correction provides convergence guarantees, for both convex and non-convex smooth objectives, for data-parallel SGD. The main insight is that sparsification methods implicitly maintain bounds on the maximum impact of stale updates, thanks to selection by magnitude. Our analysis and empirical validation also reveal that these methods do require analytical conditions to converge well, justifying existing heuristics.}, author = {Alistarh, Dan-Adrian and Hoefler, Torsten and Johansson, Mikael and Konstantinov, Nikola H and Khirirat, Sarit and Renggli, Cedric}, booktitle = {Advances in Neural Information Processing Systems 31}, location = {Montreal, Canada}, pages = {5973--5983}, publisher = {Neural Information Processing Systems Foundation}, title = {{The convergence of sparsified gradient methods}}, volume = {Volume 2018}, year = {2018}, } @misc{5584, abstract = {This package contains data for the publication "Nonlinear decoding of a complex movie from the mammalian retina" by Deny S. et al, PLOS Comput Biol (2018). The data consists of (i) 91 spike-sorted, isolated rat retinal ganglion cells that pass stability and quality criteria, recorded on a multi-electrode array, in response to the presentation of the complex movie with many randomly moving dark discs. The responses are represented as a 648000 x 91 binary matrix, where the first index indicates the timebin of duration 12.5 ms, and the second index the neural identity. The matrix entry is 0/1 if the neuron didn't/did spike in the particular time bin. (ii) a README file and a graphical illustration of the structure of the experiment, specifying how the 648000 timebins are split into epochs where 1, 2, 4, or 10 discs were displayed, and which stimulus segments are exact repeats or unique ball trajectories. (iii) a 648000 x 400 matrix of luminance traces for each of the 20 x 20 positions ("sites") in the movie frame, with time locked to the recorded raster. The luminance traces are produced as described in the manuscript by filtering the raw disc movie with a small Gaussian spatial kernel.
}, author = {Deny, Stephane and Marre, Olivier and Botella-Soler, Vicente and Martius, Georg S and Tkacik, Gasper}, keywords = {retina, decoding, regression, neural networks, complex stimulus}, publisher = {Institute of Science and Technology Austria}, title = {{Nonlinear decoding of a complex movie from the mammalian retina}}, doi = {10.15479/AT:ISTA:98}, year = {2018}, } @inproceedings{652, abstract = {We present an approach that enables robots to self-organize their sensorimotor behavior from scratch without providing specific information about either the robot or its environment. This is achieved by a simple neural control law that increases the consistency between external sensor dynamics and internal neural dynamics of the utterly simple controller. In this way, the embodiment and the agent-environment coupling are the only source of individual development. We show how an anthropomorphic tendon-driven arm-shoulder system develops different behaviors depending on that coupling. For instance: Given a bottle half-filled with water, the arm starts to shake it, driven by the physical response of the water. When attaching a brush, the arm can be manipulated into wiping a table, and when connected to a revolvable wheel it finds out how to rotate it. Thus, the robot may be said to discover the affordances of the world. When allowing two (simulated) humanoid robots to interact physically, they engage in a joint behavior development leading to, for instance, spontaneous cooperation. More social effects are observed if the robots can visually perceive each other. Although, as an observer, it is tempting to attribute an apparent intentionality, there is nothing of the kind put in. As a conclusion, we argue that emergent behavior may be much less rooted in explicit intentions, internal motivations, or specific reward systems than is commonly believed.}, author = {Der, Ralf and Martius, Georg S}, isbn = {978-150905069-7}, location = {Cergy-Pontoise, France}, publisher = {IEEE}, title = {{Dynamical self consistency leads to behavioral development and emergent social interactions in robots}}, doi = {10.1109/DEVLRN.2016.7846789}, year = {2017}, } @article{658, abstract = {With the accelerated development of robot technologies, control becomes one of the central themes of research. In traditional approaches, the controller, by its internal functionality, finds appropriate actions on the basis of specific objectives for the task at hand. While very successful in many applications, self-organized control schemes seem to be favored in large complex systems with unknown dynamics or that are difficult to model. Reasons are the expected scalability, robustness, and resilience of self-organizing systems. The paper presents a self-learning neurocontroller based on extrinsic differential plasticity introduced recently, applying it to an anthropomorphic musculoskeletal robot arm with attached objects of unknown physical dynamics. The central finding of the paper is the following effect: by the mere feedback through the internal dynamics of the object, the robot learns to relate each of the objects with a very specific sensorimotor pattern. Specifically, an attached pendulum pilots the arm into a circular motion, a half-filled bottle produces axis-oriented shaking behavior, a wheel is getting rotated, and wiping patterns emerge automatically in a table-plus-brush setting.
By these object-specific dynamical patterns, the robot may be said to recognize the object's identity, or in other words, it discovers dynamical affordances of objects. Furthermore, when including hand coordinates obtained from a camera, a dedicated hand-eye coordination self-organizes spontaneously. These phenomena are discussed from a specific dynamical system perspective. Central is the dedicated working regime at the border to instability with its potentially infinite reservoir of (limit cycle) attractors "waiting" to be excited. Besides converging toward one of these attractors, varied behavior also arises from a self-induced attractor morphing driven by the learning rule. We claim that experimental investigations with this anthropomorphic, self-learning robot not only generate interesting and potentially useful behaviors, but may also help to better understand what subjective human muscle feelings are, how they can be rooted in sensorimotor patterns, and how these concepts may feed back on robotics.}, author = {Der, Ralf and Martius, Georg S}, issn = {16625218}, journal = {Frontiers in Neurorobotics}, number = {MAR}, publisher = {Frontiers Research Foundation}, title = {{Self-organized behavior generation for musculoskeletal robots}}, doi = {10.3389/fnbot.2017.00008}, volume = {11}, year = {2017}, } @inproceedings{6841, abstract = {In classical machine learning, regression is treated as a black box process of identifying a suitable function from a hypothesis set without attempting to gain insight into the mechanism connecting inputs and outputs. In the natural sciences, however, finding an interpretable function for a phenomenon is the prime goal as it allows one to understand and generalize results. This paper proposes a novel type of function learning network, called equation learner (EQL), that can learn analytical expressions and is able to extrapolate to unseen domains. It is implemented as an end-to-end differentiable feed-forward network and allows for efficient gradient-based training. Due to sparsity regularization, concise interpretable expressions can be obtained. Often the true underlying source expression is identified.}, author = {Martius, Georg S and Lampert, Christoph}, booktitle = {5th International Conference on Learning Representations, ICLR 2017 - Workshop Track Proceedings}, location = {Toulon, France}, publisher = {International Conference on Learning Representations}, title = {{Extrapolation and learning equations}}, year = {2017}, } @inproceedings{750, abstract = {Modern communication technologies allow first responders to contact thousands of potential volunteers simultaneously for support during a crisis or disaster event. However, such volunteer efforts must be well coordinated and monitored, in order to offer effective relief to the professionals. In this paper we extend earlier work on optimally assigning volunteers to selected landmark locations. In particular, we emphasize the aspect that obtaining good assignments requires not only advanced computational tools, but also a realistic measure of distance between volunteers and landmarks. Specifically, we propose the use of the OpenStreetMap (OSM) driving distance instead of the previously used flight distance. We find the OSM driving distance to be better aligned with the interests of volunteers and first responders.
Furthermore, we show that relying on the flight distance leads to a substantial underestimation of the number of required volunteers, causing negative side effects in the case of an actual crisis.}, author = {Pielorz, Jasmin and Prandtstetter, Matthias and Straub, Markus and Lampert, Christoph}, booktitle = {2017 IEEE International Conference on Big Data}, isbn = {978-153862714-3}, location = {Boston, MA, United States}, pages = {3760 -- 3763}, publisher = {IEEE}, title = {{Optimal geospatial volunteer allocation needs realistic distances}}, doi = {10.1109/BigData.2017.8258375}, year = {2017}, } @inproceedings{1000, abstract = {We study probabilistic models of natural images and extend the autoregressive family of PixelCNN models by incorporating latent variables. Subsequently, we describe two new generative image models that exploit different image transformations as latent variables: a quantized grayscale view of the image or a multi-resolution image pyramid. The proposed models tackle two known shortcomings of existing PixelCNN models: 1) their tendency to focus on low-level image details, while largely ignoring high-level image information, such as object shapes, and 2) their computationally costly procedure for image sampling. We experimentally demonstrate the benefits of our LatentPixelCNN models, in particular showing that they produce much more realistic-looking image samples than previous state-of-the-art probabilistic models. }, author = {Kolesnikov, Alexander and Lampert, Christoph}, booktitle = {34th International Conference on Machine Learning}, isbn = {978-151085514-4}, location = {Sydney, Australia}, pages = {1905 -- 1914}, publisher = {JMLR}, title = {{PixelCNN models with auxiliary variables for natural image modeling}}, volume = {70}, year = {2017}, } @inproceedings{998, abstract = {A major open problem on the road to artificial intelligence is the development of incrementally learning systems that learn about more and more concepts over time from a stream of data. In this work, we introduce a new training strategy, iCaRL, that allows learning in such a class-incremental way: only the training data for a small number of classes has to be present at the same time and new classes can be added progressively. iCaRL learns strong classifiers and a data representation simultaneously. This distinguishes it from earlier works that were fundamentally limited to fixed data representations and therefore incompatible with deep learning architectures. We show by experiments on CIFAR-100 and ImageNet ILSVRC 2012 data that iCaRL can learn many classes incrementally over a long period of time where other strategies quickly fail. }, author = {Rebuffi, Sylvestre Alvise and Kolesnikov, Alexander and Sperl, Georg and Lampert, Christoph}, isbn = {978-153860457-1}, location = {Honolulu, HI, United States}, pages = {5533 -- 5542}, publisher = {IEEE}, title = {{iCaRL: Incremental classifier and representation learning}}, doi = {10.1109/CVPR.2017.587}, volume = {2017}, year = {2017}, } @inproceedings{911, abstract = {We develop a probabilistic technique for colorizing grayscale natural images. In light of the intrinsic uncertainty of this task, the proposed probabilistic framework has numerous desirable properties. In particular, our model is able to produce multiple plausible and vivid colorizations for a given grayscale image and is one of the first colorization models to provide a proper stochastic sampling scheme.
Moreover, our training procedure is supported by a rigorous theoretical framework that does not require any ad hoc heuristics and allows for efficient modeling and learning of the joint pixel color distribution. We demonstrate strong quantitative and qualitative experimental results on the CIFAR-10 dataset and the challenging ILSVRC 2012 dataset.}, author = {Royer, Amélie and Kolesnikov, Alexander and Lampert, Christoph}, booktitle = {Proceedings of the British Machine Vision Conference 2017}, location = {London, United Kingdom}, pages = {85.1 -- 85.12}, publisher = {BMVA Press}, title = {{Probabilistic image colorization}}, doi = {10.5244/c.31.85}, year = {2017}, } @inproceedings{1108, abstract = {In this work we study the learnability of stochastic processes with respect to the conditional risk, i.e. the existence of a learning algorithm that improves its next-step performance with the amount of observed data. We introduce a notion of pairwise discrepancy between conditional distributions at different time steps and show how certain properties of these discrepancies can be used to construct a successful learning algorithm. Our main results are two theorems that establish criteria for learnability for many classes of stochastic processes, including all special cases studied previously in the literature.}, author = {Zimin, Alexander and Lampert, Christoph}, booktitle = {20th International Conference on Artificial Intelligence and Statistics}, location = {Fort Lauderdale, FL, United States}, pages = {213 -- 222}, publisher = {ML Research Press}, title = {{Learning theory for conditional risk minimization}}, volume = {54}, year = {2017}, } @inproceedings{999, abstract = {In multi-task learning, a learner is given a collection of prediction tasks and needs to solve all of them. In contrast to previous work, which required that annotated training data be available for all tasks, we consider a new setting in which, for some tasks, potentially most of them, only unlabeled training data is provided. Consequently, to solve all tasks, information must be transferred between tasks with labels and tasks without labels. Focusing on an instance-based transfer method, we analyze two variants of this setting: when the set of labeled tasks is fixed, and when it can be actively selected by the learner. We state and prove a generalization bound that covers both scenarios and derive from it an algorithm for making the choice of labeled tasks (in the active case) and for transferring information between the tasks in a principled way. We also illustrate the effectiveness of the algorithm on synthetic and real data.}, author = {Pentina, Anastasia and Lampert, Christoph}, booktitle = {34th International Conference on Machine Learning}, isbn = {9781510855144}, location = {Sydney, Australia}, pages = {2807 -- 2816}, publisher = {ML Research Press}, title = {{Multi-task learning with labeled and unlabeled tasks}}, volume = {70}, year = {2017}, } @inproceedings{1098, abstract = {Better understanding of the potential benefits of information transfer and representation learning is an important step towards the goal of building intelligent systems that are able to persist in the world and learn over time. In this work, we consider a setting where the learner encounters a stream of tasks but is able to retain only limited information from each encountered task, such as a learned predictor. In contrast to most previous works analyzing this scenario, we do not make any distributional assumptions on the task-generating process. Instead, we formulate a complexity measure that captures the diversity of the observed tasks. We provide a lifelong learning algorithm with error guarantees for every observed task (rather than on average).
In terms of our task complexity measure, we show sample complexity reductions in comparison to solving every task in isolation. Further, our algorithmic framework can naturally be viewed as learning a representation from the encountered tasks with a neural network.}, author = {Pentina, Anastasia and Urner, Ruth}, booktitle = {30th Conference on Neural Information Processing Systems}, location = {Barcelona, Spain}, pages = {3619 -- 3627}, publisher = {Neural Information Processing Systems}, title = {{Lifelong learning with weighted majority votes}}, volume = {29}, year = {2016}, } @inproceedings{1102, abstract = {Weakly-supervised object localization methods tend to fail for object classes that consistently co-occur with the same background elements, e.g. trains on tracks. We propose a method to overcome these failures by adding a very small amount of model-specific additional annotation. The main idea is to cluster a deep network's mid-level representations and assign object or distractor labels to each cluster. Experiments show substantially improved localization results on the challenging ILSVRC 2014 dataset for bounding box detection and the PASCAL VOC 2012 dataset for semantic segmentation.}, author = {Kolesnikov, Alexander and Lampert, Christoph}, booktitle = {Proceedings of the British Machine Vision Conference 2016}, location = {York, United Kingdom}, pages = {92.1 -- 92.12}, publisher = {BMVA Press}, title = {{Improving weakly-supervised object localization by micro-annotation}}, doi = {10.5244/C.30.92}, volume = {2016-September}, year = {2016}, } @inproceedings{1214, abstract = {With the accelerated development of robot technologies, optimal control has become one of the central themes of research. In traditional approaches, the controller, by its internal functionality, finds appropriate actions on the basis of the history of sensor values, guided by goals, intentions, objectives, learning schemes, and so forth. While very successful with classical robots, these methods run into severe difficulties when applied to soft robots, a new field of robotics of great interest for human-robot interaction. We claim that a novel controller paradigm opens new perspectives for this field. This paper applies a recently developed neuro-controller with differential extrinsic synaptic plasticity to a muscle-tendon driven arm-shoulder system from the Myorobotics toolkit. In the experiments, we observe a vast variety of self-organized behavior patterns: when left alone, the arm realizes pseudo-random sequences of different poses. By applying physical forces, the system can be entrained into definite motion patterns like wiping a table. Most interestingly, after attaching an object, the controller gets into functional resonance with the object's internal dynamics, spontaneously starting to shake bottles half-filled with water or sensitively driving an attached pendulum into a circular mode. When attached to the crank of a wheel, the neural system independently discovers how to rotate it.
In this way, the robot discovers affordances of objects its body is interacting with.}, author = {Martius, Georg S and Hostettler, Raphael and Knoll, Alois and Der, Ralf}, booktitle = {2016 IEEE/RSJ International Conference on Intelligent Robots and Systems}, location = {Daejeon, Korea}, publisher = {IEEE}, title = {{Compliant control for soft robots: Emergent behavior of a tendon-driven anthropomorphic arm}}, doi = {10.1109/IROS.2016.7759138}, volume = {2016-November}, year = {2016}, } @inproceedings{1369, abstract = {We introduce a new loss function for the weakly-supervised training of semantic image segmentation models, based on three guiding principles: to seed with weak localization cues, to expand objects based on the information about which classes can occur in an image, and to constrain the segmentations to coincide with object boundaries. We show experimentally that training a deep convolutional neural network with the proposed loss function leads to substantially better segmentations than previous state-of-the-art methods on the challenging PASCAL VOC 2012 dataset. We furthermore give insight into the working mechanism of our method through a detailed experimental study that illustrates how the segmentation quality is affected by each term of the proposed loss function, as well as by their combinations.}, author = {Kolesnikov, Alexander and Lampert, Christoph}, booktitle = {Computer Vision -- ECCV 2016}, location = {Amsterdam, The Netherlands}, pages = {695 -- 711}, publisher = {Springer}, title = {{Seed, expand and constrain: Three principles for weakly-supervised image segmentation}}, doi = {10.1007/978-3-319-46493-0_42}, volume = {9908}, year = {2016}, } @inproceedings{1707, abstract = {Volunteer supporters play an important role in modern crisis and disaster management. In the age of mobile Internet devices, help from thousands of volunteers can be requested within a short time span, thus relieving professional helpers of minor chores or geographically spread-out tasks. However, the simultaneous availability of many volunteers also poses new problems. In particular, the volunteer efforts must be well coordinated, or otherwise situations might emerge in which too many idle volunteers at one location become more of a burden than a relief to the professionals. In this work, we study the task of optimally assigning volunteers to selected locations, e.g. in order to perform regular measurements, to report on damage, or to distribute information or resources to the population in a crisis situation. We formulate the assignment task as an optimization problem and propose an effective and efficient solution procedure. Experiments on real data from Team Österreich, consisting of over 36,000 Austrian volunteers, show the effectiveness and efficiency of our approach.}, author = {Pielorz, Jasmin and Lampert, Christoph}, location = {Rennes, France}, publisher = {IEEE}, title = {{Optimal geospatial allocation of volunteers for crisis management}}, doi = {10.1109/ICT-DM.2015.7402041}, year = {2016}, } @inproceedings{8094, abstract = {With the accelerated development of robot technologies, optimal control has become one of the central themes of research. In traditional approaches, the controller, by its internal functionality, finds appropriate actions on the basis of the history of sensor values, guided by goals, intentions, objectives, learning schemes, and so forth. The idea is that the controller controls the world (the body plus its environment) as reliably as possible. This paper focuses on new lines of self-organization for developmental robotics.
We apply the recently developed differential extrinsic synaptic plasticity to a muscle-tendon driven arm-shoulder system from the Myorobotics toolkit. In the experiments, we observe a vast variety of self-organized behavior patterns: when left alone, the arm realizes pseudo-random sequences of different poses. By applying physical forces, the system can be entrained into definite motion patterns like wiping a table. Most interestingly, after attaching an object, the controller gets into functional resonance with the object's internal dynamics, spontaneously starting to shake bottles half-filled with water or sensitively driving an attached pendulum into a circular mode. When attached to the crank of a wheel, the neural system independently discovers how to rotate it. In this way, the robot discovers affordances of objects its body is interacting with.}, author = {Martius, Georg S and Hostettler, Rafael and Knoll, Alois and Der, Ralf}, booktitle = {Proceedings of the Artificial Life Conference 2016}, isbn = {9780262339360}, location = {Cancun, Mexico}, pages = {142 -- 143}, publisher = {MIT Press}, title = {{Self-organized control of a tendon-driven arm by differential extrinsic plasticity}}, doi = {10.7551/978-0-262-33936-0-ch029}, volume = {28}, year = {2016}, } @phdthesis{1126, abstract = {Traditionally, machine learning has focused on the problem of solving a single task in isolation. While being quite well understood, this approach disregards an important aspect of human learning: when facing a new problem, humans are able to exploit knowledge acquired from previously learned tasks. Intuitively, access to several problems simultaneously or sequentially could also be advantageous for a machine learning system, especially if these tasks are closely related. Indeed, the results of many empirical studies have provided justification for this intuition. However, theoretical justifications of this idea are rather limited. The focus of this thesis is to expand the understanding of the potential benefits of information transfer between several related learning problems. We provide theoretical analysis for three scenarios of multi-task learning: multiple kernel learning, sequential learning, and active task selection. We also provide a PAC-Bayesian perspective on lifelong learning and investigate how the task generation process influences the generalization guarantees in this scenario. In addition, we show how some of the obtained theoretical results can be used to derive principled multi-task and lifelong learning algorithms and illustrate their performance on various synthetic and real-world datasets.}, author = {Pentina, Anastasia}, issn = {2663-337X}, pages = {127}, publisher = {Institute of Science and Technology Austria}, title = {{Theoretical foundations of multi-task lifelong learning}}, doi = {10.15479/AT:ISTA:TH_776}, year = {2016}, } @inproceedings{1425, abstract = {In this work we aim at extending the theoretical foundations of lifelong learning. Previous work analyzing this scenario is based on the assumption that learning tasks are sampled i.i.d. from a task environment, or is limited to strongly constrained data distributions. Instead, we study two scenarios in which lifelong learning is possible even though the observed tasks do not form an i.i.d. sample: first, when they are sampled from the same environment, but possibly with dependencies, and second, when the task environment is allowed to change over time in a consistent way.
In the first case, we prove a PAC-Bayesian theorem that can be seen as a direct generalization of the analogous previous result for the i.i.d. case. For the second scenario, we propose to learn an inductive bias in the form of a transfer procedure. We present a generalization bound and show on a toy example how it can be used to identify a beneficial transfer algorithm.}, author = {Pentina, Anastasia and Lampert, Christoph}, booktitle = {29th Conference on Neural Information Processing Systems}, location = {Montreal, Canada}, pages = {1540 -- 1548}, publisher = {Neural Information Processing Systems}, title = {{Lifelong learning with non-i.i.d. tasks}}, volume = {2015}, year = {2015}, } @article{1533, abstract = {This paper addresses the problem of semantic segmentation, where the possible class labels are from a predefined set. We exploit top-down guidance, i.e., the coarse localization of the objects and their class labels provided by object detectors. For each detected bounding box, figure-ground segmentation is performed and the final result is achieved by merging the figure-ground segmentations. The main idea of the proposed approach, which was presented in our preliminary work, is to reformulate the figure-ground segmentation problem as sparse reconstruction pursuing the object mask in a nonparametric manner. The latent segmentation mask should be coherent subject to sparse error caused by intra-category diversity; thus, the object mask is inferred by making use of sparse representations over the training set. To handle local spatial deformations, local patch-level masks are also considered and inferred by sparse representations over the spatially nearby patches. The sparse reconstruction coefficients and the latent mask are alternately optimized by applying the Lasso algorithm and the accelerated proximal gradient method. The proposed formulation results in a convex optimization problem; thus, the globally optimal solution is achieved. In this paper, we provide theoretical analysis of the convergence and optimality. We also give an extended numerical analysis of the proposed algorithm and a comprehensive comparison with related semantic segmentation methods on the challenging PASCAL VOC object segmentation datasets and the Weizmann horse dataset. The experimental results demonstrate that the proposed algorithm achieves competitive performance compared with the state of the art.}, author = {Xia, Wei and Domokos, Csaba and Xiong, Junjun and Cheong, Loongfah and Yan, Shuicheng}, journal = {IEEE Transactions on Circuits and Systems for Video Technology}, number = {8}, pages = {1295 -- 1308}, publisher = {IEEE}, title = {{Segmentation over detection via optimal sparse reconstructions}}, doi = {10.1109/TCSVT.2014.2379972}, volume = {25}, year = {2015}, } @article{1570, abstract = {Grounding autonomous behavior in the nervous system is a fundamental challenge for neuroscience. In particular, self-organized behavioral development raises more questions than answers. Are there special functional units for curiosity, motivation, and creativity? This paper argues that these features can be grounded in synaptic plasticity itself, without requiring any higher-level constructs. We propose differential extrinsic plasticity (DEP) as a new synaptic rule for self-learning systems and apply it to a number of complex robotic systems as a test case. Without specifying any purpose or goal, seemingly purposeful and adaptive rhythmic behavior is developed, displaying a certain level of sensorimotor intelligence.
These surprising results require no system-specific modifications of the DEP rule. Rather, they arise from the underlying mechanism of spontaneous symmetry breaking, which is due to the tight brain-body-environment coupling. The new synaptic rule is biologically plausible and would be an interesting target for neurobiological investigation. We also argue that this neuronal mechanism may have been a catalyst in natural evolution.}, author = {Der, Ralf and Martius, Georg S}, journal = {Proceedings of the National Academy of Sciences}, number = {45}, pages = {E6224 -- E6232}, publisher = {National Academy of Sciences}, title = {{Novel plasticity rule can explain the development of sensorimotor intelligence}}, doi = {10.1073/pnas.1508400112}, volume = {112}, year = {2015}, }
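Entry 6841 above describes the equation learner (EQL) as an end-to-end differentiable feed-forward network whose units compute fixed analytical base functions, with a sparsity penalty driving it toward concise expressions. The following is a minimal NumPy sketch of that idea, not the authors' implementation; the function names, the particular base functions, and the equal group sizes are all illustrative assumptions.

import numpy as np

def eql_layer(x, W, b):
    # One EQL-style layer: affine map z = W x + b, followed by a fixed
    # bank of base functions. The split into identity, sine, cosine and
    # pairwise-product units is an illustrative choice; it assumes the
    # layer width len(z) is a multiple of 8.
    z = W @ x + b
    n = len(z) // 4
    ident, s, c, p = z[:n], z[n:2 * n], z[2 * n:3 * n], z[3 * n:]
    prod = p[: len(p) // 2] * p[len(p) // 2:]  # product units consume pairs
    return np.concatenate([ident, np.sin(s), np.cos(c), prod])

def l1_penalty(weights, lam=1e-3):
    # Sparsity regularization encouraging concise, interpretable expressions.
    return lam * sum(np.abs(W).sum() for W in weights)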
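Entry 998 describes iCaRL, which maintains a small exemplar memory per class chosen so that the exemplars' mean feature vector approximates the class mean. A sketch of such herding-style selection follows; the names are hypothetical, and the representation-learning and distillation parts of the full method are omitted.

import numpy as np

def select_exemplars(features, m):
    # features: (N, d) array of feature vectors of one class (ideally
    # L2-normalized). Greedily returns indices of m exemplars such that
    # the running mean of the chosen features tracks the class mean.
    mu = features.mean(axis=0)
    chosen = []
    acc = np.zeros_like(mu)
    for k in range(1, m + 1):
        # distance of each candidate's resulting running mean to the class mean
        dists = np.linalg.norm(mu - (acc + features) / k, axis=1)
        dists[chosen] = np.inf  # never pick the same example twice
        idx = int(np.argmin(dists))
        chosen.append(idx)
        acc += features[idx]
    return chosen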
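Entry 1369 combines three loss terms: seed, expand, and constrain. The sketch below illustrates only the seeding term, read as a cross-entropy restricted to pixels where weak localization cues provide labels; the array layout and names are assumptions, and the full loss adds the expand and constrain terms.

import numpy as np

def seeding_loss(probs, seeds):
    # probs: (H, W, C) per-pixel class probabilities (softmax outputs);
    # seeds: (H, W) integer label map with -1 where no weak cue exists.
    # Returns the mean negative log-probability at the seeded pixels.
    ys, xs = np.nonzero(seeds >= 0)
    if len(ys) == 0:
        return 0.0
    p = probs[ys, xs, seeds[ys, xs]]
    return float(-np.log(np.clip(p, 1e-8, None)).mean())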