[{"project":[{"name":"Lifelong Learning of Visual Scene Understanding","grant_number":"308036","_id":"2532554C-B435-11E9-9278-68D0E5697425","call_identifier":"FP7"}],"title":"Majority vote of diverse classifiers for late fusion","author":[{"orcid":"0000-0002-8301-7240","full_name":"Morvant, Emilie","last_name":"Morvant","first_name":"Emilie","id":"4BAC2A72-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Amaury","last_name":"Habrard","full_name":"Habrard, Amaury"},{"first_name":"Stéphane","last_name":"Ayache","full_name":"Ayache, Stéphane"}],"publist_id":"4989","external_id":{"arxiv":["1404.7796"]},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"chicago":"Morvant, Emilie, Amaury Habrard, and Stéphane Ayache. “Majority Vote of Diverse Classifiers for Late Fusion.” In Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 8621:153–62. Springer, 2014. https://doi.org/10.1007/978-3-662-44415-3_16.","ista":"Morvant E, Habrard A, Ayache S. 2014. Majority vote of diverse classifiers for late fusion. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics). IAPR: International Workshop on Structural, Syntactic, and Statistical Pattern Recognition, LNCS, vol. 8621, 153–162.","mla":"Morvant, Emilie, et al. “Majority Vote of Diverse Classifiers for Late Fusion.” Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 8621, Springer, 2014, pp. 153–62, doi:10.1007/978-3-662-44415-3_16.","ama":"Morvant E, Habrard A, Ayache S. Majority vote of diverse classifiers for late fusion. In: Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics). Vol 8621. Springer; 2014:153-162. doi:10.1007/978-3-662-44415-3_16","apa":"Morvant, E., Habrard, A., & Ayache, S. (2014). 
Majority vote of diverse classifiers for late fusion. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) (Vol. 8621, pp. 153–162). Joensuu, Finland: Springer. https://doi.org/10.1007/978-3-662-44415-3_16","ieee":"E. Morvant, A. Habrard, and S. Ayache, “Majority vote of diverse classifiers for late fusion,” in Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), Joensuu, Finland, 2014, vol. 8621, pp. 153–162.","short":"E. Morvant, A. Habrard, S. Ayache, in:, Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), Springer, 2014, pp. 153–162."},"quality_controlled":"1","publisher":"Springer","oa":1,"date_published":"2014-01-01T00:00:00Z","doi":"10.1007/978-3-662-44415-3_16","date_created":"2018-12-11T11:55:28Z","page":"153 - 162","day":"01","publication":"Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)","year":"2014","status":"public","type":"conference","conference":{"start_date":"2014-08-20","end_date":"2014-08-22","location":"Joensuu, Finland","name":"IAPR: International Workshop on Structural, Syntactic, and Statistical Pattern Recognition"},"_id":"2057","department":[{"_id":"ChLa"}],"date_updated":"2021-01-12T06:55:01Z","month":"01","intvolume":" 8621","scopus_import":1,"alternative_title":["LNCS"],"main_file_link":[{"url":"http://arxiv.org/abs/1404.7796","open_access":"1"}],"oa_version":"Preprint","abstract":[{"lang":"eng","text":"In the past few years, a lot of attention has been devoted to multimedia indexing by fusing multimodal informations. Two kinds of fusion schemes are generally considered: The early fusion and the late fusion. 
We focus on late classifier fusion, where one combines the scores of each modality at the decision level. To tackle this problem, we investigate a recent and elegant well-founded quadratic program named MinCq coming from the machine learning PAC-Bayesian theory. MinCq looks for the weighted combination, over a set of real-valued functions seen as voters, leading to the lowest misclassification rate, while maximizing the voters’ diversity. We propose an extension of MinCq tailored to multimedia indexing. Our method is based on an order-preserving pairwise loss adapted to ranking that allows us to improve Mean Averaged Precision measure while taking into account the diversity of the voters that we want to fuse. We provide evidence that this method is naturally adapted to late fusion procedures and confirm the good behavior of our approach on the challenging PASCAL VOC’07 benchmark."}],"volume":8621,"ec_funded":1,"language":[{"iso":"eng"}],"publication_status":"published"},{"date_updated":"2021-01-12T06:55:46Z","department":[{"_id":"ChLa"}],"_id":"2171","status":"public","conference":{"name":"ECCV: European Conference on Computer Vision","location":"Zurich, Switzerland","end_date":"2014-09-12","start_date":"2014-09-06"},"type":"conference","language":[{"iso":"eng"}],"publication_status":"published","ec_funded":1,"volume":8691,"issue":"PART 3","oa_version":"Submitted Version","abstract":[{"lang":"eng","text":"We present LS-CRF, a new method for training cyclic Conditional Random Fields (CRFs) from large datasets that is inspired by classical closed-form expressions for the maximum likelihood parameters of a generative graphical model with tree topology. Training a CRF with LS-CRF requires only solving a set of independent regression problems, each of which can be solved efficiently in closed form or by an iterative solver. 
This makes LS-CRF orders of magnitude faster than classical CRF training based on probabilistic inference, and at the same time more flexible and easier to implement than other approximate techniques, such as pseudolikelihood or piecewise training. We apply LS-CRF to the task of semantic image segmentation, showing that it achieves on par accuracy to other training techniques at higher speed, thereby allowing efficient CRF training from very large training sets. For example, training a linearly parameterized pairwise CRF on 150,000 images requires less than one hour on a modern workstation."}],"intvolume":" 8691","month":"09","main_file_link":[{"open_access":"1","url":"http://arxiv.org/abs/1403.7057"}],"alternative_title":["LNCS"],"scopus_import":1,"user_id":"4435EBFC-F248-11E8-B48F-1D18A9856A87","citation":{"ama":"Kolesnikov A, Guillaumin M, Ferrari V, Lampert C. Closed-form approximate CRF training for scalable image segmentation. In: Fleet D, Pajdla T, Schiele B, Tuytelaars T, eds. Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics). Vol 8691. Springer; 2014:550-565. doi:10.1007/978-3-319-10578-9_36","apa":"Kolesnikov, A., Guillaumin, M., Ferrari, V., & Lampert, C. (2014). Closed-form approximate CRF training for scalable image segmentation. In D. Fleet, T. Pajdla, B. Schiele, & T. Tuytelaars (Eds.), Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) (Vol. 8691, pp. 550–565). Zurich, Switzerland: Springer. https://doi.org/10.1007/978-3-319-10578-9_36","short":"A. Kolesnikov, M. Guillaumin, V. Ferrari, C. Lampert, in:, D. Fleet, T. Pajdla, B. Schiele, T. Tuytelaars (Eds.), Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), Springer, 2014, pp. 550–565.","ieee":"A. Kolesnikov, M. Guillaumin, V. Ferrari, and C. 
Lampert, “Closed-form approximate CRF training for scalable image segmentation,” in Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), Zurich, Switzerland, 2014, vol. 8691, no. PART 3, pp. 550–565.","mla":"Kolesnikov, Alexander, et al. “Closed-Form Approximate CRF Training for Scalable Image Segmentation.” Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), edited by David Fleet et al., vol. 8691, no. PART 3, Springer, 2014, pp. 550–65, doi:10.1007/978-3-319-10578-9_36.","ista":"Kolesnikov A, Guillaumin M, Ferrari V, Lampert C. 2014. Closed-form approximate CRF training for scalable image segmentation. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics). ECCV: European Conference on Computer Vision, LNCS, vol. 8691, 550–565.","chicago":"Kolesnikov, Alexander, Matthieu Guillaumin, Vittorio Ferrari, and Christoph Lampert. “Closed-Form Approximate CRF Training for Scalable Image Segmentation.” In Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), edited by David Fleet, Tomas Pajdla, Bernt Schiele, and Tinne Tuytelaars, 8691:550–65. Springer, 2014. 
https://doi.org/10.1007/978-3-319-10578-9_36."},"title":"Closed-form approximate CRF training for scalable image segmentation","editor":[{"first_name":"David","last_name":"Fleet","full_name":"Fleet, David"},{"first_name":"Tomas","full_name":"Pajdla, Tomas","last_name":"Pajdla"},{"full_name":"Schiele, Bernt","last_name":"Schiele","first_name":"Bernt"},{"full_name":"Tuytelaars, Tinne","last_name":"Tuytelaars","first_name":"Tinne"}],"author":[{"first_name":"Alexander","id":"2D157DB6-F248-11E8-B48F-1D18A9856A87","last_name":"Kolesnikov","full_name":"Kolesnikov, Alexander"},{"last_name":"Guillaumin","full_name":"Guillaumin, Matthieu","first_name":"Matthieu"},{"last_name":"Ferrari","full_name":"Ferrari, Vittorio","first_name":"Vittorio"},{"orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph","last_name":"Lampert","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph"}],"publist_id":"4813","project":[{"call_identifier":"FP7","_id":"2532554C-B435-11E9-9278-68D0E5697425","name":"Lifelong Learning of Visual Scene Understanding","grant_number":"308036"}],"publication":"Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)","day":"01","year":"2014","date_created":"2018-12-11T11:56:07Z","date_published":"2014-09-01T00:00:00Z","doi":"10.1007/978-3-319-10578-9_36","page":"550 - 565","oa":1,"publisher":"Springer","quality_controlled":"1"},{"file_date_updated":"2020-07-14T12:45:31Z","department":[{"_id":"ChLa"}],"ddc":["000"],"date_updated":"2021-01-12T06:55:46Z","status":"public","pubrep_id":"490","type":"conference","conference":{"name":"BMVC: British Machine Vision Conference","end_date":"2014-09-05","location":"Nottingham, 
UK","start_date":"2014-09-01"},"_id":"2173","ec_funded":1,"file":[{"relation":"main_file","access_level":"open_access","content_type":"application/pdf","checksum":"c4c6d3efdb8ee648faf3e76849839ce2","file_id":"4683","creator":"system","file_size":408172,"date_updated":"2020-07-14T12:45:31Z","file_name":"IST-2016-490-v1+1_khamis-bmvc2014.pdf","date_created":"2018-12-12T10:08:23Z"}],"language":[{"iso":"eng"}],"publication_status":"published","month":"09","scopus_import":1,"oa_version":"Published Version","abstract":[{"text":"In this work we introduce a new approach to co-classification, i.e. the task of jointly classifying multiple, otherwise independent, data samples. The method we present, named CoConut, is based on the idea of adding a regularizer in the label space to encode certain priors on the resulting labelings. A regularizer that encourages labelings that are smooth across the test set, for instance, can be seen as a test-time variant of the cluster assumption, which has been proven useful at training time in semi-supervised learning. A regularizer that introduces a preference for certain class proportions can be regarded as a prior distribution on the class labels. CoConut can build on existing classifiers without making any assumptions on how they were obtained and without the need to re-train them. The use of a regularizer adds a new level of flexibility. It allows the integration of potentially new information at test time, even in other modalities than what the classifiers were trained on. 
We evaluate our framework on six datasets, reporting a clear performance gain in classification accuracy compared to the standard classification setup that predicts labels for each test sample separately.\r\n","lang":"eng"}],"title":"CoConut: Co-classification with output space regularization","publist_id":"4811","author":[{"full_name":"Khamis, Sameh","last_name":"Khamis","first_name":"Sameh"},{"last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87"}],"user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","citation":{"mla":"Khamis, Sameh, and Christoph Lampert. “CoConut: Co-Classification with Output Space Regularization.” Proceedings of the British Machine Vision Conference 2014, BMVA Press, 2014.","ieee":"S. Khamis and C. Lampert, “CoConut: Co-classification with output space regularization,” in Proceedings of the British Machine Vision Conference 2014, Nottingham, UK, 2014.","short":"S. Khamis, C. Lampert, in:, Proceedings of the British Machine Vision Conference 2014, BMVA Press, 2014.","apa":"Khamis, S., & Lampert, C. (2014). CoConut: Co-classification with output space regularization. In Proceedings of the British Machine Vision Conference 2014. Nottingham, UK: BMVA Press.","ama":"Khamis S, Lampert C. CoConut: Co-classification with output space regularization. In: Proceedings of the British Machine Vision Conference 2014. BMVA Press; 2014.","chicago":"Khamis, Sameh, and Christoph Lampert. “CoConut: Co-Classification with Output Space Regularization.” In Proceedings of the British Machine Vision Conference 2014. BMVA Press, 2014.","ista":"Khamis S, Lampert C. 2014. CoConut: Co-classification with output space regularization. Proceedings of the British Machine Vision Conference 2014. 
BMVC: British Machine Vision Conference."},"project":[{"_id":"2532554C-B435-11E9-9278-68D0E5697425","call_identifier":"FP7","name":"Lifelong Learning of Visual Scene Understanding","grant_number":"308036"}],"date_published":"2014-09-01T00:00:00Z","date_created":"2018-12-11T11:56:08Z","day":"01","publication":"Proceedings of the British Machine Vision Conference 2014","has_accepted_license":"1","year":"2014","publisher":"BMVA Press","quality_controlled":"1","oa":1},{"month":"09","publisher":"IEEE","scopus_import":1,"quality_controlled":"1","oa_version":"None","abstract":[{"text":"Fisher Kernels and Deep Learning were two developments with significant impact on large-scale object categorization in the last years. Both approaches were shown to achieve state-of-the-art results on large-scale object categorization datasets, such as ImageNet. Conceptually, however, they are perceived as very different and it is not uncommon for heated debates to spring up when advocates of both paradigms meet at conferences or workshops. In this work, we emphasize the similarities between both architectures rather than their differences and we argue that such a unified view allows us to transfer ideas from one domain to the other. As a concrete example we introduce a method for learning a support vector machine classifier with Fisher kernel at the same time as a task-specific data representation. We reinterpret the setting as a multi-layer feed forward network. Its final layer is the classifier, parameterized by a weight vector, and the two previous layers compute Fisher vectors, parameterized by the coefficients of a Gaussian mixture model. We introduce a gradient descent based learning algorithm that, in contrast to other feature learning techniques, is not just derived from intuition or biological analogy, but has a theoretical justification in the framework of statistical learning theory. 
Our experiments show that the new training procedure leads to significant improvements in classification accuracy while preserving the modularity and geometric interpretability of a support vector machine setup.","lang":"eng"}],"date_created":"2018-12-11T11:56:08Z","ec_funded":1,"doi":"10.1109/CVPR.2014.182","date_published":"2014-09-24T00:00:00Z","page":"1402 - 1409","publication":"Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition","language":[{"iso":"eng"}],"day":"24","year":"2014","publication_status":"published","project":[{"call_identifier":"FP7","_id":"2532554C-B435-11E9-9278-68D0E5697425","grant_number":"308036","name":"Lifelong Learning of Visual Scene Understanding"}],"status":"public","conference":{"end_date":"2014-06-28","location":"Columbus, USA","start_date":"2014-06-23","name":"CVPR: Computer Vision and Pattern Recognition"},"type":"conference","_id":"2172","title":"Deep Fisher Kernels – End to end learning of the Fisher Kernel GMM parameters","department":[{"_id":"ChLa"}],"publist_id":"4812","author":[{"full_name":"Sydorov, Vladyslav","last_name":"Sydorov","first_name":"Vladyslav"},{"first_name":"Mayu","full_name":"Sakurada, Mayu","last_name":"Sakurada"},{"first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph","last_name":"Lampert"}],"user_id":"4435EBFC-F248-11E8-B48F-1D18A9856A87","date_updated":"2021-01-12T06:55:46Z","citation":{"chicago":"Sydorov, Vladyslav, Mayu Sakurada, and Christoph Lampert. “Deep Fisher Kernels – End to End Learning of the Fisher Kernel GMM Parameters.” In Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 1402–9. IEEE, 2014. https://doi.org/10.1109/CVPR.2014.182.","ista":"Sydorov V, Sakurada M, Lampert C. 2014. Deep Fisher Kernels – End to end learning of the Fisher Kernel GMM parameters. 
Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR: Computer Vision and Pattern Recognition, 1402–1409.","mla":"Sydorov, Vladyslav, et al. “Deep Fisher Kernels – End to End Learning of the Fisher Kernel GMM Parameters.” Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, IEEE, 2014, pp. 1402–09, doi:10.1109/CVPR.2014.182.","apa":"Sydorov, V., Sakurada, M., & Lampert, C. (2014). Deep Fisher Kernels – End to end learning of the Fisher Kernel GMM parameters. In Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition (pp. 1402–1409). Columbus, USA: IEEE. https://doi.org/10.1109/CVPR.2014.182","ama":"Sydorov V, Sakurada M, Lampert C. Deep Fisher Kernels – End to end learning of the Fisher Kernel GMM parameters. In: Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition. IEEE; 2014:1402-1409. doi:10.1109/CVPR.2014.182","short":"V. Sydorov, M. Sakurada, C. Lampert, in:, Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, IEEE, 2014, pp. 1402–1409.","ieee":"V. Sydorov, M. Sakurada, and C. Lampert, “Deep Fisher Kernels – End to end learning of the Fisher Kernel GMM parameters,” in Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, Columbus, USA, 2014, pp. 1402–1409."}},{"publisher":"Springer","quality_controlled":"1","oa":1,"acknowledgement":"This work was funded by the French project SoLSTiCe ANR-13-BS02-01 of the ANR. 
","date_published":"2014-10-01T00:00:00Z","doi":"10.1007/s10994-014-5462-z","date_created":"2018-12-11T11:56:10Z","page":"129 - 154","day":"01","publication":"Machine Learning","year":"2014","project":[{"grant_number":"308036","name":"Lifelong Learning of Visual Scene Understanding","call_identifier":"FP7","_id":"2532554C-B435-11E9-9278-68D0E5697425"}],"title":"Learning a priori constrained weighted majority votes","author":[{"first_name":"Aurélien","full_name":"Bellet, Aurélien","last_name":"Bellet"},{"first_name":"Amaury","last_name":"Habrard","full_name":"Habrard, Amaury"},{"first_name":"Emilie","id":"4BAC2A72-F248-11E8-B48F-1D18A9856A87","full_name":"Morvant, Emilie","orcid":"0000-0002-8301-7240","last_name":"Morvant"},{"first_name":"Marc","last_name":"Sebban","full_name":"Sebban, Marc"}],"publist_id":"4802","user_id":"4435EBFC-F248-11E8-B48F-1D18A9856A87","citation":{"apa":"Bellet, A., Habrard, A., Morvant, E., & Sebban, M. (2014). Learning a priori constrained weighted majority votes. Machine Learning. Springer. https://doi.org/10.1007/s10994-014-5462-z","ama":"Bellet A, Habrard A, Morvant E, Sebban M. Learning a priori constrained weighted majority votes. Machine Learning. 2014;97(1-2):129-154. doi:10.1007/s10994-014-5462-z","short":"A. Bellet, A. Habrard, E. Morvant, M. Sebban, Machine Learning 97 (2014) 129–154.","ieee":"A. Bellet, A. Habrard, E. Morvant, and M. Sebban, “Learning a priori constrained weighted majority votes,” Machine Learning, vol. 97, no. 1–2. Springer, pp. 129–154, 2014.","mla":"Bellet, Aurélien, et al. “Learning a Priori Constrained Weighted Majority Votes.” Machine Learning, vol. 97, no. 1–2, Springer, 2014, pp. 129–54, doi:10.1007/s10994-014-5462-z.","ista":"Bellet A, Habrard A, Morvant E, Sebban M. 2014. Learning a priori constrained weighted majority votes. Machine Learning. 97(1–2), 129–154.","chicago":"Bellet, Aurélien, Amaury Habrard, Emilie Morvant, and Marc Sebban. 
“Learning a Priori Constrained Weighted Majority Votes.” Machine Learning. Springer, 2014. https://doi.org/10.1007/s10994-014-5462-z."},"month":"10","intvolume":" 97","scopus_import":1,"main_file_link":[{"url":"https://hal.archives-ouvertes.fr/hal-01009578/document","open_access":"1"}],"oa_version":"Submitted Version","abstract":[{"text":"Weighted majority votes allow one to combine the output of several classifiers or voters. MinCq is a recent algorithm for optimizing the weight of each voter based on the minimization of a theoretical bound over the risk of the vote with elegant PAC-Bayesian generalization guarantees. However, while it has demonstrated good performance when combining weak classifiers, MinCq cannot make use of the useful a priori knowledge that one may have when using a mixture of weak and strong voters. In this paper, we propose P-MinCq, an extension of MinCq that can incorporate such knowledge in the form of a constraint over the distribution of the weights, along with general proofs of convergence that stand in the sample compression setting for data-dependent voters. The approach is applied to a vote of k-NN classifiers with a specific modeling of the voters' performance. P-MinCq significantly outperforms the classic k-NN classifier, a symmetric NN and MinCq using the same voters. We show that it is also competitive with LMNN, a popular metric learning algorithm, and that combining both approaches further reduces the error.","lang":"eng"}],"issue":"1-2","volume":97,"ec_funded":1,"language":[{"iso":"eng"}],"publication_status":"published","status":"public","type":"journal_article","_id":"2180","department":[{"_id":"ChLa"}],"date_updated":"2021-01-12T06:55:49Z"},{"abstract":[{"text":"En apprentissage automatique, nous parlons d'adaptation de domaine lorsque les données de test (cibles) et d'apprentissage (sources) sont générées selon différentes distributions. 
Nous devons donc développer des algorithmes de classification capables de s'adapter à une nouvelle distribution, pour laquelle aucune information sur les étiquettes n'est disponible. Nous attaquons cette problématique sous l'angle de l'approche PAC-Bayésienne qui se focalise sur l'apprentissage de modèles définis comme des votes de majorité sur un ensemble de fonctions. Dans ce contexte, nous introduisons PV-MinCq une version adaptative de l'algorithme (non adaptatif) MinCq. PV-MinCq suit le principe suivant. Nous transférons les étiquettes sources aux points cibles proches pour ensuite appliquer MinCq sur l'échantillon cible ``auto-étiqueté'' (justifié par une borne théorique). Plus précisément, nous définissons un auto-étiquetage non itératif qui se focalise dans les régions où les distributions marginales source et cible sont les plus similaires. Dans un second temps, nous étudions l'influence de notre auto-étiquetage pour en déduire une procédure de validation des hyperparamètres. Finalement, notre approche montre des résultats empiriques prometteurs.","lang":"fre"}],"oa_version":"Preprint","quality_controlled":"1","publisher":"Elsevier","main_file_link":[{"url":"https://hal.archives-ouvertes.fr/hal-01005776/","open_access":"1"}],"oa":1,"month":"07","intvolume":" 1","publication_status":"published","year":"2014","day":"01","language":[{"iso":"eng"}],"page":"49-58","volume":1,"date_published":"2014-07-01T00:00:00Z","date_created":"2018-12-11T11:56:13Z","_id":"2189","type":"conference","conference":{"location":"Saint-Etienne, France","name":"CAP: Conférence Francophone sur l'Apprentissage Automatique (Machine Learning French Conference)"},"status":"public","date_updated":"2021-01-12T06:55:52Z","citation":{"ista":"Morvant E. 2014. Adaptation de domaine de vote de majorité par auto-étiquetage non itératif. CAP: Conférence Francophone sur l’Apprentissage Automatique (Machine Learning French Conference) vol. 1, 49–58.","chicago":"Morvant, Emilie. 
“Adaptation de Domaine de Vote de Majorité Par Auto-Étiquetage Non Itératif,” 1:49–58. Elsevier, 2014.","apa":"Morvant, E. (2014). Adaptation de domaine de vote de majorité par auto-étiquetage non itératif (Vol. 1, pp. 49–58). Presented at the CAP: Conférence Francophone sur l’Apprentissage Automatique (Machine Learning French Conference), Saint-Etienne, France: Elsevier.","ama":"Morvant E. Adaptation de domaine de vote de majorité par auto-étiquetage non itératif. In: Vol 1. Elsevier; 2014:49-58.","short":"E. Morvant, in:, Elsevier, 2014, pp. 49–58.","ieee":"E. Morvant, “Adaptation de domaine de vote de majorité par auto-étiquetage non itératif,” presented at the CAP: Conférence Francophone sur l’Apprentissage Automatique (Machine Learning French Conference), Saint-Etienne, France, 2014, vol. 1, pp. 49–58.","mla":"Morvant, Emilie. Adaptation de Domaine de Vote de Majorité Par Auto-Étiquetage Non Itératif. Vol. 1, Elsevier, 2014, pp. 49–58."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","author":[{"orcid":"0000-0002-8301-7240","full_name":"Morvant, Emilie","last_name":"Morvant","id":"4BAC2A72-F248-11E8-B48F-1D18A9856A87","first_name":"Emilie"}],"publist_id":"4785","article_processing_charge":"No","title":"Adaptation de domaine de vote de majorité par auto-étiquetage non itératif","department":[{"_id":"ChLa"}]},{"date_published":"2014-05-10T00:00:00Z","volume":32,"date_created":"2018-12-11T11:56:03Z","page":"991 - 999","day":"10","language":[{"iso":"eng"}],"publication_status":"published","year":"2014","month":"05","intvolume":" 32","scopus_import":"1","publisher":"ML Research Press","quality_controlled":"1","main_file_link":[{"open_access":"1","url":"https://dl.acm.org/citation.cfm?id=3045003"}],"oa":1,"oa_version":"Submitted Version","abstract":[{"text":"Transfer learning has received a lot of attention in the machine learning community over the last years, and several effective algorithms have been developed. 
However, relatively little is known about their theoretical properties, especially in the setting of lifelong learning, where the goal is to transfer information to tasks for which no data have been observed so far. In this work we study lifelong learning from a theoretical perspective. Our main result is a PAC-Bayesian generalization bound that offers a unified view on existing paradigms for transfer learning, such as the transfer of parameters or the transfer of low-dimensional representations. We also use the bound to derive two principled lifelong learning algorithms, and we show that these yield results comparable with existing methods.","lang":"eng"}],"title":"A PAC-Bayesian bound for Lifelong Learning","department":[{"_id":"ChLa"}],"author":[{"first_name":"Anastasia","id":"42E87FC6-F248-11E8-B48F-1D18A9856A87","last_name":"Pentina","full_name":"Pentina, Anastasia"},{"orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph","last_name":"Lampert","first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87"}],"publist_id":"4844","article_processing_charge":"No","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"mla":"Pentina, Anastasia, and Christoph Lampert. A PAC-Bayesian Bound for Lifelong Learning. Vol. 32, ML Research Press, 2014, pp. 991–99.","short":"A. Pentina, C. Lampert, in:, ML Research Press, 2014, pp. 991–999.","ieee":"A. Pentina and C. Lampert, “A PAC-Bayesian bound for Lifelong Learning,” presented at the ICML: International Conference on Machine Learning, Beijing, China, 2014, vol. 32, pp. 991–999.","ama":"Pentina A, Lampert C. A PAC-Bayesian bound for Lifelong Learning. In: Vol 32. ML Research Press; 2014:991-999.","apa":"Pentina, A., & Lampert, C. (2014). A PAC-Bayesian bound for Lifelong Learning (Vol. 32, pp. 991–999). Presented at the ICML: International Conference on Machine Learning, Beijing, China: ML Research Press.","chicago":"Pentina, Anastasia, and Christoph Lampert. 
“A PAC-Bayesian Bound for Lifelong Learning,” 32:991–99. ML Research Press, 2014.","ista":"Pentina A, Lampert C. 2014. A PAC-Bayesian bound for Lifelong Learning. ICML: International Conference on Machine Learning vol. 32, 991–999."},"date_updated":"2023-10-17T11:54:24Z","status":"public","type":"conference","conference":{"location":"Beijing, China","end_date":"2014-06-26","start_date":"2014-06-21","name":"ICML: International Conference on Machine Learning"},"_id":"2160"},{"language":[{"iso":"eng"}],"day":"01","publication_status":"published","year":"2013","date_created":"2018-12-11T11:56:49Z","ec_funded":1,"doi":"10.1109/ICCV.2013.139","date_published":"2013-12-01T00:00:00Z","oa_version":"Submitted Version","abstract":[{"lang":"eng","text":"In this work we propose a system for automatic classification of Drosophila embryos into developmental stages.\r\nWhile the system is designed to solve an actual problem in biological research, we believe that the principle underly-\r\ning it is interesting not only for biologists, but also for researchers in computer vision. The main idea is to combine two orthogonal sources of information: one is a classifier trained on strongly invariant features, which makes it applicable to images of very different conditions, but also leads to rather noisy predictions. The other is a label propagation step based on a more powerful similarity measure that however is only consistent within specific subsets of the data at a time.\r\nIn our biological setup, the information sources are the shape and the staining patterns of embryo images. 
We show\r\nexperimentally that while neither of the methods can be used by itself to achieve satisfactory results, their combina-\r\ntion achieves prediction quality comparable to human performance."}],"month":"12","oa":1,"main_file_link":[{"open_access":"1","url":"http://www.cv-foundation.org/openaccess/ICCV2013.py"}],"publisher":"IEEE","quality_controlled":"1","scopus_import":1,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_updated":"2021-01-12T06:56:35Z","citation":{"chicago":"Kazmar, Tomas, Evgeny Kvon, Alexander Stark, and Christoph Lampert. “Drosophila Embryo Stage Annotation Using Label Propagation.” IEEE, 2013. https://doi.org/10.1109/ICCV.2013.139.","ista":"Kazmar T, Kvon E, Stark A, Lampert C. 2013. Drosophila Embryo Stage Annotation using Label Propagation. ICCV: International Conference on Computer Vision.","mla":"Kazmar, Tomas, et al. Drosophila Embryo Stage Annotation Using Label Propagation. IEEE, 2013, doi:10.1109/ICCV.2013.139.","apa":"Kazmar, T., Kvon, E., Stark, A., & Lampert, C. (2013). Drosophila Embryo Stage Annotation using Label Propagation. Presented at the ICCV: International Conference on Computer Vision, Sydney, Australia: IEEE. https://doi.org/10.1109/ICCV.2013.139","ama":"Kazmar T, Kvon E, Stark A, Lampert C. Drosophila Embryo Stage Annotation using Label Propagation. In: IEEE; 2013. doi:10.1109/ICCV.2013.139","ieee":"T. Kazmar, E. Kvon, A. Stark, and C. Lampert, “Drosophila Embryo Stage Annotation using Label Propagation,” presented at the ICCV: International Conference on Computer Vision, Sydney, Australia, 2013.","short":"T. Kazmar, E. Kvon, A. Stark, C. 
Lampert, in:, IEEE, 2013."},"department":[{"_id":"ChLa"}],"title":"Drosophila Embryo Stage Annotation using Label Propagation","author":[{"last_name":"Kazmar","full_name":"Kazmar, Tomas","first_name":"Tomas"},{"first_name":"Evgeny","last_name":"Kvon","full_name":"Kvon, Evgeny"},{"full_name":"Stark, Alexander","last_name":"Stark","first_name":"Alexander"},{"last_name":"Lampert","orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph","first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87"}],"publist_id":"4634","_id":"2294","project":[{"call_identifier":"FP7","_id":"2532554C-B435-11E9-9278-68D0E5697425","grant_number":"308036","name":"Lifelong Learning of Visual Scene Understanding"}],"status":"public","conference":{"start_date":"2013-12-01","end_date":"2013-12-08","location":"Sydney, Australia","name":"ICCV: International Conference on Computer Vision"},"type":"conference"},{"oa_version":"Submitted Version","abstract":[{"text":"Many computer vision problems have an asymmetric distribution of information between training and test time. In this work, we study the case where we are given additional information about the training data, which however will not be available at test time. This situation is called learning using privileged information (LUPI). We introduce two maximum-margin techniques that are able to make use of this additional source of information, and we show that the framework is applicable to several scenarios that have been studied in computer vision before. 
Experiments with attributes, bounding boxes, image tags and rationales as additional information in object classification show promising results.","lang":"eng"}],"month":"12","publisher":"IEEE","scopus_import":1,"quality_controlled":"1","main_file_link":[{"url":"http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Sharmanska_Learning_to_Rank_2013_ICCV_paper.pdf","open_access":"1"}],"oa":1,"day":"01","language":[{"iso":"eng"}],"publication_status":"published","year":"2013","doi":"10.1109/ICCV.2013.107","date_published":"2013-12-01T00:00:00Z","ec_funded":1,"date_created":"2018-12-11T11:56:49Z","page":"825 - 832","_id":"2293","project":[{"name":"Lifelong Learning of Visual Scene Understanding","grant_number":"308036","call_identifier":"FP7","_id":"2532554C-B435-11E9-9278-68D0E5697425"}],"status":"public","type":"conference","conference":{"end_date":"2013-12-08","location":"Sydney, Australia","start_date":"2013-12-01","name":"ICCV: International Conference on Computer Vision"},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_updated":"2023-02-23T10:36:41Z","citation":{"mla":"Sharmanska, Viktoriia, et al. Learning to Rank Using Privileged Information. IEEE, 2013, pp. 825–32, doi:10.1109/ICCV.2013.107.","short":"V. Sharmanska, N. Quadrianto, C. Lampert, in:, IEEE, 2013, pp. 825–832.","ieee":"V. Sharmanska, N. Quadrianto, and C. Lampert, “Learning to rank using privileged information,” presented at the ICCV: International Conference on Computer Vision, Sydney, Australia, 2013, pp. 825–832.","ama":"Sharmanska V, Quadrianto N, Lampert C. Learning to rank using privileged information. In: IEEE; 2013:825-832. doi:10.1109/ICCV.2013.107","apa":"Sharmanska, V., Quadrianto, N., & Lampert, C. (2013). Learning to rank using privileged information (pp. 825–832). Presented at the ICCV: International Conference on Computer Vision, Sydney, Australia: IEEE. https://doi.org/10.1109/ICCV.2013.107","chicago":"Sharmanska, Viktoriia, Novi Quadrianto, and Christoph Lampert. 
“Learning to Rank Using Privileged Information,” 825–32. IEEE, 2013. https://doi.org/10.1109/ICCV.2013.107.","ista":"Sharmanska V, Quadrianto N, Lampert C. 2013. Learning to rank using privileged information. ICCV: International Conference on Computer Vision, 825–832."},"department":[{"_id":"ChLa"}],"title":"Learning to rank using privileged information","author":[{"full_name":"Sharmanska, Viktoriia","orcid":"0000-0003-0192-9308","last_name":"Sharmanska","first_name":"Viktoriia","id":"2EA6D09E-F248-11E8-B48F-1D18A9856A87"},{"last_name":"Quadrianto","full_name":"Quadrianto, Novi","first_name":"Novi"},{"first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph","last_name":"Lampert"}],"publist_id":"4635"},{"citation":{"chicago":"Lampert, Christoph, Hannes Nickisch, and Stefan Harmeling. “Attribute-Based Classification for Zero-Shot Learning of Object Categories.” IEEE Transactions on Pattern Analysis and Machine Intelligence. IEEE, 2013. https://doi.org/10.1109/TPAMI.2013.140.","ista":"Lampert C, Nickisch H, Harmeling S. 2013. Attribute-based classification for zero-shot learning of object categories. IEEE Transactions on Pattern Analysis and Machine Intelligence. 36(3), 453–465.","mla":"Lampert, Christoph, et al. “Attribute-Based Classification for Zero-Shot Learning of Object Categories.” IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 36, no. 3, IEEE, 2013, pp. 453–65, doi:10.1109/TPAMI.2013.140.","apa":"Lampert, C., Nickisch, H., & Harmeling, S. (2013). Attribute-based classification for zero-shot learning of object categories. IEEE Transactions on Pattern Analysis and Machine Intelligence. IEEE. https://doi.org/10.1109/TPAMI.2013.140","ama":"Lampert C, Nickisch H, Harmeling S. Attribute-based classification for zero-shot learning of object categories. IEEE Transactions on Pattern Analysis and Machine Intelligence. 2013;36(3):453-465. 
doi:10.1109/TPAMI.2013.140","ieee":"C. Lampert, H. Nickisch, and S. Harmeling, “Attribute-based classification for zero-shot learning of object categories,” IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 36, no. 3. IEEE, pp. 453–465, 2013.","short":"C. Lampert, H. Nickisch, S. Harmeling, IEEE Transactions on Pattern Analysis and Machine Intelligence 36 (2013) 453–465."},"date_updated":"2021-01-12T06:57:58Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","publist_id":"4385","author":[{"last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87"},{"full_name":"Nickisch, Hannes","last_name":"Nickisch","first_name":"Hannes"},{"first_name":"Stefan","full_name":"Harmeling, Stefan","last_name":"Harmeling"}],"department":[{"_id":"ChLa"}],"title":"Attribute-based classification for zero-shot learning of object categories","_id":"2516","type":"journal_article","status":"public","year":"2013","publication_status":"published","day":"30","language":[{"iso":"eng"}],"publication":"IEEE Transactions on Pattern Analysis and Machine Intelligence","page":"453 - 465","issue":"3","volume":36,"doi":"10.1109/TPAMI.2013.140","date_published":"2013-07-30T00:00:00Z","date_created":"2018-12-11T11:58:08Z","abstract":[{"text":"We study the problem of object recognition for categories for which we have no training examples, a task also called zero-data or zero-shot learning. This situation has hardly been studied in computer vision research, even though it occurs frequently: the world contains tens of thousands of different object classes and for only few of them image collections have been formed and suitably annotated. To tackle the problem we introduce attribute-based classification: objects are identified based on a high-level description that is phrased in terms of semantic attributes, such as the object's color or shape. 
Because the identification of each such property transcends the specific learning task at hand, the attribute classifiers can be pre-learned independently, e.g. from existing image datasets unrelated to the current task. Afterwards, new classes can be detected based on their attribute representation, without the need for a new training phase. In this paper we also introduce a new dataset, Animals with Attributes, of over 30,000 images of 50 animal classes, annotated with 85 semantic attributes. Extensive experiments on this and two more datasets show that attribute-based classification indeed is able to categorize images without access to any training images of the target classes.","lang":"eng"}],"oa_version":"None","quality_controlled":"1","scopus_import":1,"publisher":"IEEE","month":"07","intvolume":" 36"},{"_id":"2520","type":"conference","conference":{"start_date":"2013-07-11","end_date":"2013-07-15","location":"Bellevue, WA, United States","name":"UAI: Uncertainty in Artificial Intelligence"},"status":"public","pubrep_id":"137","date_updated":"2023-02-23T10:46:36Z","ddc":["000"],"file_date_updated":"2020-07-14T12:45:42Z","department":[{"_id":"ChLa"}],"abstract":[{"text":"We propose a probabilistic model to infer supervised latent variables in\r\nthe Hamming space from observed data. Our model allows simultaneous\r\ninference of the number of binary latent variables, and their values. The\r\nlatent variables preserve neighbourhood structure of the data in a sense\r\nthat objects in the same semantic concept have similar latent values, and\r\nobjects in different concepts have dissimilar latent values. We formulate\r\nthe supervised infinite latent variable problem based on an intuitive\r\nprinciple of pulling objects together if they are of the same type, and\r\npushing them apart if they are not. We then combine this principle with a\r\nflexible Indian Buffet Process prior on the latent variables. 
We show that\r\nthe inferred supervised latent variables can be directly used to perform a\r\nnearest neighbour search for the purpose of retrieval. We introduce a new\r\napplication of dynamically extending hash codes, and show how to\r\neffectively couple the structure of the hash codes with continuously\r\ngrowing structure of the neighbourhood preserving infinite latent feature\r\nspace.","lang":"eng"}],"oa_version":"Submitted Version","scopus_import":1,"month":"07","publication_identifier":{"isbn":["9780974903996"]},"publication_status":"published","file":[{"access_level":"open_access","relation":"main_file","content_type":"application/pdf","checksum":"325f20c4b926bd74d39006b97df572bd","file_id":"5134","creator":"system","date_updated":"2020-07-14T12:45:42Z","file_size":1117100,"date_created":"2018-12-12T10:15:16Z","file_name":"IST-2013-137-v1+1_QuaShaKnoGha13.pdf"}],"language":[{"iso":"eng"}],"citation":{"short":"N. Quadrianto, V. Sharmanska, D. Knowles, Z. Ghahramani, in:, Proceedings of the 29th Conference Uncertainty in Artificial Intelligence, AUAI Press, 2013, pp. 527–536.","ieee":"N. Quadrianto, V. Sharmanska, D. Knowles, and Z. Ghahramani, “The supervised IBP: Neighbourhood preserving infinite latent feature models,” in Proceedings of the 29th conference uncertainty in Artificial Intelligence, Bellevue, WA, United States, 2013, pp. 527–536.","ama":"Quadrianto N, Sharmanska V, Knowles D, Ghahramani Z. The supervised IBP: Neighbourhood preserving infinite latent feature models. In: Proceedings of the 29th Conference Uncertainty in Artificial Intelligence. AUAI Press; 2013:527-536.","apa":"Quadrianto, N., Sharmanska, V., Knowles, D., & Ghahramani, Z. (2013). The supervised IBP: Neighbourhood preserving infinite latent feature models. In Proceedings of the 29th conference uncertainty in Artificial Intelligence (pp. 527–536). Bellevue, WA, United States: AUAI Press.","mla":"Quadrianto, Novi, et al. 
“The Supervised IBP: Neighbourhood Preserving Infinite Latent Feature Models.” Proceedings of the 29th Conference Uncertainty in Artificial Intelligence, AUAI Press, 2013, pp. 527–36.","ista":"Quadrianto N, Sharmanska V, Knowles D, Ghahramani Z. 2013. The supervised IBP: Neighbourhood preserving infinite latent feature models. Proceedings of the 29th conference uncertainty in Artificial Intelligence. UAI: Uncertainty in Artificial Intelligence, 527–536.","chicago":"Quadrianto, Novi, Viktoriia Sharmanska, David Knowles, and Zoubin Ghahramani. “The Supervised IBP: Neighbourhood Preserving Infinite Latent Feature Models.” In Proceedings of the 29th Conference Uncertainty in Artificial Intelligence, 527–36. AUAI Press, 2013."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","publist_id":"4381","author":[{"last_name":"Quadrianto","full_name":"Quadrianto, Novi","first_name":"Novi"},{"first_name":"Viktoriia","id":"2EA6D09E-F248-11E8-B48F-1D18A9856A87","full_name":"Sharmanska, Viktoriia","orcid":"0000-0003-0192-9308","last_name":"Sharmanska"},{"first_name":"David","last_name":"Knowles","full_name":"Knowles, David"},{"last_name":"Ghahramani","full_name":"Ghahramani, Zoubin","first_name":"Zoubin"}],"title":"The supervised IBP: Neighbourhood preserving infinite latent feature models","quality_controlled":"1","publisher":"AUAI Press","oa":1,"has_accepted_license":"1","year":"2013","day":"11","publication":"Proceedings of the 29th conference uncertainty in Artificial Intelligence","page":"527 - 536","date_published":"2013-07-11T00:00:00Z","date_created":"2018-12-11T11:58:09Z"},{"date_created":"2018-12-11T12:00:14Z","volume":31,"date_published":"2013-01-01T00:00:00Z","page":"161 - 169","language":[{"iso":"eng"}],"day":"01","publication_status":"published","year":"2013","intvolume":" 31","month":"01","oa":1,"main_file_link":[{"open_access":"1","url":"http://jmlr.org/proceedings/papers/v31/chen13a.html"}],"alternative_title":[" JMLR: 
W&CP"],"scopus_import":1,"publisher":"JMLR","quality_controlled":"1","oa_version":"None","abstract":[{"lang":"eng","text":" We introduce the M-modes problem for graphical models: predicting the M label configurations of highest probability that are at the same time local maxima of the probability landscape. M-modes have multiple possible applications: because they are intrinsically diverse, they provide a principled alternative to non-maximum suppression techniques for structured prediction, they can act as codebook vectors for quantizing the configuration space, or they can form component centers for mixture model approximation. We present two algorithms for solving the M-modes problem. The first algorithm solves the problem in polynomial time when the underlying graphical model is a simple chain. The second algorithm solves the problem for junction chains. In synthetic and real dataset, we demonstrate how M-modes can improve the performance of prediction. We also use the generated modes as a tool to understand the topography of the probability distribution of configurations, for example with relation to the training set size and amount of noise in the data. "}],"department":[{"_id":"HeEd"},{"_id":"VlKo"},{"_id":"ChLa"}],"title":"Computing the M most probable modes of a graphical model","publist_id":"3846","author":[{"first_name":"Chao","id":"3E92416E-F248-11E8-B48F-1D18A9856A87","last_name":"Chen","full_name":"Chen, Chao"},{"full_name":"Kolmogorov, Vladimir","last_name":"Kolmogorov","id":"3D50B0BA-F248-11E8-B48F-1D18A9856A87","first_name":"Vladimir"},{"first_name":"Zhu","full_name":"Yan, Zhu","last_name":"Yan"},{"first_name":"Dimitris","full_name":"Metaxas, Dimitris","last_name":"Metaxas"},{"last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"mla":"Chen, Chao, et al. 
Computing the M Most Probable Modes of a Graphical Model. Vol. 31, JMLR, 2013, pp. 161–69.","apa":"Chen, C., Kolmogorov, V., Yan, Z., Metaxas, D., & Lampert, C. (2013). Computing the M most probable modes of a graphical model (Vol. 31, pp. 161–169). Presented at the AISTATS: Conference on Uncertainty in Artificial Intelligence, Scottsdale, AZ, United States: JMLR.","ama":"Chen C, Kolmogorov V, Yan Z, Metaxas D, Lampert C. Computing the M most probable modes of a graphical model. In: Vol 31. JMLR; 2013:161-169.","ieee":"C. Chen, V. Kolmogorov, Z. Yan, D. Metaxas, and C. Lampert, “Computing the M most probable modes of a graphical model,” presented at the AISTATS: Conference on Uncertainty in Artificial Intelligence, Scottsdale, AZ, United States, 2013, vol. 31, pp. 161–169.","short":"C. Chen, V. Kolmogorov, Z. Yan, D. Metaxas, C. Lampert, in:, JMLR, 2013, pp. 161–169.","chicago":"Chen, Chao, Vladimir Kolmogorov, Zhu Yan, Dimitris Metaxas, and Christoph Lampert. “Computing the M Most Probable Modes of a Graphical Model,” 31:161–69. JMLR, 2013.","ista":"Chen C, Kolmogorov V, Yan Z, Metaxas D, Lampert C. 2013. Computing the M most probable modes of a graphical model. AISTATS: Conference on Uncertainty in Artificial Intelligence, JMLR: W&CP, vol. 31, 161–169."},"date_updated":"2021-01-12T07:00:35Z","status":"public","conference":{"location":"Scottsdale, AZ, United States","end_date":"2013-05-01","start_date":"2013-04-29","name":" AISTATS: Conference on Uncertainty in Artificial Intelligence"},"type":"conference","_id":"2901"},{"abstract":[{"text":"Many visual datasets are traditionally used to analyze the performance of different learning techniques. The evaluation is usually done within each dataset, therefore it is questionable if such results are a reliable indicator of true generalization ability. We propose here an algorithm to exploit the existing data resources when learning on a new multiclass problem. 
Our main idea is to identify an image representation that decomposes orthogonally into two subspaces: a part specific to each dataset, and a part generic to, and therefore shared between, all the considered source sets. This allows us to use the generic representation as un-biased reference knowledge for a novel classification task. By casting the method in the multi-view setting, we also make it possible to use different features for different databases. We call the algorithm MUST, Multitask Unaligned Shared knowledge Transfer. Through extensive experiments on five public datasets, we show that MUST consistently improves the cross-datasets generalization performance.","lang":"eng"}],"oa_version":"Submitted Version","alternative_title":["LNCS"],"scopus_import":1,"intvolume":" 7724","month":"04","publication_status":"published","language":[{"iso":"eng"}],"file":[{"date_created":"2019-01-22T14:03:11Z","file_name":"2012_ACCV_Tommasi.pdf","creator":"dernst","date_updated":"2020-07-14T12:45:55Z","file_size":1513620,"checksum":"a0a7234a89e2192af655b0d0ae3bf445","file_id":"5874","access_level":"open_access","relation":"main_file","content_type":"application/pdf"}],"volume":7724,"_id":"2948","series_title":"Lecture Notes in Computer Science","conference":{"location":"Daejeon, Korea","end_date":"2012-11-09","start_date":"2012-11-05","name":"ACCV: Asian Conference on Computer Vision"},"type":"conference","status":"public","date_updated":"2020-08-11T10:09:54Z","ddc":["000"],"department":[{"_id":"ChLa"}],"file_date_updated":"2020-07-14T12:45:55Z","acknowledgement":"This work was supported by the PASCAL 2 Network of Excellence (TT) and by the Newton International Fellowship (NQ)","oa":1,"publisher":"Springer","quality_controlled":"1","year":"2013","has_accepted_license":"1","day":"04","page":"1 - 15","date_created":"2018-12-11T12:00:30Z","date_published":"2013-04-04T00:00:00Z","doi":"10.1007/978-3-642-37331-2_1","citation":{"ista":"Tommasi T, Quadrianto N, Caputo B, Lampert C. 
2013. Beyond dataset bias: Multi-task unaligned shared knowledge transfer. 7724, 1–15.","chicago":"Tommasi, Tatiana, Novi Quadrianto, Barbara Caputo, and Christoph Lampert. “Beyond Dataset Bias: Multi-Task Unaligned Shared Knowledge Transfer.” Lecture Notes in Computer Science. Springer, 2013. https://doi.org/10.1007/978-3-642-37331-2_1.","apa":"Tommasi, T., Quadrianto, N., Caputo, B., & Lampert, C. (2013). Beyond dataset bias: Multi-task unaligned shared knowledge transfer. Presented at the ACCV: Asian Conference on Computer Vision, Daejeon, Korea: Springer. https://doi.org/10.1007/978-3-642-37331-2_1","ama":"Tommasi T, Quadrianto N, Caputo B, Lampert C. Beyond dataset bias: Multi-task unaligned shared knowledge transfer. 2013;7724:1-15. doi:10.1007/978-3-642-37331-2_1","short":"T. Tommasi, N. Quadrianto, B. Caputo, C. Lampert, 7724 (2013) 1–15.","ieee":"T. Tommasi, N. Quadrianto, B. Caputo, and C. Lampert, “Beyond dataset bias: Multi-task unaligned shared knowledge transfer,” vol. 7724. Springer, pp. 1–15, 2013.","mla":"Tommasi, Tatiana, et al. Beyond Dataset Bias: Multi-Task Unaligned Shared Knowledge Transfer. Vol. 7724, Springer, 2013, pp. 
1–15, doi:10.1007/978-3-642-37331-2_1."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","publist_id":"3784","author":[{"full_name":"Tommasi, Tatiana","last_name":"Tommasi","first_name":"Tatiana"},{"first_name":"Novi","last_name":"Quadrianto","full_name":"Quadrianto, Novi"},{"last_name":"Caputo","full_name":"Caputo, Barbara","first_name":"Barbara"},{"last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph"}],"title":"Beyond dataset bias: Multi-task unaligned shared knowledge transfer"},{"publist_id":"3314","author":[{"full_name":"Quadrianto, Novi","last_name":"Quadrianto","first_name":"Novi"},{"last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph"}],"title":"Kernel based learning","editor":[{"last_name":"Dubitzky","full_name":"Dubitzky, Werner","first_name":"Werner"},{"first_name":"Olaf","full_name":"Wolkenhauer, Olaf","last_name":"Wolkenhauer"},{"last_name":"Cho","full_name":"Cho, Kwang","first_name":"Kwang"},{"full_name":"Yokota, Hiroki","last_name":"Yokota","first_name":"Hiroki"}],"department":[{"_id":"ChLa"}],"citation":{"mla":"Quadrianto, Novi, and Christoph Lampert. “Kernel Based Learning.” Encyclopedia of Systems Biology, edited by Werner Dubitzky et al., vol. 3, Springer, 2013, pp. 1069–1069, doi:10.1007/978-1-4419-9863-7_604.","ama":"Quadrianto N, Lampert C. Kernel based learning. In: Dubitzky W, Wolkenhauer O, Cho K, Yokota H, eds. Encyclopedia of Systems Biology. Vol 3. Springer; 2013:1069-1069. doi:10.1007/978-1-4419-9863-7_604","apa":"Quadrianto, N., & Lampert, C. (2013). Kernel based learning. In W. Dubitzky, O. Wolkenhauer, K. Cho, & H. Yokota (Eds.), Encyclopedia of Systems Biology (Vol. 3, pp. 1069–1069). Springer. https://doi.org/10.1007/978-1-4419-9863-7_604","ieee":"N. Quadrianto and C. 
Lampert, “Kernel based learning,” in Encyclopedia of Systems Biology, vol. 3, W. Dubitzky, O. Wolkenhauer, K. Cho, and H. Yokota, Eds. Springer, 2013, pp. 1069–1069.","short":"N. Quadrianto, C. Lampert, in:, W. Dubitzky, O. Wolkenhauer, K. Cho, H. Yokota (Eds.), Encyclopedia of Systems Biology, Springer, 2013, pp. 1069–1069.","chicago":"Quadrianto, Novi, and Christoph Lampert. “Kernel Based Learning.” In Encyclopedia of Systems Biology, edited by Werner Dubitzky, Olaf Wolkenhauer, Kwang Cho, and Hiroki Yokota, 3:1069–1069. Springer, 2013. https://doi.org/10.1007/978-1-4419-9863-7_604.","ista":"Quadrianto N, Lampert C. 2013.Kernel based learning. In: Encyclopedia of Systems Biology. vol. 3, 1069–1069."},"date_updated":"2021-01-12T07:42:38Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","type":"encyclopedia_article","status":"public","_id":"3321","page":"1069 - 1069","doi":"10.1007/978-1-4419-9863-7_604","volume":3,"date_published":"2013-01-01T00:00:00Z","date_created":"2018-12-11T12:02:39Z","publication_status":"published","year":"2013","day":"01","publication":"Encyclopedia of Systems Biology","language":[{"iso":"eng"}],"quality_controlled":"1","publisher":"Springer","month":"01","intvolume":" 3","oa_version":"None"},{"page":"82 - 90","date_created":"2018-12-11T11:59:48Z","volume":1,"date_published":"2012-12-01T00:00:00Z","year":"2012","publication_status":"published","language":[{"iso":"eng"}],"day":"01","quality_controlled":"1","publisher":"Neural Information Processing Systems","scopus_import":1,"intvolume":" 1","month":"12","abstract":[{"lang":"eng","text":"We study the problem of maximum marginal prediction (MMP) in probabilistic graphical models, a task that occurs, for example, as the Bayes optimal decision rule under a Hamming loss. MMP is typically performed as a two-stage procedure: one estimates each variable's marginal probability and then forms a prediction from the states of maximal probability. 
In this work we propose a simple yet effective technique for accelerating MMP when inference is sampling-based: instead of the above two-stage procedure we directly estimate the posterior probability of each decision variable. This allows us to identify the point of time when we are sufficiently certain about any individual decision. Whenever this is the case, we dynamically prune the variables we are confident about from the underlying factor graph. Consequently, at any time only samples of variables whose decision is still uncertain need to be created. Experiments in two prototypical scenarios, multi-label classification and image inpainting, show that adaptive sampling can drastically accelerate MMP without sacrificing prediction accuracy."}],"oa_version":"None","publist_id":"3975","author":[{"first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","last_name":"Lampert"}],"department":[{"_id":"ChLa"}],"title":"Dynamic pruning of factor graphs for maximum marginal prediction","date_updated":"2021-01-12T06:59:59Z","citation":{"chicago":"Lampert, Christoph. “Dynamic Pruning of Factor Graphs for Maximum Marginal Prediction,” 1:82–90. Neural Information Processing Systems, 2012.","ista":"Lampert C. 2012. Dynamic pruning of factor graphs for maximum marginal prediction. NIPS: Neural Information Processing Systems vol. 1, 82–90.","mla":"Lampert, Christoph. Dynamic Pruning of Factor Graphs for Maximum Marginal Prediction. Vol. 1, Neural Information Processing Systems, 2012, pp. 82–90.","short":"C. Lampert, in:, Neural Information Processing Systems, 2012, pp. 82–90.","ieee":"C. Lampert, “Dynamic pruning of factor graphs for maximum marginal prediction,” presented at the NIPS: Neural Information Processing Systems, Lake Tahoe, NV, United States, 2012, vol. 1, pp. 82–90.","apa":"Lampert, C. (2012). Dynamic pruning of factor graphs for maximum marginal prediction (Vol. 1, pp. 82–90). 
Presented at the NIPS: Neural Information Processing Systems, Lake Tahoe, NV, United States: Neural Information Processing Systems.","ama":"Lampert C. Dynamic pruning of factor graphs for maximum marginal prediction. In: Vol 1. Neural Information Processing Systems; 2012:82-90."},"user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","conference":{"name":"NIPS: Neural Information Processing Systems","start_date":"2012-12-03","end_date":"2012-12-06","location":"Lake Tahoe, NV, United States"},"type":"conference","status":"public","_id":"2825"},{"status":"public","type":"journal_article","_id":"3164","title":"Guest editorial: Special issue on structured prediction and inference","department":[{"_id":"ChLa"}],"publist_id":"3521","author":[{"full_name":"Blaschko, Matthew","last_name":"Blaschko","first_name":"Matthew"},{"full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","last_name":"Lampert","first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87"}],"user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","citation":{"ista":"Blaschko M, Lampert C. 2012. Guest editorial: Special issue on structured prediction and inference. International Journal of Computer Vision. 99(3), 257–258.","chicago":"Blaschko, Matthew, and Christoph Lampert. “Guest Editorial: Special Issue on Structured Prediction and Inference.” International Journal of Computer Vision. Springer, 2012. https://doi.org/10.1007/s11263-012-0530-y.","apa":"Blaschko, M., & Lampert, C. (2012). Guest editorial: Special issue on structured prediction and inference. International Journal of Computer Vision. Springer. https://doi.org/10.1007/s11263-012-0530-y","ama":"Blaschko M, Lampert C. Guest editorial: Special issue on structured prediction and inference. International Journal of Computer Vision. 2012;99(3):257-258. doi:10.1007/s11263-012-0530-y","ieee":"M. Blaschko and C. Lampert, “Guest editorial: Special issue on structured prediction and inference,” International Journal of Computer Vision, vol. 99, no. 
3. Springer, pp. 257–258, 2012.","short":"M. Blaschko, C. Lampert, International Journal of Computer Vision 99 (2012) 257–258.","mla":"Blaschko, Matthew, and Christoph Lampert. “Guest Editorial: Special Issue on Structured Prediction and Inference.” International Journal of Computer Vision, vol. 99, no. 3, Springer, 2012, pp. 257–58, doi:10.1007/s11263-012-0530-y."},"date_updated":"2021-01-12T07:41:30Z","intvolume":" 99","month":"09","scopus_import":1,"quality_controlled":"1","publisher":"Springer","oa_version":"None","abstract":[{"text":"Overview of the Special Issue on structured prediction and inference.","lang":"eng"}],"date_created":"2018-12-11T12:01:46Z","doi":"10.1007/s11263-012-0530-y","issue":"3","volume":99,"date_published":"2012-09-01T00:00:00Z","page":"257 - 258","publication":"International Journal of Computer Vision","language":[{"iso":"eng"}],"day":"01","publication_status":"published","year":"2012"},{"intvolume":" 7576","month":"10","scopus_import":1,"alternative_title":["LNCS"],"oa_version":"Submitted Version","abstract":[{"lang":"eng","text":"We propose a new learning method to infer a mid-level feature representation that combines the advantage of semantic attribute representations with the higher expressive power of non-semantic features. The idea lies in augmenting an existing attribute-based representation with additional dimensions for which an autoencoder model is coupled with a large-margin principle. This construction allows a smooth transition between the zero-shot regime with no training example, the unsupervised regime with training examples but without class labels, and the supervised regime with training examples and with class labels. The resulting optimization problem can be solved efficiently, because several of the necessity steps have closed-form solutions. 
Through extensive experiments we show that the augmented representation achieves better results in terms of object categorization accuracy than the semantic representation alone."}],"issue":"PART 5","volume":7576,"language":[{"iso":"eng"}],"file":[{"file_name":"2012_ECCV_Sharmanska.pdf","date_created":"2020-05-15T12:29:04Z","file_size":6073897,"date_updated":"2020-07-14T12:46:00Z","creator":"dernst","file_id":"7861","checksum":"bccdbe0663780d25a1e0524002b2d896","content_type":"application/pdf","relation":"main_file","access_level":"open_access"}],"publication_status":"published","status":"public","conference":{"start_date":"2012-10-07","location":"Florence, Italy","end_date":"2012-10-13","name":"ECCV: European Conference on Computer Vision"},"type":"conference","_id":"3125","file_date_updated":"2020-07-14T12:46:00Z","department":[{"_id":"ChLa"}],"ddc":["000"],"date_updated":"2023-02-23T11:13:25Z","oa":1,"publisher":"Springer","quality_controlled":"1","date_created":"2018-12-11T12:01:32Z","date_published":"2012-10-01T00:00:00Z","doi":"10.1007/978-3-642-33715-4_18","page":"242 - 255","day":"01","year":"2012","has_accepted_license":"1","title":"Augmented attribute representations","article_processing_charge":"No","author":[{"last_name":"Sharmanska","full_name":"Sharmanska, Viktoriia","orcid":"0000-0003-0192-9308","first_name":"Viktoriia","id":"2EA6D09E-F248-11E8-B48F-1D18A9856A87"},{"full_name":"Quadrianto, Novi","last_name":"Quadrianto","first_name":"Novi"},{"first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887"}],"publist_id":"3574","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"ista":"Sharmanska V, Quadrianto N, Lampert C. 2012. Augmented attribute representations. ECCV: European Conference on Computer Vision, LNCS, vol. 7576, 242–255.","chicago":"Sharmanska, Viktoriia, Novi Quadrianto, and Christoph Lampert. 
“Augmented Attribute Representations,” 7576:242–55. Springer, 2012. https://doi.org/10.1007/978-3-642-33715-4_18.","apa":"Sharmanska, V., Quadrianto, N., & Lampert, C. (2012). Augmented attribute representations (Vol. 7576, pp. 242–255). Presented at the ECCV: European Conference on Computer Vision, Florence, Italy: Springer. https://doi.org/10.1007/978-3-642-33715-4_18","ama":"Sharmanska V, Quadrianto N, Lampert C. Augmented attribute representations. In: Vol 7576. Springer; 2012:242-255. doi:10.1007/978-3-642-33715-4_18","ieee":"V. Sharmanska, N. Quadrianto, and C. Lampert, “Augmented attribute representations,” presented at the ECCV: European Conference on Computer Vision, Florence, Italy, 2012, vol. 7576, no. PART 5, pp. 242–255.","short":"V. Sharmanska, N. Quadrianto, C. Lampert, in:, Springer, 2012, pp. 242–255.","mla":"Sharmanska, Viktoriia, et al. Augmented Attribute Representations. Vol. 7576, no. PART 5, Springer, 2012, pp. 242–55, doi:10.1007/978-3-642-33715-4_18."}},{"abstract":[{"text":"In this work we propose a new information-theoretic clustering algorithm that infers cluster memberships by direct optimization of a non-parametric mutual information estimate between data distribution and cluster assignment. Although the optimization objective has a solid theoretical foundation it is hard to optimize. We propose an approximate optimization formulation that leads to an efficient algorithm with low runtime complexity. The algorithm has a single free parameter, the number of clusters to find. 
We demonstrate superior performance on several synthetic and real datasets.\r\n","lang":"eng"}],"oa_version":"None","quality_controlled":"1","scopus_import":1,"publisher":"Springer","alternative_title":["LNCS"],"intvolume":" 7476","month":"08","publication_status":"published","year":"2012","language":[{"iso":"eng"}],"day":"14","page":"205 - 215","date_created":"2018-12-11T12:01:32Z","doi":"10.1007/978-3-642-32717-9_21","date_published":"2012-08-14T00:00:00Z","volume":7476,"_id":"3126","conference":{"start_date":"2012-08-28","location":"Graz, Austria","end_date":"2012-08-31","name":"DAGM: German Association For Pattern Recognition"},"type":"conference","status":"public","date_updated":"2021-01-12T07:41:14Z","citation":{"chicago":"Müller, Andreas, Sebastian Nowozin, and Christoph Lampert. “Information Theoretic Clustering Using Minimal Spanning Trees,” 7476:205–15. Springer, 2012. https://doi.org/10.1007/978-3-642-32717-9_21.","ista":"Müller A, Nowozin S, Lampert C. 2012. Information theoretic clustering using minimal spanning trees. DAGM: German Association For Pattern Recognition, LNCS, vol. 7476, 205–215.","mla":"Müller, Andreas, et al. Information Theoretic Clustering Using Minimal Spanning Trees. Vol. 7476, Springer, 2012, pp. 205–15, doi:10.1007/978-3-642-32717-9_21.","ama":"Müller A, Nowozin S, Lampert C. Information theoretic clustering using minimal spanning trees. In: Vol 7476. Springer; 2012:205-215. doi:10.1007/978-3-642-32717-9_21","apa":"Müller, A., Nowozin, S., & Lampert, C. (2012). Information theoretic clustering using minimal spanning trees (Vol. 7476, pp. 205–215). Presented at the DAGM: German Association For Pattern Recognition, Graz, Austria: Springer. https://doi.org/10.1007/978-3-642-32717-9_21","short":"A. Müller, S. Nowozin, C. Lampert, in:, Springer, 2012, pp. 205–215.","ieee":"A. Müller, S. Nowozin, and C. 
Lampert, “Information theoretic clustering using minimal spanning trees,” presented at the DAGM: German Association For Pattern Recognition, Graz, Austria, 2012, vol. 7476, pp. 205–215."},"user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","publist_id":"3573","author":[{"last_name":"Müller","full_name":"Müller, Andreas","first_name":"Andreas"},{"first_name":"Sebastian","last_name":"Nowozin","full_name":"Nowozin, Sebastian"},{"last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87"}],"department":[{"_id":"ChLa"}],"title":"Information theoretic clustering using minimal spanning trees"},{"citation":{"apa":"Lampert, C., & Peters, J. (2012). Real-time detection of colored objects in multiple camera streams with off-the-shelf hardware components. Journal of Real-Time Image Processing. Springer. https://doi.org/10.1007/s11554-010-0168-3","ama":"Lampert C, Peters J. Real-time detection of colored objects in multiple camera streams with off-the-shelf hardware components. Journal of Real-Time Image Processing. 2012;7(1):31-41. doi:10.1007/s11554-010-0168-3","short":"C. Lampert, J. Peters, Journal of Real-Time Image Processing 7 (2012) 31–41.","ieee":"C. Lampert and J. Peters, “Real-time detection of colored objects in multiple camera streams with off-the-shelf hardware components,” Journal of Real-Time Image Processing, vol. 7, no. 1. Springer, pp. 31–41, 2012.","mla":"Lampert, Christoph, and Jan Peters. “Real-Time Detection of Colored Objects in Multiple Camera Streams with off-the-Shelf Hardware Components.” Journal of Real-Time Image Processing, vol. 7, no. 1, Springer, 2012, pp. 31–41, doi:10.1007/s11554-010-0168-3.","ista":"Lampert C, Peters J. 2012. Real-time detection of colored objects in multiple camera streams with off-the-shelf hardware components. Journal of Real-Time Image Processing. 7(1), 31–41.","chicago":"Lampert, Christoph, and Jan Peters. 
“Real-Time Detection of Colored Objects in Multiple Camera Streams with off-the-Shelf Hardware Components.” Journal of Real-Time Image Processing. Springer, 2012. https://doi.org/10.1007/s11554-010-0168-3."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","article_processing_charge":"No","author":[{"first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","last_name":"Lampert"},{"last_name":"Peters","full_name":"Peters, Jan","first_name":"Jan"}],"publist_id":"3417","title":"Real-time detection of colored objects in multiple camera streams with off-the-shelf hardware components","oa":1,"publisher":"Springer","quality_controlled":"1","year":"2012","has_accepted_license":"1","publication":"Journal of Real-Time Image Processing","day":"01","page":"31 - 41","date_created":"2018-12-11T12:02:15Z","doi":"10.1007/s11554-010-0168-3","date_published":"2012-03-01T00:00:00Z","_id":"3248","article_type":"original","type":"journal_article","status":"public","date_updated":"2022-05-24T08:05:40Z","ddc":["000"],"department":[{"_id":"ChLa"}],"file_date_updated":"2020-07-14T12:46:04Z","abstract":[{"lang":"eng","text":"We describe RTblob, a high speed vision system that detects objects in cluttered scenes based on their color and shape at a speed of over 800 frames/s. Because the system is available as open-source software and relies only on off-the-shelf PC hardware components, it can provide the basis for multiple application scenarios. 
As an illustrative example, we show how RTblob can be used in a robotic table tennis scenario to estimate ball trajectories through 3D space simultaneously from four camera images at a speed of 200 Hz."}],"oa_version":"Submitted Version","scopus_import":1,"intvolume":" 7","month":"03","publication_status":"published","publication_identifier":{"eissn":["1861-8219"],"issn":["1861-8200"]},"language":[{"iso":"eng"}],"file":[{"checksum":"241be47ea50e81a283bcf4c45b07e8cc","file_id":"5958","relation":"main_file","access_level":"open_access","content_type":"application/pdf","file_name":"2012_Springer_Lampert.pdf","date_created":"2019-02-12T10:52:25Z","creator":"kschuh","file_size":2933187,"date_updated":"2020-07-14T12:46:04Z"}],"volume":7,"issue":"1"},{"abstract":[{"lang":"eng","text":"We consider the problem of inference in a graphical model with binary variables. While in theory it is arguably preferable to compute marginal probabilities, in practice researchers often use MAP inference due to the availability of efficient discrete optimization algorithms. We bridge the gap between the two approaches by introducing the Discrete Marginals technique in which approximate marginals are obtained by minimizing an objective function with unary and pairwise terms over a discretized domain. This allows the use of techniques originally developed for MAP-MRF inference and learning. We explore two ways to set up the objective function - by discretizing the Bethe free energy and by learning it from training data. Experimental results show that for certain types of graphs a learned function can outperform the Bethe approximation. 
We also establish a link between the Bethe free energy and submodular functions.\r\n"}],"oa_version":"Submitted Version","oa":1,"alternative_title":["Inferning 2012"],"publisher":"ICML","quality_controlled":"1","month":"06","publication_status":"published","year":"2012","has_accepted_license":"1","language":[{"iso":"eng"}],"day":"30","file":[{"creator":"system","date_updated":"2020-07-14T12:46:00Z","file_size":305836,"date_created":"2018-12-12T10:11:34Z","file_name":"IST-2016-565-v1+1_DM-inferning2012.pdf","access_level":"open_access","relation":"main_file","content_type":"application/pdf","file_id":"4889","checksum":"3d0d4246548c736857302aadb2ff5d15"}],"date_created":"2018-12-11T12:01:31Z","date_published":"2012-06-30T00:00:00Z","related_material":{"record":[{"id":"5396","status":"public","relation":"later_version"}]},"_id":"3124","conference":{"start_date":"2012-06-26","location":"Edinburgh, Scotland","end_date":"2012-07-01","name":"ICML: International Conference on Machine Learning"},"type":"conference","pubrep_id":"565","status":"public","citation":{"mla":"Korc, Filip, et al. Approximating Marginals Using Discrete Energy Minimization. ICML, 2012.","ieee":"F. Korc, V. Kolmogorov, and C. Lampert, “Approximating marginals using discrete energy minimization,” presented at the ICML: International Conference on Machine Learning, Edinburgh, Scotland, 2012.","short":"F. Korc, V. Kolmogorov, C. Lampert, in:, ICML, 2012.","ama":"Korc F, Kolmogorov V, Lampert C. Approximating marginals using discrete energy minimization. In: ICML; 2012.","apa":"Korc, F., Kolmogorov, V., & Lampert, C. (2012). Approximating marginals using discrete energy minimization. Presented at the ICML: International Conference on Machine Learning, Edinburgh, Scotland: ICML.","chicago":"Korc, Filip, Vladimir Kolmogorov, and Christoph Lampert. “Approximating Marginals Using Discrete Energy Minimization.” ICML, 2012.","ista":"Korc F, Kolmogorov V, Lampert C. 2012. 
Approximating marginals using discrete energy minimization. ICML: International Conference on Machine Learning, Inferning 2012."},"date_updated":"2023-02-23T12:24:24Z","ddc":["000"],"user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","author":[{"last_name":"Korc","full_name":"Korc, Filip","id":"476A2FD6-F248-11E8-B48F-1D18A9856A87","first_name":"Filip"},{"full_name":"Kolmogorov, Vladimir","last_name":"Kolmogorov","first_name":"Vladimir","id":"3D50B0BA-F248-11E8-B48F-1D18A9856A87"},{"last_name":"Lampert","orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph","first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87"}],"publist_id":"3575","department":[{"_id":"ChLa"},{"_id":"VlKo"}],"title":"Approximating marginals using discrete energy minimization","file_date_updated":"2020-07-14T12:46:00Z"}]