[{"date_published":"2019-10-01T00:00:00Z","citation":{"mla":"Kolesnikov, Alexander, et al. “Detecting Visual Relationships Using Box Attention.” Proceedings of the 2019 International Conference on Computer Vision Workshop, 1749–1753, IEEE, 2019, doi:10.1109/ICCVW.2019.00217.","short":"A. Kolesnikov, A. Kuznetsova, C. Lampert, V. Ferrari, in:, Proceedings of the 2019 International Conference on Computer Vision Workshop, IEEE, 2019.","chicago":"Kolesnikov, Alexander, Alina Kuznetsova, Christoph Lampert, and Vittorio Ferrari. “Detecting Visual Relationships Using Box Attention.” In Proceedings of the 2019 International Conference on Computer Vision Workshop. IEEE, 2019. https://doi.org/10.1109/ICCVW.2019.00217.","ama":"Kolesnikov A, Kuznetsova A, Lampert C, Ferrari V. Detecting visual relationships using box attention. In: Proceedings of the 2019 International Conference on Computer Vision Workshop. IEEE; 2019. doi:10.1109/ICCVW.2019.00217","ista":"Kolesnikov A, Kuznetsova A, Lampert C, Ferrari V. 2019. Detecting visual relationships using box attention. Proceedings of the 2019 International Conference on Computer Vision Workshop. ICCVW: International Conference on Computer Vision Workshop, 1749–1753.","apa":"Kolesnikov, A., Kuznetsova, A., Lampert, C., & Ferrari, V. (2019). Detecting visual relationships using box attention. In Proceedings of the 2019 International Conference on Computer Vision Workshop. Seoul, South Korea: IEEE. https://doi.org/10.1109/ICCVW.2019.00217","ieee":"A. Kolesnikov, A. Kuznetsova, C. Lampert, and V. 
Ferrari, “Detecting visual relationships using box attention,” in Proceedings of the 2019 International Conference on Computer Vision Workshop, Seoul, South Korea, 2019."},"publication":"Proceedings of the 2019 International Conference on Computer Vision Workshop","article_processing_charge":"No","day":"01","scopus_import":"1","oa_version":"Preprint","user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","_id":"7640","title":"Detecting visual relationships using box attention","status":"public","abstract":[{"text":"We propose a new model for detecting visual relationships, such as \"person riding motorcycle\" or \"bottle on table\". This task is an important step towards comprehensive structured image understanding, going beyond detecting individual objects. Our main novelty is a Box Attention mechanism that allows to model pairwise interactions between objects using standard object detection pipelines. The resulting model is conceptually clean, expressive and relies on well-justified training and prediction procedures. Moreover, unlike previously proposed approaches, our model does not introduce any additional complex components or hyperparameters on top of those already required by the underlying detection model. 
We conduct an experimental evaluation on two datasets, V-COCO and Open Images, demonstrating strong quantitative and qualitative results.","lang":"eng"}],"type":"conference","doi":"10.1109/ICCVW.2019.00217","conference":{"start_date":"2019-10-27","location":"Seoul, South Korea","end_date":"2019-10-28","name":"ICCVW: International Conference on Computer Vision Workshop"},"language":[{"iso":"eng"}],"external_id":{"arxiv":["1807.02136"],"isi":["000554591601098"]},"main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/1807.02136"}],"oa":1,"project":[{"name":"Lifelong Learning of Visual Scene Understanding","call_identifier":"FP7","_id":"2532554C-B435-11E9-9278-68D0E5697425","grant_number":"308036"}],"isi":1,"quality_controlled":"1","publication_identifier":{"isbn":["9781728150239"]},"month":"10","author":[{"full_name":"Kolesnikov, Alexander","id":"2D157DB6-F248-11E8-B48F-1D18A9856A87","first_name":"Alexander","last_name":"Kolesnikov"},{"full_name":"Kuznetsova, Alina","last_name":"Kuznetsova","first_name":"Alina"},{"id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8622-7887","first_name":"Christoph","last_name":"Lampert","full_name":"Lampert, Christoph"},{"full_name":"Ferrari, Vittorio","last_name":"Ferrari","first_name":"Vittorio"}],"date_updated":"2023-09-08T11:18:37Z","date_created":"2020-04-05T22:00:51Z","year":"2019","department":[{"_id":"ChLa"}],"publisher":"IEEE","publication_status":"published","ec_funded":1,"article_number":"1749-1753"},{"date_published":"2018-05-25T00:00:00Z","page":"113","citation":{"short":"A. Kolesnikov, Weakly-Supervised Segmentation and Unsupervised Modeling of Natural Images, Institute of Science and Technology Austria, 2018.","mla":"Kolesnikov, Alexander. Weakly-Supervised Segmentation and Unsupervised Modeling of Natural Images. Institute of Science and Technology Austria, 2018, doi:10.15479/AT:ISTA:th_1021.","chicago":"Kolesnikov, Alexander. 
“Weakly-Supervised Segmentation and Unsupervised Modeling of Natural Images.” Institute of Science and Technology Austria, 2018. https://doi.org/10.15479/AT:ISTA:th_1021.","ama":"Kolesnikov A. Weakly-Supervised Segmentation and Unsupervised Modeling of Natural Images. 2018. doi:10.15479/AT:ISTA:th_1021","ieee":"A. Kolesnikov, “Weakly-Supervised Segmentation and Unsupervised Modeling of Natural Images,” Institute of Science and Technology Austria, 2018.","apa":"Kolesnikov, A. (2018). Weakly-Supervised Segmentation and Unsupervised Modeling of Natural Images. Institute of Science and Technology Austria. https://doi.org/10.15479/AT:ISTA:th_1021","ista":"Kolesnikov A. 2018. Weakly-Supervised Segmentation and Unsupervised Modeling of Natural Images. Institute of Science and Technology Austria."},"article_processing_charge":"No","has_accepted_license":"1","day":"25","oa_version":"Published Version","file":[{"creator":"system","file_size":12918758,"content_type":"application/pdf","file_name":"IST-2018-1021-v1+1_thesis-unsigned-pdfa.pdf","access_level":"open_access","date_created":"2018-12-12T10:14:57Z","date_updated":"2020-07-14T12:45:22Z","checksum":"bc678e02468d8ebc39dc7267dfb0a1c4","file_id":"5113","relation":"main_file"},{"creator":"dernst","content_type":"application/zip","file_size":55973760,"file_name":"2018_Thesis_Kolesnikov_source.zip","access_level":"closed","date_created":"2019-04-05T09:34:49Z","date_updated":"2020-07-14T12:45:22Z","checksum":"bc66973b086da5a043f1162dcfb1fde4","file_id":"6225","relation":"source_file"}],"pubrep_id":"1021","status":"public","ddc":["004"],"title":"Weakly-Supervised Segmentation and Unsupervised Modeling of Natural Images","_id":"197","user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","abstract":[{"lang":"eng","text":"Modern computer vision systems heavily rely on statistical machine learning models, which typically require large amounts of labeled data to be learned reliably. 
Moreover, very recently computer vision research widely adopted techniques for representation learning, which further increase the demand for labeled data. However, for many important practical problems there is relatively small amount of labeled data available, so it is problematic to leverage full potential of the representation learning methods. One way to overcome this obstacle is to invest substantial resources into producing large labelled datasets. Unfortunately, this can be prohibitively expensive in practice. In this thesis we focus on the alternative way of tackling the aforementioned issue. We concentrate on methods, which make use of weakly-labeled or even unlabeled data. Specifically, the first half of the thesis is dedicated to the semantic image segmentation task. We develop a technique, which achieves competitive segmentation performance and only requires annotations in a form of global image-level labels instead of dense segmentation masks. Subsequently, we present a new methodology, which further improves segmentation performance by leveraging tiny additional feedback from a human annotator. By using our methods practitioners can greatly reduce the amount of data annotation effort, which is required to learn modern image segmentation models. In the second half of the thesis we focus on methods for learning from unlabeled visual data. We study a family of autoregressive models for modeling structure of natural images and discuss potential applications of these models. 
Moreover, we conduct in-depth study of one of these applications, where we develop the state-of-the-art model for the probabilistic image colorization task."}],"alternative_title":["ISTA Thesis"],"type":"dissertation","language":[{"iso":"eng"}],"degree_awarded":"PhD","supervisor":[{"full_name":"Lampert, Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8622-7887","first_name":"Christoph","last_name":"Lampert"}],"doi":"10.15479/AT:ISTA:th_1021","project":[{"_id":"2532554C-B435-11E9-9278-68D0E5697425","grant_number":"308036","name":"Lifelong Learning of Visual Scene Understanding","call_identifier":"FP7"}],"oa":1,"publication_identifier":{"issn":["2663-337X"]},"month":"05","date_updated":"2023-09-07T12:51:46Z","date_created":"2018-12-11T11:45:09Z","author":[{"full_name":"Kolesnikov, Alexander","last_name":"Kolesnikov","first_name":"Alexander","id":"2D157DB6-F248-11E8-B48F-1D18A9856A87"}],"department":[{"_id":"ChLa"}],"publisher":"Institute of Science and Technology Austria","publication_status":"published","acknowledgement":"I also gratefully acknowledge the support of NVIDIA Corporation with the donation of the GPUs used for this research.","year":"2018","ec_funded":1,"publist_id":"7718","file_date_updated":"2020-07-14T12:45:22Z"},{"page":"1231-1245","citation":{"chicago":"Ringbauer, Harald, Alexander Kolesnikov, David Field, and Nicholas H Barton. “Estimating Barriers to Gene Flow from Distorted Isolation-by-Distance Patterns.” Genetics. Genetics Society of America, 2018. https://doi.org/10.1534/genetics.117.300638.","short":"H. Ringbauer, A. Kolesnikov, D. Field, N.H. Barton, Genetics 208 (2018) 1231–1245.","mla":"Ringbauer, Harald, et al. “Estimating Barriers to Gene Flow from Distorted Isolation-by-Distance Patterns.” Genetics, vol. 208, no. 3, Genetics Society of America, 2018, pp. 1231–45, doi:10.1534/genetics.117.300638.","apa":"Ringbauer, H., Kolesnikov, A., Field, D., & Barton, N. H. (2018). 
Estimating barriers to gene flow from distorted isolation-by-distance patterns. Genetics. Genetics Society of America. https://doi.org/10.1534/genetics.117.300638","ieee":"H. Ringbauer, A. Kolesnikov, D. Field, and N. H. Barton, “Estimating barriers to gene flow from distorted isolation-by-distance patterns,” Genetics, vol. 208, no. 3. Genetics Society of America, pp. 1231–1245, 2018.","ista":"Ringbauer H, Kolesnikov A, Field D, Barton NH. 2018. Estimating barriers to gene flow from distorted isolation-by-distance patterns. Genetics. 208(3), 1231–1245.","ama":"Ringbauer H, Kolesnikov A, Field D, Barton NH. Estimating barriers to gene flow from distorted isolation-by-distance patterns. Genetics. 2018;208(3):1231-1245. doi:10.1534/genetics.117.300638"},"publication":"Genetics","date_published":"2018-03-01T00:00:00Z","scopus_import":"1","article_processing_charge":"No","day":"01","intvolume":" 208","title":"Estimating barriers to gene flow from distorted isolation-by-distance patterns","status":"public","_id":"563","user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","oa_version":"Preprint","type":"journal_article","issue":"3","abstract":[{"text":"In continuous populations with local migration, nearby pairs of individuals have on average more similar genotypes\r\nthan geographically well separated pairs. A barrier to gene flow distorts this classical pattern of isolation by distance. Genetic similarity is decreased for sample pairs on different sides of the barrier and increased for pairs on the same side near the barrier. Here, we introduce an inference scheme that utilizes this signal to detect and estimate the strength of a linear barrier to gene flow in two-dimensions. We use a diffusion approximation to model the effects of a barrier on the geographical spread of ancestry backwards in time. This approach allows us to calculate the chance of recent coalescence and probability of identity by descent. 
We introduce an inference scheme that fits these theoretical results to the geographical covariance structure of biallelic genetic markers. It can estimate the strength of the barrier as well as several demographic parameters. We investigate the power of our inference scheme to detect barriers by applying it to a wide range of simulated data. We also showcase an example application to an Antirrhinum majus (snapdragon) flower color hybrid zone, where we do not detect any signal of a strong genome wide barrier to gene flow.","lang":"eng"}],"quality_controlled":"1","isi":1,"oa":1,"main_file_link":[{"open_access":"1","url":"https://www.biorxiv.org/content/10.1101/205484v1"}],"external_id":{"isi":["000426219600025"]},"language":[{"iso":"eng"}],"doi":"10.1534/genetics.117.300638","month":"03","publisher":"Genetics Society of America","department":[{"_id":"NiBa"},{"_id":"ChLa"}],"publication_status":"published","year":"2018","volume":208,"date_created":"2018-12-11T11:47:12Z","date_updated":"2023-09-11T13:42:38Z","related_material":{"record":[{"id":"200","status":"public","relation":"dissertation_contains"}]},"author":[{"full_name":"Ringbauer, Harald","last_name":"Ringbauer","first_name":"Harald","orcid":"0000-0002-4884-9682","id":"417FCFF4-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Alexander","last_name":"Kolesnikov","id":"2D157DB6-F248-11E8-B48F-1D18A9856A87","full_name":"Kolesnikov, Alexander"},{"last_name":"Field","first_name":"David","full_name":"Field, David"},{"first_name":"Nicholas H","last_name":"Barton","id":"4880FE40-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-8548-5240","full_name":"Barton, Nicholas H"}],"publist_id":"7251"},{"oa_version":"Submitted Version","intvolume":" 70","title":"PixelCNN models with auxiliary variables for natural image modeling","status":"public","_id":"1000","user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","abstract":[{"text":"We study probabilistic models of natural images and extend the autoregressive family of PixelCNN models 
by incorporating latent variables. Subsequently, we describe two new generative image models that exploit different image transformations as latent variables: a quantized grayscale view of the image or a multi-resolution image pyramid. The proposed models tackle two known shortcomings of existing PixelCNN models: 1) their tendency to focus on low-level image details, while largely ignoring high-level image information, such as object shapes, and 2) their computationally costly procedure for image sampling. We experimentally demonstrate benefits of our LatentPixelCNN models, in particular showing that they produce much more realistically looking image samples than previous state-of-the-art probabilistic models. ","lang":"eng"}],"type":"conference","date_published":"2017-08-01T00:00:00Z","page":"1905 - 1914","citation":{"chicago":"Kolesnikov, Alexander, and Christoph Lampert. “PixelCNN Models with Auxiliary Variables for Natural Image Modeling.” In 34th International Conference on Machine Learning, 70:1905–14. JMLR, 2017.","mla":"Kolesnikov, Alexander, and Christoph Lampert. “PixelCNN Models with Auxiliary Variables for Natural Image Modeling.” 34th International Conference on Machine Learning, vol. 70, JMLR, 2017, pp. 1905–14.","short":"A. Kolesnikov, C. Lampert, in:, 34th International Conference on Machine Learning, JMLR, 2017, pp. 1905–1914.","ista":"Kolesnikov A, Lampert C. 2017. PixelCNN models with auxiliary variables for natural image modeling. 34th International Conference on Machine Learning. ICML: International Conference on Machine Learning vol. 70, 1905–1914.","apa":"Kolesnikov, A., & Lampert, C. (2017). PixelCNN models with auxiliary variables for natural image modeling. In 34th International Conference on Machine Learning (Vol. 70, pp. 1905–1914). Sydney, Australia: JMLR.","ieee":"A. Kolesnikov and C. 
Lampert, “PixelCNN models with auxiliary variables for natural image modeling,” in 34th International Conference on Machine Learning, Sydney, Australia, 2017, vol. 70, pp. 1905–1914.","ama":"Kolesnikov A, Lampert C. PixelCNN models with auxiliary variables for natural image modeling. In: 34th International Conference on Machine Learning. Vol 70. JMLR; 2017:1905-1914."},"publication":"34th International Conference on Machine Learning","has_accepted_license":"1","article_processing_charge":"No","day":"01","scopus_import":"1","volume":70,"date_created":"2018-12-11T11:49:37Z","date_updated":"2023-09-22T09:50:41Z","author":[{"full_name":"Kolesnikov, Alexander","first_name":"Alexander","last_name":"Kolesnikov","id":"2D157DB6-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Christoph","last_name":"Lampert","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph"}],"publisher":"JMLR","department":[{"_id":"ChLa"}],"publication_status":"published","year":"2017","acknowledgement":"We thank Tim Salimans for spotting a mistake in our preliminary arXiv manuscript. 
This work was funded by the European Research Council under the European Unions Seventh Framework Programme (FP7/2007-2013)/ERC grant agreement no 308036.","ec_funded":1,"publist_id":"6398","language":[{"iso":"eng"}],"conference":{"name":"ICML: International Conference on Machine Learning","location":"Sydney, Australia","start_date":"2017-08-06","end_date":"2017-08-11"},"project":[{"call_identifier":"FP7","name":"Lifelong Learning of Visual Scene Understanding","grant_number":"308036","_id":"2532554C-B435-11E9-9278-68D0E5697425"}],"isi":1,"quality_controlled":"1","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/1612.08185"}],"external_id":{"arxiv":["1612.08185"],"isi":["000683309501102"]},"oa":1,"publication_identifier":{"isbn":["978-151085514-4"]},"month":"08"},{"day":"14","article_processing_charge":"No","scopus_import":"1","date_published":"2017-04-14T00:00:00Z","citation":{"ama":"Rebuffi SA, Kolesnikov A, Sperl G, Lampert C. iCaRL: Incremental classifier and representation learning. In: Vol 2017. IEEE; 2017:5533-5542. doi:10.1109/CVPR.2017.587","apa":"Rebuffi, S. A., Kolesnikov, A., Sperl, G., & Lampert, C. (2017). iCaRL: Incremental classifier and representation learning (Vol. 2017, pp. 5533–5542). Presented at the CVPR: Computer Vision and Pattern Recognition, Honolulu, HA, United States: IEEE. https://doi.org/10.1109/CVPR.2017.587","ieee":"S. A. Rebuffi, A. Kolesnikov, G. Sperl, and C. Lampert, “iCaRL: Incremental classifier and representation learning,” presented at the CVPR: Computer Vision and Pattern Recognition, Honolulu, HA, United States, 2017, vol. 2017, pp. 5533–5542.","ista":"Rebuffi SA, Kolesnikov A, Sperl G, Lampert C. 2017. iCaRL: Incremental classifier and representation learning. CVPR: Computer Vision and Pattern Recognition vol. 2017, 5533–5542.","short":"S.A. Rebuffi, A. Kolesnikov, G. Sperl, C. Lampert, in:, IEEE, 2017, pp. 5533–5542.","mla":"Rebuffi, Sylvestre Alvise, et al. 
ICaRL: Incremental Classifier and Representation Learning. Vol. 2017, IEEE, 2017, pp. 5533–42, doi:10.1109/CVPR.2017.587.","chicago":"Rebuffi, Sylvestre Alvise, Alexander Kolesnikov, Georg Sperl, and Christoph Lampert. “ICaRL: Incremental Classifier and Representation Learning,” 2017:5533–42. IEEE, 2017. https://doi.org/10.1109/CVPR.2017.587."},"page":"5533 - 5542","abstract":[{"text":"A major open problem on the road to artificial intelligence is the development of incrementally learning systems that learn about more and more concepts over time from a stream of data. In this work, we introduce a new training strategy, iCaRL, that allows learning in such a class-incremental way: only the training data for a small number of classes has to be present at the same time and new classes can be added progressively. iCaRL learns strong classifiers and a data representation simultaneously. This distinguishes it from earlier works that were fundamentally limited to fixed data representations and therefore incompatible with deep learning architectures. We show by experiments on CIFAR-100 and ImageNet ILSVRC 2012 data that iCaRL can learn many classes incrementally over a long period of time where other strategies quickly fail. 
","lang":"eng"}],"type":"conference","oa_version":"Submitted Version","_id":"998","user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","title":"iCaRL: Incremental classifier and representation learning","status":"public","intvolume":" 2017","month":"04","publication_identifier":{"isbn":["978-153860457-1"]},"conference":{"name":"CVPR: Computer Vision and Pattern Recognition","location":"Honolulu, HA, United States","start_date":"2017-07-21","end_date":"2017-07-26"},"doi":"10.1109/CVPR.2017.587","language":[{"iso":"eng"}],"main_file_link":[{"url":"https://arxiv.org/abs/1611.07725","open_access":"1"}],"oa":1,"external_id":{"isi":["000418371405066"]},"quality_controlled":"1","isi":1,"project":[{"grant_number":"308036","_id":"2532554C-B435-11E9-9278-68D0E5697425","call_identifier":"FP7","name":"Lifelong Learning of Visual Scene Understanding"}],"publist_id":"6400","ec_funded":1,"author":[{"first_name":"Sylvestre Alvise","last_name":"Rebuffi","full_name":"Rebuffi, Sylvestre Alvise"},{"last_name":"Kolesnikov","first_name":"Alexander","id":"2D157DB6-F248-11E8-B48F-1D18A9856A87","full_name":"Kolesnikov, Alexander"},{"full_name":"Sperl, Georg","first_name":"Georg","last_name":"Sperl","id":"4DD40360-F248-11E8-B48F-1D18A9856A87"},{"id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8622-7887","first_name":"Christoph","last_name":"Lampert","full_name":"Lampert, Christoph"}],"date_updated":"2023-09-22T09:51:58Z","date_created":"2018-12-11T11:49:37Z","volume":2017,"year":"2017","publication_status":"published","publisher":"IEEE","department":[{"_id":"ChLa"},{"_id":"ChWo"}]},{"abstract":[{"text":"We develop a probabilistic technique for colorizing grayscale natural images. In light of the intrinsic uncertainty of this task, the proposed probabilistic framework has numerous desirable properties. 
In particular, our model is able to produce multiple plausible and vivid colorizations for a given grayscale image and is one of the first colorization models to provide a proper stochastic sampling scheme. Moreover, our training procedure is supported by a rigorous theoretical framework that does not require any ad hoc heuristics and allows for efficient modeling and learning of the joint pixel color distribution. We demonstrate strong quantitative and qualitative experimental results on the CIFAR-10 dataset and the challenging ILSVRC 2012 dataset.","lang":"eng"}],"type":"conference","oa_version":"Published Version","file":[{"relation":"main_file","file_id":"8224","success":1,"date_updated":"2020-08-10T07:14:33Z","date_created":"2020-08-10T07:14:33Z","access_level":"open_access","file_name":"2017_BMVC_Royer.pdf","content_type":"application/pdf","file_size":1625363,"creator":"dernst"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"911","status":"public","title":"Probabilistic image colorization","ddc":["000"],"has_accepted_license":"1","article_processing_charge":"No","day":"01","scopus_import":"1","date_published":"2017-09-01T00:00:00Z","citation":{"chicago":"Royer, Amélie, Alexander Kolesnikov, and Christoph Lampert. “Probabilistic Image Colorization,” 85.1-85.12. BMVA Press, 2017. https://doi.org/10.5244/c.31.85.","mla":"Royer, Amélie, et al. Probabilistic Image Colorization. BMVA Press, 2017, p. 85.1-85.12, doi:10.5244/c.31.85.","short":"A. Royer, A. Kolesnikov, C. Lampert, in:, BMVA Press, 2017, p. 85.1-85.12.","ista":"Royer A, Kolesnikov A, Lampert C. 2017. Probabilistic image colorization. BMVC: British Machine Vision Conference, 85.1-85.12.","ieee":"A. Royer, A. Kolesnikov, and C. Lampert, “Probabilistic image colorization,” presented at the BMVC: British Machine Vision Conference, London, United Kingdom, 2017, p. 85.1-85.12.","apa":"Royer, A., Kolesnikov, A., & Lampert, C. (2017). Probabilistic image colorization (p. 85.1-85.12). 
Presented at the BMVC: British Machine Vision Conference, London, United Kingdom: BMVA Press. https://doi.org/10.5244/c.31.85","ama":"Royer A, Kolesnikov A, Lampert C. Probabilistic image colorization. In: BMVA Press; 2017:85.1-85.12. doi:10.5244/c.31.85"},"page":"85.1-85.12","ec_funded":1,"publist_id":"6532","file_date_updated":"2020-08-10T07:14:33Z","related_material":{"record":[{"id":"8390","relation":"dissertation_contains","status":"public"}]},"author":[{"full_name":"Royer, Amélie","orcid":"0000-0002-8407-0705","id":"3811D890-F248-11E8-B48F-1D18A9856A87","last_name":"Royer","first_name":"Amélie"},{"first_name":"Alexander","last_name":"Kolesnikov","id":"2D157DB6-F248-11E8-B48F-1D18A9856A87","full_name":"Kolesnikov, Alexander"},{"last_name":"Lampert","first_name":"Christoph","orcid":"0000-0001-8622-7887","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","full_name":"Lampert, Christoph"}],"date_updated":"2023-10-16T10:04:02Z","date_created":"2018-12-11T11:49:09Z","year":"2017","department":[{"_id":"ChLa"}],"publisher":"BMVA Press","publication_status":"published","publication_identifier":{"eisbn":["190172560X"]},"month":"09","doi":"10.5244/c.31.85","conference":{"start_date":"2017-09-04","location":"London, United Kingdom","end_date":"2017-09-07","name":"BMVC: British Machine Vision Conference"},"language":[{"iso":"eng"}],"external_id":{"arxiv":["1705.04258"]},"oa":1,"project":[{"_id":"2532554C-B435-11E9-9278-68D0E5697425","grant_number":"308036","call_identifier":"FP7","name":"Lifelong Learning of Visual Scene Understanding"}],"quality_controlled":"1"},{"ec_funded":1,"publist_id":"6273","volume":"2016-September","date_created":"2018-12-11T11:50:09Z","date_updated":"2021-01-12T06:48:18Z","author":[{"full_name":"Kolesnikov, Alexander","first_name":"Alexander","last_name":"Kolesnikov","id":"2D157DB6-F248-11E8-B48F-1D18A9856A87"},{"last_name":"Lampert","first_name":"Christoph","orcid":"0000-0001-8622-7887","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","full_name":"Lampert, 
Christoph"}],"publisher":"BMVA Press","department":[{"_id":"ChLa"}],"publication_status":"published","acknowledgement":"This work was funded in parts by the European Research Council\r\nunder the European Union’s Seventh Framework Programme (FP7/2007-2013)/ERC grant\r\nagreement no 308036. We gratefully acknowledge the support of NVIDIA Corporation with\r\nthe donation of the GPUs used for this research.","year":"2016","month":"09","language":[{"iso":"eng"}],"doi":"10.5244/C.30.92","conference":{"location":"York, United Kingdom","start_date":"2016-09-19","end_date":"2016-09-22","name":"BMVC: British Machine Vision Conference"},"project":[{"call_identifier":"FP7","name":"Lifelong Learning of Visual Scene Understanding","_id":"2532554C-B435-11E9-9278-68D0E5697425","grant_number":"308036"}],"quality_controlled":"1","oa":1,"main_file_link":[{"url":"http://www.bmva.org/bmvc/2016/papers/paper092/paper092.pdf","open_access":"1"}],"abstract":[{"lang":"eng","text":"Weakly-supervised object localization methods tend to fail for object classes that consistently co-occur with the same background elements, e.g. trains on tracks. We propose a method to overcome these failures by adding a very small amount of model-specific additional annotation. The main idea is to cluster a deep network's mid-level representations and assign object or distractor labels to each cluster. Experiments show substantially improved localization results on the challenging ILSVRC2014 dataset for bounding box detection and the PASCAL VOC2012 dataset for semantic segmentation."}],"type":"conference","oa_version":"Published Version","title":"Improving weakly-supervised object localization by micro-annotation","status":"public","user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","_id":"1102","day":"01","scopus_import":1,"date_published":"2016-09-01T00:00:00Z","page":"92.1-92.12","citation":{"ama":"Kolesnikov A, Lampert C. Improving weakly-supervised object localization by micro-annotation. 
In: Proceedings of the British Machine Vision Conference 2016. Vol 2016-September. BMVA Press; 2016:92.1-92.12. doi:10.5244/C.30.92","ieee":"A. Kolesnikov and C. Lampert, “Improving weakly-supervised object localization by micro-annotation,” in Proceedings of the British Machine Vision Conference 2016, York, United Kingdom, 2016, vol. 2016–September, p. 92.1-92.12.","apa":"Kolesnikov, A., & Lampert, C. (2016). Improving weakly-supervised object localization by micro-annotation. In Proceedings of the British Machine Vision Conference 2016 (Vol. 2016–September, p. 92.1-92.12). York, United Kingdom: BMVA Press. https://doi.org/10.5244/C.30.92","ista":"Kolesnikov A, Lampert C. 2016. Improving weakly-supervised object localization by micro-annotation. Proceedings of the British Machine Vision Conference 2016. BMVC: British Machine Vision Conference vol. 2016–September, 92.1-92.12.","short":"A. Kolesnikov, C. Lampert, in:, Proceedings of the British Machine Vision Conference 2016, BMVA Press, 2016, p. 92.1-92.12.","mla":"Kolesnikov, Alexander, and Christoph Lampert. “Improving Weakly-Supervised Object Localization by Micro-Annotation.” Proceedings of the British Machine Vision Conference 2016, vol. 2016–September, BMVA Press, 2016, p. 92.1-92.12, doi:10.5244/C.30.92.","chicago":"Kolesnikov, Alexander, and Christoph Lampert. “Improving Weakly-Supervised Object Localization by Micro-Annotation.” In Proceedings of the British Machine Vision Conference 2016, 2016–September:92.1-92.12. BMVA Press, 2016. 
https://doi.org/10.5244/C.30.92."},"publication":"Proceedings of the British Machine Vision Conference 2016"},{"main_file_link":[{"url":"https://arxiv.org/abs/1603.06098","open_access":"1"}],"oa":1,"quality_controlled":"1","project":[{"name":"Lifelong Learning of Visual Scene Understanding","call_identifier":"FP7","_id":"2532554C-B435-11E9-9278-68D0E5697425","grant_number":"308036"}],"conference":{"end_date":"2016-10-14","location":"Amsterdam, The Netherlands","start_date":"2016-10-11","name":"ECCV: European Conference on Computer Vision"},"doi":"10.1007/978-3-319-46493-0_42","language":[{"iso":"eng"}],"month":"09","year":"2016","publication_status":"published","publisher":"Springer","department":[{"_id":"ChLa"}],"author":[{"id":"2D157DB6-F248-11E8-B48F-1D18A9856A87","first_name":"Alexander","last_name":"Kolesnikov","full_name":"Kolesnikov, Alexander"},{"first_name":"Christoph","last_name":"Lampert","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph"}],"date_updated":"2021-01-12T06:50:12Z","date_created":"2018-12-11T11:51:37Z","volume":9908,"ec_funded":1,"publist_id":"5842","citation":{"short":"A. Kolesnikov, C. Lampert, in:, Springer, 2016, pp. 695–711.","mla":"Kolesnikov, Alexander, and Christoph Lampert. Seed, Expand and Constrain: Three Principles for Weakly-Supervised Image Segmentation. Vol. 9908, Springer, 2016, pp. 695–711, doi:10.1007/978-3-319-46493-0_42.","chicago":"Kolesnikov, Alexander, and Christoph Lampert. “Seed, Expand and Constrain: Three Principles for Weakly-Supervised Image Segmentation,” 9908:695–711. Springer, 2016. https://doi.org/10.1007/978-3-319-46493-0_42.","ama":"Kolesnikov A, Lampert C. Seed, expand and constrain: Three principles for weakly-supervised image segmentation. In: Vol 9908. Springer; 2016:695-711. doi:10.1007/978-3-319-46493-0_42","ieee":"A. Kolesnikov and C. 
Lampert, “Seed, expand and constrain: Three principles for weakly-supervised image segmentation,” presented at the ECCV: European Conference on Computer Vision, Amsterdam, The Netherlands, 2016, vol. 9908, pp. 695–711.","apa":"Kolesnikov, A., & Lampert, C. (2016). Seed, expand and constrain: Three principles for weakly-supervised image segmentation (Vol. 9908, pp. 695–711). Presented at the ECCV: European Conference on Computer Vision, Amsterdam, The Netherlands: Springer. https://doi.org/10.1007/978-3-319-46493-0_42","ista":"Kolesnikov A, Lampert C. 2016. Seed, expand and constrain: Three principles for weakly-supervised image segmentation. ECCV: European Conference on Computer Vision, LNCS, vol. 9908, 695–711."},"page":"695 - 711","date_published":"2016-09-15T00:00:00Z","scopus_import":1,"day":"15","user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","_id":"1369","status":"public","title":"Seed, expand and constrain: Three principles for weakly-supervised image segmentation","intvolume":" 9908","oa_version":"Preprint","type":"conference","alternative_title":["LNCS"],"abstract":[{"lang":"eng","text":"We introduce a new loss function for the weakly-supervised training of semantic image segmentation models based on three guiding principles: to seed with weak localization cues, to expand objects based on the information about which classes can occur in an image, and to constrain the segmentations to coincide with object boundaries. We show experimentally that training a deep convolutional neural network using the proposed loss function leads to substantially better segmentations than previous state-of-the-art methods on the challenging PASCAL VOC 2012 dataset. 
We furthermore give insight into the working mechanism of our method by a detailed experimental study that illustrates how the segmentation quality is affected by each term of the proposed loss function as well as their combinations."}]},{"author":[{"last_name":"Kolesnikov","first_name":"Alexander","id":"2D157DB6-F248-11E8-B48F-1D18A9856A87","full_name":"Kolesnikov, Alexander"},{"last_name":"Guillaumin","first_name":"Matthieu","full_name":"Guillaumin, Matthieu"},{"last_name":"Ferrari","first_name":"Vittorio","full_name":"Ferrari, Vittorio"},{"full_name":"Lampert, Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8622-7887","first_name":"Christoph","last_name":"Lampert"}],"volume":8691,"date_created":"2018-12-11T11:56:07Z","date_updated":"2021-01-12T06:55:46Z","year":"2014","editor":[{"full_name":"Fleet, David","last_name":"Fleet","first_name":"David"},{"full_name":"Pajdla, Tomas","first_name":"Tomas","last_name":"Pajdla"},{"first_name":"Bernt","last_name":"Schiele","full_name":"Schiele, Bernt"},{"last_name":"Tuytelaars","first_name":"Tinne","full_name":"Tuytelaars, Tinne"}],"department":[{"_id":"ChLa"}],"publisher":"Springer","publication_status":"published","publist_id":"4813","ec_funded":1,"doi":"10.1007/978-3-319-10578-9_36","conference":{"name":"ECCV: European Conference on Computer Vision","end_date":"2014-09-12","start_date":"2014-09-06","location":"Zurich, Switzerland"},"language":[{"iso":"eng"}],"main_file_link":[{"open_access":"1","url":"http://arxiv.org/abs/1403.7057"}],"oa":1,"project":[{"_id":"2532554C-B435-11E9-9278-68D0E5697425","grant_number":"308036","call_identifier":"FP7","name":"Lifelong Learning of Visual Scene Understanding"}],"quality_controlled":"1","month":"09","oa_version":"Submitted Version","user_id":"4435EBFC-F248-11E8-B48F-1D18A9856A87","_id":"2171","intvolume":" 8691","status":"public","title":"Closed-form approximate CRF training for scalable image segmentation","issue":"PART 3","abstract":[{"text":"We present 
LS-CRF, a new method for training cyclic Conditional Random Fields (CRFs) from large datasets that is inspired by classical closed-form expressions for the maximum likelihood parameters of a generative graphical model with tree topology. Training a CRF with LS-CRF requires only solving a set of independent regression problems, each of which can be solved efficiently in closed form or by an iterative solver. This makes LS-CRF orders of magnitude faster than classical CRF training based on probabilistic inference, and at the same time more flexible and easier to implement than other approximate techniques, such as pseudolikelihood or piecewise training. We apply LS-CRF to the task of semantic image segmentation, showing that it achieves on par accuracy to other training techniques at higher speed, thereby allowing efficient CRF training from very large training sets. For example, training a linearly parameterized pairwise CRF on 150,000 images requires less than one hour on a modern workstation.","lang":"eng"}],"type":"conference","alternative_title":["LNCS"],"date_published":"2014-09-01T00:00:00Z","citation":{"short":"A. Kolesnikov, M. Guillaumin, V. Ferrari, C. Lampert, in:, D. Fleet, T. Pajdla, B. Schiele, T. Tuytelaars (Eds.), Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), Springer, 2014, pp. 550–565.","mla":"Kolesnikov, Alexander, et al. “Closed-Form Approximate CRF Training for Scalable Image Segmentation.” Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), edited by David Fleet et al., vol. 8691, no. PART 3, Springer, 2014, pp. 550–65, doi:10.1007/978-3-319-10578-9_36.","chicago":"Kolesnikov, Alexander, Matthieu Guillaumin, Vittorio Ferrari, and Christoph Lampert. 
“Closed-Form Approximate CRF Training for Scalable Image Segmentation.” In Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), edited by David Fleet, Tomas Pajdla, Bernt Schiele, and Tinne Tuytelaars, 8691:550–65. Springer, 2014. https://doi.org/10.1007/978-3-319-10578-9_36.","ama":"Kolesnikov A, Guillaumin M, Ferrari V, Lampert C. Closed-form approximate CRF training for scalable image segmentation. In: Fleet D, Pajdla T, Schiele B, Tuytelaars T, eds. Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics). Vol 8691. Springer; 2014:550-565. doi:10.1007/978-3-319-10578-9_36","ieee":"A. Kolesnikov, M. Guillaumin, V. Ferrari, and C. Lampert, “Closed-form approximate CRF training for scalable image segmentation,” in Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), Zurich, Switzerland, 2014, vol. 8691, no. PART 3, pp. 550–565.","apa":"Kolesnikov, A., Guillaumin, M., Ferrari, V., & Lampert, C. (2014). Closed-form approximate CRF training for scalable image segmentation. In D. Fleet, T. Pajdla, B. Schiele, & T. Tuytelaars (Eds.), Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) (Vol. 8691, pp. 550–565). Zurich, Switzerland: Springer. https://doi.org/10.1007/978-3-319-10578-9_36","ista":"Kolesnikov A, Guillaumin M, Ferrari V, Lampert C. 2014. Closed-form approximate CRF training for scalable image segmentation. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics). ECCV: European Conference on Computer Vision, LNCS, vol. 
8691, 550–565."},"publication":"Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)","page":"550 - 565","day":"01","scopus_import":1}]