[{"quality_controlled":"1","publisher":"ML Research Press","oa":1,"acknowledgement":"The authors would like to thank Bernd Prach, Elias Frantar, Alexandra Peste, Mahdi Nikdan, and Peter Súkeník for their helpful feedback. This research was supported by the Scientific Service Units (SSU) of IST Austria through resources provided by Scientific Computing (SciComp). This publication was made possible by an ETH AI Center postdoctoral fellowship granted to Nikola Konstantinov. Eugenia Iofinova was supported in part by the FWF DK VGSCO, grant agreement number W1260-N35. ","date_published":"2022-12-22T00:00:00Z","date_created":"2023-02-02T20:29:57Z","has_accepted_license":"1","year":"2022","day":"22","publication":"Transactions on Machine Learning Research","project":[{"name":"Vienna Graduate School on Computational Optimization","grant_number":" W1260-N35","_id":"9B9290DE-BA93-11EA-9121-9846C619BF3A"}],"author":[{"id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117","first_name":"Eugenia B","last_name":"Iofinova","orcid":"0000-0002-7778-3221","full_name":"Iofinova, Eugenia B"},{"last_name":"Konstantinov","full_name":"Konstantinov, Nikola H","first_name":"Nikola H","id":"4B9D76E4-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph","last_name":"Lampert"}],"article_processing_charge":"No","external_id":{"arxiv":["2106.11732"]},"title":"FLEA: Provably robust fair multisource learning from unreliable training data","citation":{"ieee":"E. B. Iofinova, N. H. Konstantinov, and C. Lampert, “FLEA: Provably robust fair multisource learning from unreliable training data,” Transactions on Machine Learning Research. ML Research Press, 2022.","short":"E.B. Iofinova, N.H. Konstantinov, C. Lampert, Transactions on Machine Learning Research (2022).","apa":"Iofinova, E. B., Konstantinov, N. H., & Lampert, C. (2022). 
FLEA: Provably robust fair multisource learning from unreliable training data. Transactions on Machine Learning Research. ML Research Press.","ama":"Iofinova EB, Konstantinov NH, Lampert C. FLEA: Provably robust fair multisource learning from unreliable training data. Transactions on Machine Learning Research. 2022.","mla":"Iofinova, Eugenia B., et al. “FLEA: Provably Robust Fair Multisource Learning from Unreliable Training Data.” Transactions on Machine Learning Research, ML Research Press, 2022.","ista":"Iofinova EB, Konstantinov NH, Lampert C. 2022. FLEA: Provably robust fair multisource learning from unreliable training data. Transactions on Machine Learning Research.","chicago":"Iofinova, Eugenia B, Nikola H Konstantinov, and Christoph Lampert. “FLEA: Provably Robust Fair Multisource Learning from Unreliable Training Data.” Transactions on Machine Learning Research. ML Research Press, 2022."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","main_file_link":[{"url":"https://openreview.net/forum?id=XsPopigZXV","open_access":"1"}],"month":"12","abstract":[{"text":"Fairness-aware learning aims at constructing classifiers that not only make accurate predictions, but also do not discriminate against specific groups. It is a fast-growing area of\r\nmachine learning with far-reaching societal impact. However, existing fair learning methods\r\nare vulnerable to accidental or malicious artifacts in the training data, which can cause\r\nthem to unknowingly produce unfair classifiers. In this work we address the problem of\r\nfair learning from unreliable training data in the robust multisource setting, where the\r\navailable training data comes from multiple sources, a fraction of which might not be representative of the true data distribution. We introduce FLEA, a filtering-based algorithm\r\nthat identifies and suppresses those data sources that would have a negative impact on\r\nfairness or accuracy if they were used for training. 
As such, FLEA is not a replacement of\r\nprior fairness-aware learning methods but rather an augmentation that makes any of them\r\nrobust against unreliable training data. We show the effectiveness of our approach by a\r\ndiverse range of experiments on multiple datasets. Additionally, we prove formally that\r\n–given enough data– FLEA protects the learner against corruptions as long as the fraction of\r\naffected data sources is less than half. Our source code and documentation are available at\r\nhttps://github.com/ISTAustria-CVML/FLEA.","lang":"eng"}],"acknowledged_ssus":[{"_id":"ScienComp"}],"oa_version":"Published Version","related_material":{"link":[{"description":"source code","url":"https://github.com/ISTAustria-CVML/FLEA","relation":"software"}]},"license":"https://creativecommons.org/licenses/by/4.0/","publication_identifier":{"issn":["2835-8856"]},"publication_status":"published","file":[{"checksum":"97c8a8470759cab597abb973ca137a3b","file_id":"12673","success":1,"content_type":"application/pdf","access_level":"open_access","relation":"main_file","date_created":"2023-02-23T10:30:04Z","file_name":"2022_TMLR_Iofinova.pdf","date_updated":"2023-02-23T10:30:04Z","file_size":1948063,"creator":"dernst"}],"language":[{"iso":"eng"}],"type":"journal_article","article_type":"original","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"status":"public","_id":"12495","file_date_updated":"2023-02-23T10:30:04Z","department":[{"_id":"ChLa"}],"date_updated":"2023-02-23T10:30:54Z","ddc":["000"]},{"citation":{"mla":"Prach, Bernd, and Christoph Lampert. “Almost-Orthogonal Layers for Efficient General-Purpose Lipschitz Networks.” Computer Vision – ECCV 2022, vol. 13681, Springer Nature, 2022, pp. 350–65, doi:10.1007/978-3-031-19803-8_21.","short":"B. Prach, C. 
Lampert, in:, Computer Vision – ECCV 2022, Springer Nature, 2022, pp. 350–365.","ieee":"B. Prach and C. Lampert, “Almost-orthogonal layers for efficient general-purpose Lipschitz networks,” in Computer Vision – ECCV 2022, Tel Aviv, Israel, 2022, vol. 13681, pp. 350–365.","ama":"Prach B, Lampert C. Almost-orthogonal layers for efficient general-purpose Lipschitz networks. In: Computer Vision – ECCV 2022. Vol 13681. Springer Nature; 2022:350-365. doi:10.1007/978-3-031-19803-8_21","apa":"Prach, B., & Lampert, C. (2022). Almost-orthogonal layers for efficient general-purpose Lipschitz networks. In Computer Vision – ECCV 2022 (Vol. 13681, pp. 350–365). Tel Aviv, Israel: Springer Nature. https://doi.org/10.1007/978-3-031-19803-8_21","chicago":"Prach, Bernd, and Christoph Lampert. “Almost-Orthogonal Layers for Efficient General-Purpose Lipschitz Networks.” In Computer Vision – ECCV 2022, 13681:350–65. Springer Nature, 2022. https://doi.org/10.1007/978-3-031-19803-8_21.","ista":"Prach B, Lampert C. 2022. Almost-orthogonal layers for efficient general-purpose Lipschitz networks. Computer Vision – ECCV 2022. ECCV: European Conference on Computer Vision, LNCS, vol. 
13681, 350–365."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","author":[{"last_name":"Prach","full_name":"Prach, Bernd","first_name":"Bernd","id":"2D561D42-C427-11E9-89B4-9C1AE6697425"},{"first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph","last_name":"Lampert"}],"article_processing_charge":"No","external_id":{"arxiv":["2208.03160"]},"title":"Almost-orthogonal layers for efficient general-purpose Lipschitz networks","year":"2022","day":"23","publication":"Computer Vision – ECCV 2022","page":"350-365","doi":"10.1007/978-3-031-19803-8_21","date_published":"2022-10-23T00:00:00Z","date_created":"2022-08-12T15:09:47Z","quality_controlled":"1","publisher":"Springer Nature","oa":1,"date_updated":"2023-05-03T08:00:46Z","department":[{"_id":"GradSch"},{"_id":"ChLa"}],"_id":"11839","type":"conference","conference":{"name":"ECCV: European Conference on Computer Vision","location":"Tel Aviv, Israel","end_date":"2022-10-27","start_date":"2022-10-23"},"status":"public","publication_identifier":{"isbn":["9783031198021"],"eisbn":["9783031198038"]},"publication_status":"published","language":[{"iso":"eng"}],"volume":13681,"abstract":[{"lang":"eng","text":"It is a highly desirable property for deep networks to be robust against\r\nsmall input changes. One popular way to achieve this property is by designing\r\nnetworks with a small Lipschitz constant. In this work, we propose a new\r\ntechnique for constructing such Lipschitz networks that has a number of\r\ndesirable properties: it can be applied to any linear network layer\r\n(fully-connected or convolutional), it provides formal guarantees on the\r\nLipschitz constant, it is easy to implement and efficient to run, and it can be\r\ncombined with any training objective and optimization method. In fact, our\r\ntechnique is the first one in the literature that achieves all of these\r\nproperties simultaneously. 
Our main contribution is a rescaling-based weight\r\nmatrix parametrization that guarantees each network layer to have a Lipschitz\r\nconstant of at most 1 and results in the learned weight matrices to be close to\r\northogonal. Hence we call such layers almost-orthogonal Lipschitz (AOL).\r\nExperiments and ablation studies in the context of image classification with\r\ncertified robust accuracy confirm that AOL layers achieve results that are on\r\npar with most existing methods. Yet, they are simpler to implement and more\r\nbroadly applicable, because they do not require computationally expensive\r\nmatrix orthogonalization or inversion steps as part of the network\r\narchitecture. We provide code at https://github.com/berndprach/AOL."}],"oa_version":"Preprint","scopus_import":"1","alternative_title":["LNCS"],"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2208.03160","open_access":"1"}],"month":"10","intvolume":" 13681"},{"_id":"10752","status":"public","type":"conference","conference":{"start_date":"2021-12-15","location":"Orlando, FL, United States","end_date":"2021-12-18","name":"Big Data: International Conference on Big Data"},"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","date_updated":"2023-08-02T14:27:50Z","citation":{"ama":"Lampert J, Lampert C. Overcoming rare-language discrimination in multi-lingual sentiment analysis. In: 2021 IEEE International Conference on Big Data. IEEE; 2022:5185-5192. doi:10.1109/bigdata52589.2021.9672003","apa":"Lampert, J., & Lampert, C. (2022). Overcoming rare-language discrimination in multi-lingual sentiment analysis. In 2021 IEEE International Conference on Big Data (pp. 5185–5192). Orlando, FL, United States: IEEE. https://doi.org/10.1109/bigdata52589.2021.9672003","ieee":"J. Lampert and C. Lampert, “Overcoming rare-language discrimination in multi-lingual sentiment analysis,” in 2021 IEEE International Conference on Big Data, Orlando, FL, United States, 2022, pp. 5185–5192.","short":"J. Lampert, C. 
Lampert, in:, 2021 IEEE International Conference on Big Data, IEEE, 2022, pp. 5185–5192.","mla":"Lampert, Jasmin, and Christoph Lampert. “Overcoming Rare-Language Discrimination in Multi-Lingual Sentiment Analysis.” 2021 IEEE International Conference on Big Data, IEEE, 2022, pp. 5185–92, doi:10.1109/bigdata52589.2021.9672003.","ista":"Lampert J, Lampert C. 2022. Overcoming rare-language discrimination in multi-lingual sentiment analysis. 2021 IEEE International Conference on Big Data. Big Data: International Conference on Big Data, 5185–5192.","chicago":"Lampert, Jasmin, and Christoph Lampert. “Overcoming Rare-Language Discrimination in Multi-Lingual Sentiment Analysis.” In 2021 IEEE International Conference on Big Data, 5185–92. IEEE, 2022. https://doi.org/10.1109/bigdata52589.2021.9672003."},"department":[{"_id":"ChLa"}],"title":"Overcoming rare-language discrimination in multi-lingual sentiment analysis","author":[{"last_name":"Lampert","full_name":"Lampert, Jasmin","first_name":"Jasmin"},{"id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph","last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0002-4561-241X"}],"article_processing_charge":"No","external_id":{"isi":["000800559505036"]},"oa_version":"None","abstract":[{"lang":"eng","text":"The digitalization of almost all aspects of our everyday lives has led to unprecedented amounts of data being freely available on the Internet. In particular social media platforms provide rich sources of user-generated data, though typically in unstructured form, and with high diversity, such as written in many different languages. Automatically identifying meaningful information in such big data resources and extracting it efficiently is one of the ongoing challenges of our time. A common step for this is sentiment analysis, which forms the foundation for tasks such as opinion mining or trend prediction. 
Unfortunately, publicly available tools for this task are almost exclusively available for English-language texts. Consequently, a large fraction of the Internet users, who do not communicate in English, are ignored in automatized studies, a phenomenon called rare-language discrimination.In this work we propose a technique to overcome this problem by a truly multi-lingual model, which can be trained automatically without linguistic knowledge or even the ability to read the many target languages. The main step is to combine self-annotation, specifically the use of emoticons as a proxy for labels, with multi-lingual sentence representations.To evaluate our method we curated several large datasets from data obtained via the free Twitter streaming API. The results show that our proposed multi-lingual training is able to achieve sentiment predictions at the same quality level for rare languages as for frequent ones, and in particular clearly better than what mono-lingual training achieves on the same data. "}],"month":"01","quality_controlled":"1","publisher":"IEEE","day":"13","language":[{"iso":"eng"}],"publication":"2021 IEEE International Conference on Big Data","isi":1,"publication_identifier":{"isbn":["9781665439022"]},"year":"2022","publication_status":"published","doi":"10.1109/bigdata52589.2021.9672003","date_published":"2022-01-13T00:00:00Z","date_created":"2022-02-10T14:08:23Z","page":"5185-5192"},{"month":"11","intvolume":" 2022","scopus_import":"1","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2206.05181"}],"oa_version":"Preprint","abstract":[{"lang":"eng","text":"We introduce LIMES, a new method for learning with non-stationary streaming data, inspired by the recent success of meta-learning. 
The main idea is not to attempt to learn a single classifier that would have to work well across all occurring data distributions, nor many separate classifiers, but to exploit a hybrid strategy: we learn a single set of model parameters from which a specific classifier for any specific data distribution is derived via classifier adaptation. Assuming a multiclass classification setting with class-prior shift, the adaptation step can be performed analytically with only the classifier’s bias terms being affected. Another contribution of our work is an extrapolation step that predicts suitable adaptation parameters for future time steps based on the previous data. In combination, we obtain a lightweight procedure for learning from streaming data with varying class distribution that adds no trainable parameters and almost no memory or computational overhead compared to training a single model. Experiments on a set of exemplary tasks using Twitter data show that LIMES achieves higher accuracy than alternative approaches, especially with respect to the relevant real-world metric of lowest within-day accuracy."}],"volume":2022,"language":[{"iso":"eng"}],"publication_identifier":{"eissn":["2831-7475"],"eisbn":["9781665490627"]},"publication_status":"published","status":"public","type":"conference","conference":{"start_date":"2022-08-21","location":"Montreal, Canada","end_date":"2022-08-25","name":"ICPR: International Conference on Pattern Recognition"},"_id":"12161","department":[{"_id":"ChLa"}],"date_updated":"2023-08-04T09:06:34Z","publisher":"Institute of Electrical and Electronics Engineers","quality_controlled":"1","oa":1,"date_published":"2022-11-29T00:00:00Z","doi":"10.1109/icpr56361.2022.9956195","date_created":"2023-01-12T12:09:38Z","page":"2128-2134","day":"29","publication":"26th International Conference on Pattern Recognition","isi":1,"year":"2022","title":"Lightweight conditional model extrapolation for streaming data under class-prior 
shift","author":[{"last_name":"Tomaszewska","full_name":"Tomaszewska, Paulina","first_name":"Paulina"},{"id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph","last_name":"Lampert","orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph"}],"external_id":{"isi":["000897707602018"],"arxiv":["2206.05181"]},"article_processing_charge":"No","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","citation":{"chicago":"Tomaszewska, Paulina, and Christoph Lampert. “Lightweight Conditional Model Extrapolation for Streaming Data under Class-Prior Shift.” In 26th International Conference on Pattern Recognition, 2022:2128–34. Institute of Electrical and Electronics Engineers, 2022. https://doi.org/10.1109/icpr56361.2022.9956195.","ista":"Tomaszewska P, Lampert C. 2022. Lightweight conditional model extrapolation for streaming data under class-prior shift. 26th International Conference on Pattern Recognition. ICPR: International Conference on Pattern Recognition vol. 2022, 2128–2134.","mla":"Tomaszewska, Paulina, and Christoph Lampert. “Lightweight Conditional Model Extrapolation for Streaming Data under Class-Prior Shift.” 26th International Conference on Pattern Recognition, vol. 2022, Institute of Electrical and Electronics Engineers, 2022, pp. 2128–34, doi:10.1109/icpr56361.2022.9956195.","apa":"Tomaszewska, P., & Lampert, C. (2022). Lightweight conditional model extrapolation for streaming data under class-prior shift. In 26th International Conference on Pattern Recognition (Vol. 2022, pp. 2128–2134). Montreal, Canada: Institute of Electrical and Electronics Engineers. https://doi.org/10.1109/icpr56361.2022.9956195","ama":"Tomaszewska P, Lampert C. Lightweight conditional model extrapolation for streaming data under class-prior shift. In: 26th International Conference on Pattern Recognition. Vol 2022. Institute of Electrical and Electronics Engineers; 2022:2128-2134. doi:10.1109/icpr56361.2022.9956195","ieee":"P. Tomaszewska and C. 
Lampert, “Lightweight conditional model extrapolation for streaming data under class-prior shift,” in 26th International Conference on Pattern Recognition, Montreal, Canada, 2022, vol. 2022, pp. 2128–2134.","short":"P. Tomaszewska, C. Lampert, in:, 26th International Conference on Pattern Recognition, Institute of Electrical and Electronics Engineers, 2022, pp. 2128–2134."}},{"quality_controlled":"1","publisher":"Institute of Electrical and Electronics Engineers","oa":1,"acknowledgement":"The authors would like to sincerely thank Christoph Lampert and Nir Shavit for fruitful discussions during the development of this work, and Eldar Kurtic for experimental support. EI was supported in part by the FWF DK VGSCO, grant agreement number W1260-N35, while AP and DA acknowledge generous support by the ERC, via Starting Grant 805223 ScaleML.","date_published":"2022-09-27T00:00:00Z","doi":"10.1109/cvpr52688.2022.01195","date_created":"2023-01-16T10:06:00Z","page":"12256-12266","day":"27","publication":"2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition","isi":1,"year":"2022","project":[{"_id":"9B9290DE-BA93-11EA-9121-9846C619BF3A","name":"Vienna Graduate School on Computational Optimization","grant_number":" W1260-N35"},{"name":"Elastic Coordination for Scalable Machine Learning","grant_number":"805223","_id":"268A44D6-B435-11E9-9278-68D0E5697425","call_identifier":"H2020"}],"title":"How well do sparse ImageNet models transfer?","author":[{"id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117","first_name":"Eugenia B","orcid":"0000-0002-7778-3221","full_name":"Iofinova, Eugenia B","last_name":"Iofinova"},{"last_name":"Peste","full_name":"Peste, Elena-Alexandra","first_name":"Elena-Alexandra","id":"32D78294-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Mark","full_name":"Kurtz, Mark","last_name":"Kurtz"},{"first_name":"Dan-Adrian","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","last_name":"Alistarh","full_name":"Alistarh, 
Dan-Adrian","orcid":"0000-0003-3650-940X"}],"article_processing_charge":"No","external_id":{"isi":["000870759105034"],"arxiv":["2111.13445"]},"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","citation":{"chicago":"Iofinova, Eugenia B, Elena-Alexandra Peste, Mark Kurtz, and Dan-Adrian Alistarh. “How Well Do Sparse ImageNet Models Transfer?” In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition, 12256–66. Institute of Electrical and Electronics Engineers, 2022. https://doi.org/10.1109/cvpr52688.2022.01195.","ista":"Iofinova EB, Peste E-A, Kurtz M, Alistarh D-A. 2022. How well do sparse ImageNet models transfer? 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition. CVPR: Computer Vision and Pattern Recognition, 12256–12266.","mla":"Iofinova, Eugenia B., et al. “How Well Do Sparse ImageNet Models Transfer?” 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition, Institute of Electrical and Electronics Engineers, 2022, pp. 12256–66, doi:10.1109/cvpr52688.2022.01195.","ieee":"E. B. Iofinova, E.-A. Peste, M. Kurtz, and D.-A. Alistarh, “How well do sparse ImageNet models transfer?,” in 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition, New Orleans, LA, United States, 2022, pp. 12256–12266.","short":"E.B. Iofinova, E.-A. Peste, M. Kurtz, D.-A. Alistarh, in:, 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition, Institute of Electrical and Electronics Engineers, 2022, pp. 12256–12266.","ama":"Iofinova EB, Peste E-A, Kurtz M, Alistarh D-A. How well do sparse ImageNet models transfer? In: 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition. Institute of Electrical and Electronics Engineers; 2022:12256-12266. doi:10.1109/cvpr52688.2022.01195","apa":"Iofinova, E. B., Peste, E.-A., Kurtz, M., & Alistarh, D.-A. (2022). How well do sparse ImageNet models transfer? In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 12256–12266). 
New Orleans, LA, United States: Institute of Electrical and Electronics Engineers. https://doi.org/10.1109/cvpr52688.2022.01195"},"month":"09","scopus_import":"1","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2111.13445"}],"oa_version":"Preprint","abstract":[{"text":"Transfer learning is a classic paradigm by which models pretrained on large “upstream” datasets are adapted to yield good results on “downstream” specialized datasets. Generally, more accurate models on the “upstream” dataset tend to provide better transfer accuracy “downstream”. In this work, we perform an in-depth investigation of this phenomenon in the context of convolutional neural networks (CNNs) trained on the ImageNet dataset, which have been pruned-that is, compressed by sparsifiying their connections. We consider transfer using unstructured pruned models obtained by applying several state-of-the-art pruning methods, including magnitude-based, second-order, regrowth, lottery-ticket, and regularization approaches, in the context of twelve standard transfer tasks. In a nutshell, our study shows that sparse models can match or even outperform the transfer performance of dense models, even at high sparsities, and, while doing so, can lead to significant inference and even training speedups. At the same time, we observe and analyze significant differences in the behaviour of different pruning methods. 
The code is available at: https://github.com/IST-DASLab/sparse-imagenet-transfer.","lang":"eng"}],"related_material":{"record":[{"status":"public","id":"13074","relation":"dissertation_contains"}]},"ec_funded":1,"language":[{"iso":"eng"}],"publication_identifier":{"eissn":["2575-7075"]},"publication_status":"published","status":"public","type":"conference","conference":{"start_date":"2022-06-18","location":"New Orleans, LA, United States","end_date":"2022-06-24","name":"CVPR: Computer Vision and Pattern Recognition"},"_id":"12299","department":[{"_id":"DaAl"},{"_id":"ChLa"}],"date_updated":"2023-08-04T10:33:28Z"},{"month":"05","intvolume":" 23","scopus_import":"1","oa_version":"Published Version","abstract":[{"text":"Addressing fairness concerns about machine learning models is a crucial step towards their long-term adoption in real-world automated systems. While many approaches have been developed for training fair models from data, little is known about the robustness of these methods to data corruption. In this work we consider fairness-aware learning under worst-case data manipulations. We show that an adversary can in some situations force any learner to return an overly biased classifier, regardless of the sample size and with or without degrading\r\naccuracy, and that the strength of the excess bias increases for learning problems with underrepresented protected groups in the data. We also prove that our hardness results are tight up to constant factors. 
To this end, we study two natural learning algorithms that optimize for both accuracy and fairness and show that these algorithms enjoy guarantees that are order-optimal in terms of the corruption ratio and the protected groups frequencies in the large data\r\nlimit.","lang":"eng"}],"volume":23,"related_material":{"record":[{"id":"10799","status":"public","relation":"dissertation_contains"},{"id":"13241","status":"public","relation":"shorter_version"}]},"file":[{"creator":"kschuh","date_updated":"2022-07-12T15:08:28Z","file_size":551862,"date_created":"2022-07-12T15:08:28Z","file_name":"2022_JournalMachineLearningResearch_Konstantinov.pdf","access_level":"open_access","relation":"main_file","content_type":"application/pdf","checksum":"9cac897b54a0ddf3a553a2c33e88cfda","file_id":"11570","success":1}],"language":[{"iso":"eng"}],"publication_identifier":{"issn":["1532-4435"],"eissn":["1533-7928"]},"publication_status":"published","status":"public","keyword":["Fairness","robustness","data poisoning","trustworthy machine learning","PAC learning"],"type":"journal_article","article_type":"original","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"_id":"10802","department":[{"_id":"ChLa"}],"file_date_updated":"2022-07-12T15:08:28Z","ddc":["004"],"date_updated":"2023-09-26T10:44:37Z","quality_controlled":"1","publisher":"ML Research Press","oa":1,"acknowledgement":"The authors thank Eugenia Iofinova and Bernd Prach for providing feedback on early versions of this paper. 
This publication was made possible by an ETH AI Center postdoctoral fellowship to Nikola Konstantinov.","date_published":"2022-05-01T00:00:00Z","date_created":"2022-02-28T14:05:42Z","page":"1-60","day":"01","publication":"Journal of Machine Learning Research","has_accepted_license":"1","year":"2022","title":"Fairness-aware PAC learning from corrupted data","author":[{"last_name":"Konstantinov","full_name":"Konstantinov, Nikola H","first_name":"Nikola H","id":"4B9D76E4-F248-11E8-B48F-1D18A9856A87"},{"id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph","last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0002-4561-241X"}],"external_id":{"arxiv":["2102.06004"]},"article_processing_charge":"No","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"ista":"Konstantinov NH, Lampert C. 2022. Fairness-aware PAC learning from corrupted data. Journal of Machine Learning Research. 23, 1–60.","chicago":"Konstantinov, Nikola H, and Christoph Lampert. “Fairness-Aware PAC Learning from Corrupted Data.” Journal of Machine Learning Research. ML Research Press, 2022.","apa":"Konstantinov, N. H., & Lampert, C. (2022). Fairness-aware PAC learning from corrupted data. Journal of Machine Learning Research. ML Research Press.","ama":"Konstantinov NH, Lampert C. Fairness-aware PAC learning from corrupted data. Journal of Machine Learning Research. 2022;23:1-60.","short":"N.H. Konstantinov, C. Lampert, Journal of Machine Learning Research 23 (2022) 1–60.","ieee":"N. H. Konstantinov and C. Lampert, “Fairness-aware PAC learning from corrupted data,” Journal of Machine Learning Research, vol. 23. ML Research Press, pp. 1–60, 2022.","mla":"Konstantinov, Nikola H., and Christoph Lampert. “Fairness-Aware PAC Learning from Corrupted Data.” Journal of Machine Learning Research, vol. 23, ML Research Press, 2022, pp. 1–60."}},{"acknowledgement":"This paper is a shortened, workshop version of Konstantinov and Lampert (2021),\r\nhttps://arxiv.org/abs/2102.06004. 
For further results, including an analysis of algorithms achieving the lower bounds from this paper, we refer to the full version.","publisher":"ML Research Press","quality_controlled":"1","oa":1,"year":"2022","day":"01","publication":"Proceedings of Machine Learning Research","page":"59-83","date_published":"2022-12-01T00:00:00Z","date_created":"2023-07-16T22:01:13Z","citation":{"ama":"Konstantinov NH, Lampert C. On the impossibility of fairness-aware learning from corrupted data. In: Proceedings of Machine Learning Research. Vol 171. ML Research Press; 2022:59-83.","apa":"Konstantinov, N. H., & Lampert, C. (2022). On the impossibility of fairness-aware learning from corrupted data. In Proceedings of Machine Learning Research (Vol. 171, pp. 59–83). ML Research Press.","short":"N.H. Konstantinov, C. Lampert, in:, Proceedings of Machine Learning Research, ML Research Press, 2022, pp. 59–83.","ieee":"N. H. Konstantinov and C. Lampert, “On the impossibility of fairness-aware learning from corrupted data,” in Proceedings of Machine Learning Research, 2022, vol. 171, pp. 59–83.","mla":"Konstantinov, Nikola H., and Christoph Lampert. “On the Impossibility of Fairness-Aware Learning from Corrupted Data.” Proceedings of Machine Learning Research, vol. 171, ML Research Press, 2022, pp. 59–83.","ista":"Konstantinov NH, Lampert C. 2022. On the impossibility of fairness-aware learning from corrupted data. Proceedings of Machine Learning Research. vol. 171, 59–83.","chicago":"Konstantinov, Nikola H, and Christoph Lampert. “On the Impossibility of Fairness-Aware Learning from Corrupted Data.” In Proceedings of Machine Learning Research, 171:59–83. 
ML Research Press, 2022."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","author":[{"last_name":"Konstantinov","full_name":"Konstantinov, Nikola H","id":"4B9D76E4-F248-11E8-B48F-1D18A9856A87","first_name":"Nikola H"},{"id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph","last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887"}],"article_processing_charge":"No","external_id":{"arxiv":["2102.06004"]},"title":"On the impossibility of fairness-aware learning from corrupted data","abstract":[{"text":"Addressing fairness concerns about machine learning models is a crucial step towards their long-term adoption in real-world automated systems. Many approaches for training fair models from data have been developed and an implicit assumption about such algorithms is that they are able to recover a fair model, despite potential historical biases in the data. In this work we show a number of impossibility results that indicate that there is no learning algorithm that can recover a fair model when a proportion of the dataset is subject to arbitrary manipulations. Specifically, we prove that there are situations in which an adversary can force any learner to return a biased classifier, with or without degrading accuracy, and that the strength of this bias increases for learning problems with underrepresented protected groups in the data. 
Our results emphasize the importance of studying further data corruption models of various strengths and of establishing stricter data collection practices for fairness-aware learning.","lang":"eng"}],"oa_version":"Preprint","scopus_import":"1","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2102.06004"}],"month":"12","intvolume":" 171","publication_identifier":{"eissn":["2640-3498"]},"publication_status":"published","language":[{"iso":"eng"}],"volume":171,"related_material":{"record":[{"relation":"extended_version","status":"public","id":"10802"}]},"_id":"13241","type":"conference","status":"public","date_updated":"2023-09-26T10:44:37Z","department":[{"_id":"ChLa"}]},{"ec_funded":1,"related_material":{"record":[{"relation":"part_of_dissertation","status":"public","id":"8724"},{"relation":"part_of_dissertation","status":"public","id":"10803"},{"status":"public","id":"10802","relation":"part_of_dissertation"},{"relation":"part_of_dissertation","id":"6590","status":"public"}]},"language":[{"iso":"eng"}],"file":[{"date_updated":"2022-03-06T11:42:54Z","file_size":4204905,"creator":"nkonstan","date_created":"2022-03-06T11:42:54Z","file_name":"thesis.pdf","content_type":"application/pdf","access_level":"open_access","relation":"main_file","checksum":"626bc523ae8822d20e635d0e2d95182e","file_id":"10823","success":1},{"date_updated":"2022-03-10T12:11:48Z","file_size":22841103,"creator":"nkonstan","date_created":"2022-03-06T11:42:57Z","file_name":"thesis.zip","content_type":"application/x-zip-compressed","access_level":"closed","relation":"source_file","checksum":"e2ca2b88350ac8ea1515b948885cbcb1","file_id":"10824"}],"degree_awarded":"PhD","publication_status":"published","publication_identifier":{"isbn":["978-3-99078-015-2"],"issn":["2663-337X"]},"month":"03","alternative_title":["ISTA Thesis"],"oa_version":"Published Version","abstract":[{"lang":"eng","text":"Because of the increasing popularity of machine learning methods, it is becoming important to 
understand the impact of learned components on automated decision-making systems and to guarantee that their consequences are beneficial to society. In other words, it is necessary to ensure that machine learning is sufficiently trustworthy to be used in real-world applications. This thesis studies two properties of machine learning models that are highly desirable for the\r\nsake of reliability: robustness and fairness. In the first part of the thesis we study the robustness of learning algorithms to training data corruption. Previous work has shown that machine learning models are vulnerable to a range\r\nof training set issues, varying from label noise through systematic biases to worst-case data manipulations. This is an especially relevant problem from a present perspective, since modern machine learning methods are particularly data hungry and therefore practitioners often have to rely on data collected from various external sources, e.g. from the Internet, from app users or via crowdsourcing. Naturally, such sources vary greatly in the quality and reliability of the\r\ndata they provide. With these considerations in mind, we study the problem of designing machine learning algorithms that are robust to corruptions in data coming from multiple sources. We show that, in contrast to the case of a single dataset with outliers, successful learning within this model is possible both theoretically and practically, even under worst-case data corruptions. The second part of this thesis deals with fairness-aware machine learning. There are multiple areas where machine learning models have shown promising results, but where careful considerations are required, in order to avoid discriminatory decisions taken by such learned components. Ensuring fairness can be particularly challenging, because real-world training datasets are expected to contain various forms of historical bias that may affect the learning process. 
In this thesis we show that data corruption can indeed render the problem of achieving fairness impossible, by tightly characterizing the theoretical limits of fair learning under worst-case data manipulations. However, assuming access to clean data, we also show how fairness-aware learning can be made practical in contexts beyond binary classification, in particular in the challenging learning to rank setting."}],"department":[{"_id":"GradSch"},{"_id":"ChLa"}],"file_date_updated":"2022-03-10T12:11:48Z","ddc":["000"],"date_updated":"2023-10-17T12:31:54Z","supervisor":[{"last_name":"Lampert","orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph","first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87"}],"keyword":["robustness","fairness","machine learning","PAC learning","adversarial learning"],"status":"public","type":"dissertation","_id":"10799","date_created":"2022-02-28T13:03:49Z","date_published":"2022-03-08T00:00:00Z","doi":"10.15479/at:ista:10799","page":"176","day":"08","year":"2022","has_accepted_license":"1","oa":1,"publisher":"Institute of Science and Technology Austria","title":"Robustness and fairness in machine learning","article_processing_charge":"No","author":[{"first_name":"Nikola H","id":"4B9D76E4-F248-11E8-B48F-1D18A9856A87","last_name":"Konstantinov","full_name":"Konstantinov, Nikola H"}],"user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","citation":{"ista":"Konstantinov NH. 2022. Robustness and fairness in machine learning. Institute of Science and Technology Austria.","chicago":"Konstantinov, Nikola H. “Robustness and Fairness in Machine Learning.” Institute of Science and Technology Austria, 2022. https://doi.org/10.15479/at:ista:10799.","short":"N.H. Konstantinov, Robustness and Fairness in Machine Learning, Institute of Science and Technology Austria, 2022.","ieee":"N. H. Konstantinov, “Robustness and fairness in machine learning,” Institute of Science and Technology Austria, 2022.","apa":"Konstantinov, N. H. (2022). 
Robustness and fairness in machine learning. Institute of Science and Technology Austria. https://doi.org/10.15479/at:ista:10799","ama":"Konstantinov NH. Robustness and fairness in machine learning. 2022. doi:10.15479/at:ista:10799","mla":"Konstantinov, Nikola H. Robustness and Fairness in Machine Learning. Institute of Science and Technology Austria, 2022, doi:10.15479/at:ista:10799."},"project":[{"_id":"2564DBCA-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"665385","name":"International IST Doctoral Program"}]},{"citation":{"ista":"Volhejn V, Lampert C. 2021. Does SGD implicitly optimize for smoothness? 42nd German Conference on Pattern Recognition. DAGM GCPR: German Conference on Pattern Recognition LNCS vol. 12544, 246–259.","chicago":"Volhejn, Vaclav, and Christoph Lampert. “Does SGD Implicitly Optimize for Smoothness?” In 42nd German Conference on Pattern Recognition, 12544:246–59. LNCS. Springer, 2021. https://doi.org/10.1007/978-3-030-71278-5_18.","ieee":"V. Volhejn and C. Lampert, “Does SGD implicitly optimize for smoothness?,” in 42nd German Conference on Pattern Recognition, Tübingen, Germany, 2021, vol. 12544, pp. 246–259.","short":"V. Volhejn, C. Lampert, in:, 42nd German Conference on Pattern Recognition, Springer, 2021, pp. 246–259.","ama":"Volhejn V, Lampert C. Does SGD implicitly optimize for smoothness? In: 42nd German Conference on Pattern Recognition. Vol 12544. LNCS. Springer; 2021:246-259. doi:10.1007/978-3-030-71278-5_18","apa":"Volhejn, V., & Lampert, C. (2021). Does SGD implicitly optimize for smoothness? In 42nd German Conference on Pattern Recognition (Vol. 12544, pp. 246–259). Tübingen, Germany: Springer. https://doi.org/10.1007/978-3-030-71278-5_18","mla":"Volhejn, Vaclav, and Christoph Lampert. “Does SGD Implicitly Optimize for Smoothness?” 42nd German Conference on Pattern Recognition, vol. 12544, Springer, 2021, pp. 
246–59, doi:10.1007/978-3-030-71278-5_18."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","article_processing_charge":"No","author":[{"full_name":"Volhejn, Vaclav","last_name":"Volhejn","first_name":"Vaclav","id":"d5235fb4-7a6d-11eb-b254-f25d12d631a8"},{"id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","last_name":"Lampert"}],"title":"Does SGD implicitly optimize for smoothness?","year":"2021","has_accepted_license":"1","publication":"42nd German Conference on Pattern Recognition","day":"17","page":"246-259","date_created":"2021-03-01T09:01:16Z","doi":"10.1007/978-3-030-71278-5_18","date_published":"2021-03-17T00:00:00Z","oa":1,"publisher":"Springer","quality_controlled":"1","date_updated":"2022-08-12T07:28:47Z","ddc":["510"],"file_date_updated":"2022-08-12T07:27:58Z","department":[{"_id":"ChLa"}],"_id":"9210","series_title":"LNCS","conference":{"name":"DAGM GCPR: German Conference on Pattern Recognition ","start_date":"2020-09-28","end_date":"2020-10-01","location":"Tübingen, Germany"},"type":"conference","status":"public","publication_status":"published","publication_identifier":{"isbn":["9783030712778"],"eissn":["1611-3349"],"issn":["0302-9743"]},"language":[{"iso":"eng"}],"file":[{"content_type":"application/pdf","access_level":"open_access","relation":"main_file","file_id":"11820","checksum":"3e3628ab1cf658d82524963f808004ea","success":1,"date_updated":"2022-08-12T07:27:58Z","file_size":420234,"creator":"dernst","date_created":"2022-08-12T07:27:58Z","file_name":"2020_GCPR_submitted_Volhejn.pdf"}],"volume":12544,"abstract":[{"lang":"eng","text":"Modern neural networks can easily fit their training set perfectly. Surprisingly, despite being “overfit” in this way, they tend to generalize well to future data, thereby defying the classic bias–variance trade-off of machine learning theory. 
Of the many possible explanations, a prevalent one is that training by stochastic gradient descent (SGD) imposes an implicit bias that leads it to learn simple functions, and these simple functions generalize well. However, the specifics of this implicit bias are not well understood.\r\nIn this work, we explore the smoothness conjecture which states that SGD is implicitly biased towards learning functions that are smooth. We propose several measures to formalize the intuitive notion of smoothness, and we conduct experiments to determine whether SGD indeed implicitly optimizes for these measures. Our findings rule out the possibility that smoothness measures based on first-order derivatives are being implicitly enforced. They are supportive, though, of the smoothness conjecture for measures based on second-order derivatives."}],"oa_version":"Submitted Version","scopus_import":"1","intvolume":" 12544","month":"03"},{"oa_version":"Published Version","abstract":[{"text":"We study the inductive bias of two-layer ReLU networks trained by gradient flow. We identify a class of easy-to-learn (`orthogonally separable') datasets, and characterise the solution that ReLU networks trained on such datasets converge to. Irrespective of network width, the solution turns out to be a combination of two max-margin classifiers: one corresponding to the positive data subset and one corresponding to the negative data subset. The proof is based on the recently introduced concept of extremal sectors, for which we prove a number of properties in the context of orthogonal separability. 
In particular, we prove stationarity of activation patterns from some time onwards, which enables a reduction of the ReLU network to an ensemble of linear subnetworks.","lang":"eng"}],"month":"05","quality_controlled":"1","scopus_import":"1","main_file_link":[{"open_access":"1","url":"https://openreview.net/pdf?id=krz7T0xU9Z_"}],"oa":1,"day":"01","file":[{"content_type":"application/pdf","relation":"main_file","access_level":"open_access","file_id":"9417","checksum":"f34ff17017527db5ba6927f817bdd125","file_size":502356,"date_updated":"2021-05-24T11:15:57Z","creator":"bphuong","file_name":"iclr2021_conference.pdf","date_created":"2021-05-24T11:15:57Z"}],"publication":"9th International Conference on Learning Representations","language":[{"iso":"eng"}],"has_accepted_license":"1","publication_status":"published","year":"2021","related_material":{"record":[{"relation":"dissertation_contains","id":"9418","status":"public"}]},"date_published":"2021-05-01T00:00:00Z","date_created":"2021-05-24T11:16:46Z","_id":"9416","status":"public","type":"conference","conference":{"name":" ICLR: International Conference on Learning Representations","start_date":"2021-05-03","location":"Virtual","end_date":"2021-05-07"},"ddc":["000"],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_updated":"2023-09-07T13:29:50Z","citation":{"chicago":"Phuong, Mary, and Christoph Lampert. “The Inductive Bias of ReLU Networks on Orthogonally Separable Data.” In 9th International Conference on Learning Representations, 2021.","ista":"Phuong M, Lampert C. 2021. The inductive bias of ReLU networks on orthogonally separable data. 9th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","mla":"Phuong, Mary, and Christoph Lampert. “The Inductive Bias of ReLU Networks on Orthogonally Separable Data.” 9th International Conference on Learning Representations, 2021.","ama":"Phuong M, Lampert C. 
The inductive bias of ReLU networks on orthogonally separable data. In: 9th International Conference on Learning Representations. ; 2021.","apa":"Phuong, M., & Lampert, C. (2021). The inductive bias of ReLU networks on orthogonally separable data. In 9th International Conference on Learning Representations. Virtual.","short":"M. Phuong, C. Lampert, in:, 9th International Conference on Learning Representations, 2021.","ieee":"M. Phuong and C. Lampert, “The inductive bias of ReLU networks on orthogonally separable data,” in 9th International Conference on Learning Representations, Virtual, 2021."},"title":"The inductive bias of ReLU networks on orthogonally separable data","department":[{"_id":"GradSch"},{"_id":"ChLa"}],"file_date_updated":"2021-05-24T11:15:57Z","author":[{"id":"3EC6EE64-F248-11E8-B48F-1D18A9856A87","first_name":"Phuong","full_name":"Bui Thi Mai, Phuong","last_name":"Bui Thi Mai"},{"last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph"}],"article_processing_charge":"No"},{"article_processing_charge":"No","external_id":{"arxiv":["2102.05996"]},"author":[{"id":"4B9D76E4-F248-11E8-B48F-1D18A9856A87","first_name":"Nikola H","last_name":"Konstantinov","full_name":"Konstantinov, Nikola H"},{"last_name":"Lampert","orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph"}],"title":"Fairness through regularization for learning to rank","department":[{"_id":"ChLa"}],"citation":{"ista":"Konstantinov NH, Lampert C. Fairness through regularization for learning to rank. arXiv, 2102.05996.","chicago":"Konstantinov, Nikola H, and Christoph Lampert. “Fairness through Regularization for Learning to Rank.” ArXiv, n.d. https://doi.org/10.48550/arXiv.2102.05996.","apa":"Konstantinov, N. H., & Lampert, C. (n.d.). Fairness through regularization for learning to rank. arXiv. 
https://doi.org/10.48550/arXiv.2102.05996","ama":"Konstantinov NH, Lampert C. Fairness through regularization for learning to rank. arXiv. doi:10.48550/arXiv.2102.05996","short":"N.H. Konstantinov, C. Lampert, ArXiv (n.d.).","ieee":"N. H. Konstantinov and C. Lampert, “Fairness through regularization for learning to rank,” arXiv. .","mla":"Konstantinov, Nikola H., and Christoph Lampert. “Fairness through Regularization for Learning to Rank.” ArXiv, 2102.05996, doi:10.48550/arXiv.2102.05996."},"date_updated":"2023-09-07T13:42:08Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","type":"preprint","status":"public","_id":"10803","article_number":"2102.05996","date_created":"2022-02-28T14:13:59Z","related_material":{"record":[{"relation":"dissertation_contains","id":"10799","status":"public"}]},"date_published":"2021-06-07T00:00:00Z","doi":"10.48550/arXiv.2102.05996","publication_status":"submitted","year":"2021","language":[{"iso":"eng"}],"publication":"arXiv","day":"07","oa":1,"main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2102.05996"}],"month":"06","abstract":[{"text":"Given the abundance of applications of ranking in recent years, addressing fairness concerns around automated ranking systems becomes necessary for increasing the trust among end-users. Previous work on fair ranking has mostly focused on application-specific fairness notions, often tailored to online advertising, and it rarely considers learning as part of the process. In this work, we show how to transfer numerous fairness notions from binary classification to a learning to rank setting. Our formalism allows us to design methods for incorporating fairness objectives with provable generalization guarantees. 
An extensive experimental evaluation shows that our method can improve ranking fairness substantially with no or only little loss of model quality.","lang":"eng"}],"oa_version":"Preprint"},{"title":"Underspecification in deep learning","author":[{"id":"3EC6EE64-F248-11E8-B48F-1D18A9856A87","first_name":"Phuong","full_name":"Bui Thi Mai, Phuong","last_name":"Bui Thi Mai"}],"article_processing_charge":"No","user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","citation":{"ista":"Phuong M. 2021. Underspecification in deep learning. Institute of Science and Technology Austria.","chicago":"Phuong, Mary. “Underspecification in Deep Learning.” Institute of Science and Technology Austria, 2021. https://doi.org/10.15479/AT:ISTA:9418.","ieee":"M. Phuong, “Underspecification in deep learning,” Institute of Science and Technology Austria, 2021.","short":"M. Phuong, Underspecification in Deep Learning, Institute of Science and Technology Austria, 2021.","ama":"Phuong M. Underspecification in deep learning. 2021. doi:10.15479/AT:ISTA:9418","apa":"Phuong, M. (2021). Underspecification in deep learning. Institute of Science and Technology Austria. https://doi.org/10.15479/AT:ISTA:9418","mla":"Phuong, Mary. Underspecification in Deep Learning. 
Institute of Science and Technology Austria, 2021, doi:10.15479/AT:ISTA:9418."},"date_published":"2021-05-30T00:00:00Z","doi":"10.15479/AT:ISTA:9418","date_created":"2021-05-24T13:06:23Z","page":"125","day":"30","has_accepted_license":"1","year":"2021","publisher":"Institute of Science and Technology Austria","oa":1,"file_date_updated":"2021-05-24T11:56:02Z","department":[{"_id":"GradSch"},{"_id":"ChLa"}],"ddc":["000"],"supervisor":[{"first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph","last_name":"Lampert"}],"date_updated":"2023-09-08T11:11:12Z","status":"public","type":"dissertation","_id":"9418","related_material":{"record":[{"relation":"part_of_dissertation","status":"deleted","id":"7435"},{"id":"7481","status":"public","relation":"part_of_dissertation"},{"relation":"part_of_dissertation","id":"9416","status":"public"},{"status":"public","id":"7479","relation":"part_of_dissertation"}]},"file":[{"file_id":"9419","checksum":"4f0abe64114cfed264f9d36e8d1197e3","success":1,"content_type":"application/pdf","access_level":"open_access","relation":"main_file","date_created":"2021-05-24T11:22:29Z","file_name":"mph-thesis-v519-pdfimages.pdf","date_updated":"2021-05-24T11:22:29Z","file_size":2673905,"creator":"bphuong"},{"file_id":"9420","checksum":"f5699e876bc770a9b0df8345a77720a2","content_type":"application/zip","access_level":"closed","relation":"source_file","date_created":"2021-05-24T11:56:02Z","file_name":"thesis.zip","date_updated":"2021-05-24T11:56:02Z","file_size":92995100,"creator":"bphuong"}],"language":[{"iso":"eng"}],"publication_identifier":{"issn":["2663-337X"]},"publication_status":"published","degree_awarded":"PhD","month":"05","alternative_title":["ISTA Thesis"],"oa_version":"Published Version","abstract":[{"text":"Deep learning is best known for its empirical success across a wide range of applications\r\nspanning computer vision, natural language processing and speech. 
Of equal significance,\r\nthough perhaps less known, are its ramifications for learning theory: deep networks have\r\nbeen observed to perform surprisingly well in the high-capacity regime, aka the overfitting\r\nor underspecified regime. Classically, this regime on the far right of the bias-variance curve\r\nis associated with poor generalisation; however, recent experiments with deep networks\r\nchallenge this view.\r\n\r\nThis thesis is devoted to investigating various aspects of underspecification in deep learning.\r\nFirst, we argue that deep learning models are underspecified on two levels: a) any given\r\ntraining dataset can be fit by many different functions, and b) any given function can be\r\nexpressed by many different parameter configurations. We refer to the second kind of\r\nunderspecification as parameterisation redundancy and we precisely characterise its extent.\r\nSecond, we characterise the implicit criteria (the inductive bias) that guide learning in the\r\nunderspecified regime. Specifically, we consider a nonlinear but tractable classification\r\nsetting, and show that given the choice, neural networks learn classifiers with a large margin.\r\nThird, we consider learning scenarios where the inductive bias is not by itself sufficient to\r\ndeal with underspecification. We then study different ways of ‘tightening the specification’: i)\r\nIn the setting of representation learning with variational autoencoders, we propose a hand-\r\ncrafted regulariser based on mutual information. ii) In the setting of binary classification, we\r\nconsider soft-label (real-valued) supervision. We derive a generalisation bound for linear\r\nnetworks supervised in this way and verify that soft labels facilitate fast learning. 
Finally, we\r\nexplore an application of soft-label supervision to the training of multi-exit models.","lang":"eng"}],"acknowledged_ssus":[{"_id":"ScienComp"},{"_id":"CampIT"},{"_id":"E-Lib"}]},{"type":"book_chapter","status":"public","_id":"14987","article_processing_charge":"No","author":[{"full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","last_name":"Lampert","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph"}],"editor":[{"first_name":"Katsushi","full_name":"Ikeuchi, Katsushi","last_name":"Ikeuchi"}],"department":[{"_id":"ChLa"}],"title":"Zero-Shot Learning","citation":{"mla":"Lampert, Christoph. “Zero-Shot Learning.” Computer Vision, edited by Katsushi Ikeuchi, 2nd ed., Springer, 2021, pp. 1395–97, doi:10.1007/978-3-030-63416-2_874.","apa":"Lampert, C. (2021). Zero-Shot Learning. In K. Ikeuchi (Ed.), Computer Vision (2nd ed., pp. 1395–1397). Cham: Springer. https://doi.org/10.1007/978-3-030-63416-2_874","ama":"Lampert C. Zero-Shot Learning. In: Ikeuchi K, ed. Computer Vision. 2nd ed. Cham: Springer; 2021:1395-1397. doi:10.1007/978-3-030-63416-2_874","ieee":"C. Lampert, “Zero-Shot Learning,” in Computer Vision, 2nd ed., K. Ikeuchi, Ed. Cham: Springer, 2021, pp. 1395–1397.","short":"C. Lampert, in:, K. Ikeuchi (Ed.), Computer Vision, 2nd ed., Springer, Cham, 2021, pp. 1395–1397.","chicago":"Lampert, Christoph. “Zero-Shot Learning.” In Computer Vision, edited by Katsushi Ikeuchi, 2nd ed., 1395–97. Cham: Springer, 2021. https://doi.org/10.1007/978-3-030-63416-2_874.","ista":"Lampert C. 2021.Zero-Shot Learning. In: Computer Vision. , 1395–1397."},"date_updated":"2024-02-19T10:59:04Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","edition":"2","quality_controlled":"1","publisher":"Springer","place":"Cham","month":"10","abstract":[{"text":"The goal of zero-shot learning is to construct a classifier that can identify object classes for which no training examples are available. 
When training data for some of the object classes is available but not for others, the name generalized zero-shot learning is commonly used.\r\nIn a wider sense, the phrase zero-shot is also used to describe other machine learning-based approaches that require no training data from the problem of interest, such as zero-shot action recognition or zero-shot machine translation.","lang":"eng"}],"oa_version":"None","page":"1395-1397","date_created":"2024-02-14T14:05:32Z","date_published":"2021-10-13T00:00:00Z","doi":"10.1007/978-3-030-63416-2_874","year":"2021","publication_status":"published","publication_identifier":{"isbn":["9783030634155"],"eisbn":["9783030634162"]},"publication":"Computer Vision","language":[{"iso":"eng"}],"day":"13"},{"status":"public","type":"preprint","tmp":{"short":"CC BY-SA (4.0)","image":"/images/cc_by_sa.png","legal_code_url":"https://creativecommons.org/licenses/by-sa/4.0/legalcode","name":"Creative Commons Attribution-ShareAlike 4.0 International Public License (CC BY-SA 4.0)"},"article_number":"2004.00642","_id":"8063","title":"Object-centric image generation with factored depths, locations, and appearances","department":[{"_id":"ChLa"}],"author":[{"first_name":"Titas","full_name":"Anciukevicius, Titas","last_name":"Anciukevicius"},{"orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph","last_name":"Lampert","first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87"},{"id":"13C09E74-18D9-11E9-8878-32CFE5697425","first_name":"Paul M","orcid":"0000-0002-5198-7445","full_name":"Henderson, Paul M","last_name":"Henderson"}],"external_id":{"arxiv":["2004.00642"]},"article_processing_charge":"No","ddc":["004"],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"mla":"Anciukevicius, Titas, et al. “Object-Centric Image Generation with Factored Depths, Locations, and Appearances.” ArXiv, 2004.00642.","ieee":"T. Anciukevicius, C. Lampert, and P. M. 
Henderson, “Object-centric image generation with factored depths, locations, and appearances,” arXiv. .","short":"T. Anciukevicius, C. Lampert, P.M. Henderson, ArXiv (n.d.).","ama":"Anciukevicius T, Lampert C, Henderson PM. Object-centric image generation with factored depths, locations, and appearances. arXiv.","apa":"Anciukevicius, T., Lampert, C., & Henderson, P. M. (n.d.). Object-centric image generation with factored depths, locations, and appearances. arXiv.","chicago":"Anciukevicius, Titas, Christoph Lampert, and Paul M Henderson. “Object-Centric Image Generation with Factored Depths, Locations, and Appearances.” ArXiv, n.d.","ista":"Anciukevicius T, Lampert C, Henderson PM. Object-centric image generation with factored depths, locations, and appearances. arXiv, 2004.00642."},"date_updated":"2021-01-12T08:16:44Z","month":"04","oa":1,"main_file_link":[{"url":"https://arxiv.org/abs/2004.00642","open_access":"1"}],"oa_version":"Preprint","abstract":[{"lang":"eng","text":"We present a generative model of images that explicitly reasons over the set\r\nof objects they show. Our model learns a structured latent representation that\r\nseparates objects from each other and from the background; unlike prior works,\r\nit explicitly represents the 2D position and depth of each object, as well as\r\nan embedding of its segmentation mask and appearance. The model can be trained\r\nfrom images alone in a purely unsupervised fashion without the need for object\r\nmasks or depth information. 
Moreover, it always generates complete objects,\r\neven though a significant fraction of training images contain occlusions.\r\nFinally, we show that our model can infer decompositions of novel images into\r\ntheir constituent objects, including accurate prediction of depth ordering and\r\nsegmentation of occluded parts."}],"date_published":"2020-04-01T00:00:00Z","license":"https://creativecommons.org/licenses/by-sa/4.0/","date_created":"2020-06-29T23:55:23Z","day":"01","publication":"arXiv","language":[{"iso":"eng"}],"publication_status":"submitted","year":"2020"},{"publication_status":"published","publication_identifier":{"isbn":["9781713829546"]},"language":[{"iso":"eng"}],"volume":33,"abstract":[{"lang":"eng","text":"A natural approach to generative modeling of videos is to represent them as a composition of moving objects. Recent works model a set of 2D sprites over a slowly-varying background, but without considering the underlying 3D scene that\r\ngives rise to them. We instead propose to model a video as the view seen while moving through a scene with multiple 3D objects and a 3D background. Our model is trained from monocular videos without any supervision, yet learns to\r\ngenerate coherent 3D scenes containing several moving objects. We conduct detailed experiments on two datasets, going beyond the visual complexity supported by state-of-the-art generative approaches. 
We evaluate our method on\r\ndepth-prediction and 3D object detection---tasks which cannot be addressed by those earlier works---and show it out-performs them even on 2D instance segmentation and tracking."}],"acknowledged_ssus":[{"_id":"ScienComp"}],"oa_version":"Preprint","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2007.06705"}],"intvolume":" 33","month":"07","date_updated":"2023-04-25T09:49:58Z","department":[{"_id":"ChLa"}],"_id":"8188","conference":{"start_date":"2020-12-06","end_date":"2020-12-12","location":"Vancouver, Canada","name":"NeurIPS: Neural Information Processing Systems"},"type":"conference","status":"public","year":"2020","publication":"34th Conference on Neural Information Processing Systems","day":"07","page":"3106–3117","date_created":"2020-07-31T16:59:19Z","date_published":"2020-07-07T00:00:00Z","acknowledgement":"This research was supported by the Scientific Service Units (SSU) of IST Austria through resources\r\nprovided by Scientific Computing (SciComp). PH is employed part-time by Blackford Analysis, but\r\nthey did not support this project in any way.","oa":1,"publisher":"Curran Associates","quality_controlled":"1","citation":{"ama":"Henderson PM, Lampert C. Unsupervised object-centric video generation and decomposition in 3D. In: 34th Conference on Neural Information Processing Systems. Vol 33. Curran Associates; 2020:3106–3117.","apa":"Henderson, P. M., & Lampert, C. (2020). Unsupervised object-centric video generation and decomposition in 3D. In 34th Conference on Neural Information Processing Systems (Vol. 33, pp. 3106–3117). Vancouver, Canada: Curran Associates.","short":"P.M. Henderson, C. Lampert, in:, 34th Conference on Neural Information Processing Systems, Curran Associates, 2020, pp. 3106–3117.","ieee":"P. M. Henderson and C. Lampert, “Unsupervised object-centric video generation and decomposition in 3D,” in 34th Conference on Neural Information Processing Systems, Vancouver, Canada, 2020, vol. 33, pp. 
3106–3117.","mla":"Henderson, Paul M., and Christoph Lampert. “Unsupervised Object-Centric Video Generation and Decomposition in 3D.” 34th Conference on Neural Information Processing Systems, vol. 33, Curran Associates, 2020, pp. 3106–3117.","ista":"Henderson PM, Lampert C. 2020. Unsupervised object-centric video generation and decomposition in 3D. 34th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems vol. 33, 3106–3117.","chicago":"Henderson, Paul M, and Christoph Lampert. “Unsupervised Object-Centric Video Generation and Decomposition in 3D.” In 34th Conference on Neural Information Processing Systems, 33:3106–3117. Curran Associates, 2020."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","external_id":{"arxiv":["2007.06705"]},"article_processing_charge":"No","author":[{"last_name":"Henderson","full_name":"Henderson, Paul M","orcid":"0000-0002-5198-7445","first_name":"Paul M","id":"13C09E74-18D9-11E9-8878-32CFE5697425"},{"first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","last_name":"Lampert","orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph"}],"title":"Unsupervised object-centric video generation and decomposition in 3D"},{"intvolume":" 128","month":"04","scopus_import":"1","oa_version":"Published Version","abstract":[{"lang":"eng","text":"We present a unified framework tackling two problems: class-specific 3D reconstruction from a single image, and generation of new 3D shape samples. These tasks have received considerable attention recently; however, most existing approaches rely on 3D supervision, annotation of 2D images with keypoints or poses, and/or training with multiple views of each object instance. Our framework is very general: it can be trained in similar settings to existing approaches, while also supporting weaker supervision. Importantly, it can be trained purely from 2D images, without pose annotations, and with only a single view per instance. 
We employ meshes as an output representation, instead of voxels used in most prior work. This allows us to reason over lighting parameters and exploit shading information during training, which previous 2D-supervised methods cannot. Thus, our method can learn to generate and reconstruct concave object classes. We evaluate our approach in various settings, showing that: (i) it learns to disentangle shape from pose and lighting; (ii) using shading in the loss improves performance compared to just silhouettes; (iii) when using a standard single white light, our model outperforms state-of-the-art 2D-supervised methods, both with and without pose supervision, thanks to exploiting shading cues; (iv) performance improves further when using multiple coloured lights, even approaching that of state-of-the-art 3D-supervised methods; (v) shapes produced by our model capture smooth surfaces and fine details better than voxel-based approaches; and (vi) our approach supports concave classes such as bathtubs and sofas, which methods based on silhouettes cannot learn."}],"volume":128,"language":[{"iso":"eng"}],"file":[{"file_id":"6973","checksum":"a0f05dd4f5f64e4f713d8d9d4b5b1e3f","content_type":"application/pdf","access_level":"open_access","relation":"main_file","date_created":"2019-10-25T10:28:29Z","file_name":"2019_CompVision_Henderson.pdf","date_updated":"2020-07-14T12:47:46Z","file_size":2243134,"creator":"dernst"}],"publication_status":"published","publication_identifier":{"eissn":["1573-1405"],"issn":["0920-5691"]},"status":"public","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY 
(4.0)"},"article_type":"original","type":"journal_article","_id":"6952","department":[{"_id":"ChLa"}],"file_date_updated":"2020-07-14T12:47:46Z","ddc":["004"],"date_updated":"2023-08-17T14:01:16Z","oa":1,"quality_controlled":"1","publisher":"Springer Nature","acknowledgement":"Open access funding provided by Institute of Science and Technology (IST Austria).","date_created":"2019-10-17T13:38:20Z","doi":"10.1007/s11263-019-01219-8","date_published":"2020-04-01T00:00:00Z","page":"835-854","publication":"International Journal of Computer Vision","day":"01","year":"2020","isi":1,"has_accepted_license":"1","project":[{"_id":"B67AFEDC-15C9-11EA-A837-991A96BB2854","name":"IST Austria Open Access Fund"}],"title":"Learning single-image 3D reconstruction by generative modelling of shape, pose and shading","external_id":{"isi":["000491042100002"],"arxiv":["1901.06447"]},"article_processing_charge":"Yes (via OA deal)","author":[{"first_name":"Paul M","id":"13C09E74-18D9-11E9-8878-32CFE5697425","full_name":"Henderson, Paul M","orcid":"0000-0002-5198-7445","last_name":"Henderson"},{"full_name":"Ferrari, Vittorio","last_name":"Ferrari","first_name":"Vittorio"}],"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","citation":{"chicago":"Henderson, Paul M, and Vittorio Ferrari. “Learning Single-Image 3D Reconstruction by Generative Modelling of Shape, Pose and Shading.” International Journal of Computer Vision. Springer Nature, 2020. https://doi.org/10.1007/s11263-019-01219-8.","ista":"Henderson PM, Ferrari V. 2020. Learning single-image 3D reconstruction by generative modelling of shape, pose and shading. International Journal of Computer Vision. 128, 835–854.","mla":"Henderson, Paul M., and Vittorio Ferrari. “Learning Single-Image 3D Reconstruction by Generative Modelling of Shape, Pose and Shading.” International Journal of Computer Vision, vol. 128, Springer Nature, 2020, pp. 835–54, doi:10.1007/s11263-019-01219-8.","ieee":"P. M. Henderson and V. 
Ferrari, “Learning single-image 3D reconstruction by generative modelling of shape, pose and shading,” International Journal of Computer Vision, vol. 128. Springer Nature, pp. 835–854, 2020.","short":"P.M. Henderson, V. Ferrari, International Journal of Computer Vision 128 (2020) 835–854.","ama":"Henderson PM, Ferrari V. Learning single-image 3D reconstruction by generative modelling of shape, pose and shading. International Journal of Computer Vision. 2020;128:835-854. doi:10.1007/s11263-019-01219-8","apa":"Henderson, P. M., & Ferrari, V. (2020). Learning single-image 3D reconstruction by generative modelling of shape, pose and shading. International Journal of Computer Vision. Springer Nature. https://doi.org/10.1007/s11263-019-01219-8"}},{"month":"03","main_file_link":[{"url":"https://arxiv.org/abs/2004.12623","open_access":"1"}],"scopus_import":"1","oa_version":"Preprint","abstract":[{"lang":"eng","text":"State-of-the-art detection systems are generally evaluated on their ability to exhaustively retrieve objects densely distributed in the image, across a wide variety of appearances and semantic categories. Orthogonal to this, many real-life object detection applications, for example in remote sensing, instead require dealing with large images that contain only a few small objects of a single class, scattered heterogeneously across the space. In addition, they are often subject to strict computational constraints, such as limited battery capacity and computing power. To tackle these more practical scenarios, we propose a novel flexible detection scheme that efficiently adapts to variable object sizes and densities: We rely on a sequence of detection stages, each of which has the ability to predict groups of objects as well as individuals. Similar to a detection cascade, this multi-stage architecture spares computational effort by discarding large irrelevant regions of the image early during the detection process. 
The ability to group objects provides further computational and memory savings, as it allows working with lower image resolutions in early stages, where groups are more easily detected than individuals, as they are more salient. We report experimental results on two aerial image datasets, and show that the proposed method is as accurate yet computationally more efficient than standard single-shot detectors, consistently across three different backbone architectures."}],"related_material":{"record":[{"relation":"dissertation_contains","id":"8331","status":"deleted"},{"relation":"dissertation_contains","id":"8390","status":"public"}]},"language":[{"iso":"eng"}],"publication_status":"published","publication_identifier":{"isbn":["9781728165530"]},"status":"public","conference":{"name":"WACV: Winter Conference on Applications of Computer Vision","location":"Snowmass Village, CO, United States","end_date":"2020-03-05","start_date":"2020-03-01"},"type":"conference","_id":"7936","department":[{"_id":"ChLa"}],"date_updated":"2023-09-07T13:16:17Z","oa":1,"quality_controlled":"1","publisher":"IEEE","date_created":"2020-06-07T22:00:53Z","doi":"10.1109/WACV45572.2020.9093288","date_published":"2020-03-01T00:00:00Z","publication":"IEEE Winter Conference on Applications of Computer Vision","day":"01","year":"2020","article_number":"1716-1725","title":"Localizing grouped instances for efficient detection in low-resource scenarios","article_processing_charge":"No","external_id":{"arxiv":["2004.12623"]},"author":[{"id":"3811D890-F248-11E8-B48F-1D18A9856A87","first_name":"Amélie","full_name":"Royer, Amélie","orcid":"0000-0002-8407-0705","last_name":"Royer"},{"orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph","last_name":"Lampert","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"ista":"Royer A, Lampert C. 2020. Localizing grouped instances for efficient detection in low-resource scenarios. 
IEEE Winter Conference on Applications of Computer Vision. WACV: Winter Conference on Applications of Computer Vision, 1716–1725.","chicago":"Royer, Amélie, and Christoph Lampert. “Localizing Grouped Instances for Efficient Detection in Low-Resource Scenarios.” In IEEE Winter Conference on Applications of Computer Vision. IEEE, 2020. https://doi.org/10.1109/WACV45572.2020.9093288.","short":"A. Royer, C. Lampert, in:, IEEE Winter Conference on Applications of Computer Vision, IEEE, 2020.","ieee":"A. Royer and C. Lampert, “Localizing grouped instances for efficient detection in low-resource scenarios,” in IEEE Winter Conference on Applications of Computer Vision, Snowmass Village, CO, United States, 2020.","apa":"Royer, A., & Lampert, C. (2020). Localizing grouped instances for efficient detection in low-resource scenarios. In IEEE Winter Conference on Applications of Computer Vision. Snowmass Village, CO, United States: IEEE. https://doi.org/10.1109/WACV45572.2020.9093288","ama":"Royer A, Lampert C. Localizing grouped instances for efficient detection in low-resource scenarios. In: IEEE Winter Conference on Applications of Computer Vision. IEEE; 2020. doi:10.1109/WACV45572.2020.9093288","mla":"Royer, Amélie, and Christoph Lampert. “Localizing Grouped Instances for Efficient Detection in Low-Resource Scenarios.” IEEE Winter Conference on Applications of Computer Vision, 1716–1725, IEEE, 2020, doi:10.1109/WACV45572.2020.9093288."}},{"abstract":[{"lang":"eng","text":"Fine-tuning is a popular way of exploiting knowledge contained in a pre-trained convolutional network for a new visual recognition task. 
However, the orthogonal setting of transferring knowledge from a pretrained network to a visually different yet semantically close source is rarely considered: This commonly happens with real-life data, which is not necessarily as clean as the training source (noise, geometric transformations, different modalities, etc.). To tackle such scenarios, we introduce a new, generalized form of fine-tuning, called flex-tuning, in which any individual unit (e.g. layer) of a network can be tuned, and the most promising one is chosen automatically. In order to make the method appealing for practical use, we propose two lightweight and faster selection procedures that prove to be good approximations in practice. We study these selection criteria empirically across a variety of domain shifts and data scarcity scenarios, and show that fine-tuning individual units, despite its simplicity, yields very good results as an adaptation technique. As it turns out, in contrast to common practice, rather than the last fully-connected unit it is best to tune an intermediate or early one in many domain-shift scenarios, which is accurately detected by flex-tuning."}],"oa_version":"Preprint","main_file_link":[{"url":"http://arxiv.org/abs/2008.11995","open_access":"1"}],"scopus_import":"1","month":"03","publication_status":"published","publication_identifier":{"isbn":["9781728165530"]},"language":[{"iso":"eng"}],"related_material":{"record":[{"relation":"dissertation_contains","id":"8331","status":"deleted"},{"status":"public","id":"8390","relation":"dissertation_contains"}]},"_id":"7937","conference":{"location":"Snowmass Village, CO, United States","end_date":"2020-03-05","start_date":"2020-03-01","name":"WACV: Winter Conference on Applications of Computer Vision"},"type":"conference","status":"public","date_updated":"2023-09-07T13:16:17Z","department":[{"_id":"ChLa"}],"oa":1,"quality_controlled":"1","publisher":"IEEE","year":"2020","publication":"2020 IEEE Winter Conference on Applications 
of Computer Vision","day":"01","date_created":"2020-06-07T22:00:53Z","doi":"10.1109/WACV45572.2020.9093635","date_published":"2020-03-01T00:00:00Z","article_number":"2180-2189","citation":{"ama":"Royer A, Lampert C. A flexible selection scheme for minimum-effort transfer learning. In: 2020 IEEE Winter Conference on Applications of Computer Vision. IEEE; 2020. doi:10.1109/WACV45572.2020.9093635","apa":"Royer, A., & Lampert, C. (2020). A flexible selection scheme for minimum-effort transfer learning. In 2020 IEEE Winter Conference on Applications of Computer Vision. Snowmass Village, CO, United States: IEEE. https://doi.org/10.1109/WACV45572.2020.9093635","short":"A. Royer, C. Lampert, in:, 2020 IEEE Winter Conference on Applications of Computer Vision, IEEE, 2020.","ieee":"A. Royer and C. Lampert, “A flexible selection scheme for minimum-effort transfer learning,” in 2020 IEEE Winter Conference on Applications of Computer Vision, Snowmass Village, CO, United States, 2020.","mla":"Royer, Amélie, and Christoph Lampert. “A Flexible Selection Scheme for Minimum-Effort Transfer Learning.” 2020 IEEE Winter Conference on Applications of Computer Vision, 2180–2189, IEEE, 2020, doi:10.1109/WACV45572.2020.9093635.","ista":"Royer A, Lampert C. 2020. A flexible selection scheme for minimum-effort transfer learning. 2020 IEEE Winter Conference on Applications of Computer Vision. WACV: Winter Conference on Applications of Computer Vision, 2180–2189.","chicago":"Royer, Amélie, and Christoph Lampert. “A Flexible Selection Scheme for Minimum-Effort Transfer Learning.” In 2020 IEEE Winter Conference on Applications of Computer Vision. IEEE, 2020. 
https://doi.org/10.1109/WACV45572.2020.9093635."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","article_processing_charge":"No","external_id":{"arxiv":["2008.11995"]},"author":[{"first_name":"Amélie","id":"3811D890-F248-11E8-B48F-1D18A9856A87","full_name":"Royer, Amélie","orcid":"0000-0002-8407-0705","last_name":"Royer"},{"full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","last_name":"Lampert","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph"}],"title":"A flexible selection scheme for minimum-effort transfer learning"},{"related_material":{"record":[{"relation":"dissertation_contains","id":"8331","status":"deleted"},{"relation":"dissertation_contains","id":"8390","status":"public"}]},"language":[{"iso":"eng"}],"publication_identifier":{"isbn":["9783030306717"]},"publication_status":"published","month":"01","scopus_import":"1","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/1711.05139"}],"oa_version":"Preprint","abstract":[{"text":"Image translation refers to the task of mapping images from a visual domain to another. Given two unpaired collections of images, we aim to learn a mapping between the corpus-level style of each collection, while preserving semantic content shared across the two domains. We introduce xgan, a dual adversarial auto-encoder, which captures a shared representation of the common domain semantic content in an unsupervised way, while jointly learning the domain-to-domain image translations in both directions. We exploit ideas from the domain adaptation literature and define a semantic consistency loss which encourages the learned embedding to preserve semantics shared across domains. We report promising qualitative results for the task of face-to-cartoon translation. 
The cartoon dataset we collected for this purpose, “CartoonSet”, is also publicly available as a new benchmark for semantic style transfer at https://google.github.io/cartoonset/index.html.","lang":"eng"}],"department":[{"_id":"ChLa"}],"date_updated":"2023-09-07T13:16:18Z","status":"public","type":"book_chapter","_id":"8092","date_published":"2020-01-08T00:00:00Z","doi":"10.1007/978-3-030-30671-7_3","date_created":"2020-07-05T22:00:46Z","page":"33-49","day":"08","publication":"Domain Adaptation for Visual Understanding","year":"2020","quality_controlled":"1","publisher":"Springer Nature","oa":1,"editor":[{"last_name":"Singh","full_name":"Singh, Richa","first_name":"Richa"},{"last_name":"Vatsa","full_name":"Vatsa, Mayank","first_name":"Mayank"},{"full_name":"Patel, Vishal M.","last_name":"Patel","first_name":"Vishal M."},{"first_name":"Nalini","last_name":"Ratha","full_name":"Ratha, Nalini"}],"title":"XGAN: Unsupervised image-to-image translation for many-to-many mappings","author":[{"id":"3811D890-F248-11E8-B48F-1D18A9856A87","first_name":"Amélie","full_name":"Royer, Amélie","orcid":"0000-0002-8407-0705","last_name":"Royer"},{"last_name":"Bousmalis","full_name":"Bousmalis, Konstantinos","first_name":"Konstantinos"},{"full_name":"Gouws, Stephan","last_name":"Gouws","first_name":"Stephan"},{"first_name":"Fred","last_name":"Bertsch","full_name":"Bertsch, Fred"},{"last_name":"Mosseri","full_name":"Mosseri, Inbar","first_name":"Inbar"},{"first_name":"Forrester","full_name":"Cole, Forrester","last_name":"Cole"},{"full_name":"Murphy, Kevin","last_name":"Murphy","first_name":"Kevin"}],"external_id":{"arxiv":["1711.05139"]},"article_processing_charge":"No","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"chicago":"Royer, Amélie, Konstantinos Bousmalis, Stephan Gouws, Fred Bertsch, Inbar Mosseri, Forrester Cole, and Kevin Murphy. 
“XGAN: Unsupervised Image-to-Image Translation for Many-to-Many Mappings.” In Domain Adaptation for Visual Understanding, edited by Richa Singh, Mayank Vatsa, Vishal M. Patel, and Nalini Ratha, 33–49. Springer Nature, 2020. https://doi.org/10.1007/978-3-030-30671-7_3.","ista":"Royer A, Bousmalis K, Gouws S, Bertsch F, Mosseri I, Cole F, Murphy K. 2020. XGAN: Unsupervised image-to-image translation for many-to-many mappings. In: Domain Adaptation for Visual Understanding. , 33–49.","mla":"Royer, Amélie, et al. “XGAN: Unsupervised Image-to-Image Translation for Many-to-Many Mappings.” Domain Adaptation for Visual Understanding, edited by Richa Singh et al., Springer Nature, 2020, pp. 33–49, doi:10.1007/978-3-030-30671-7_3.","short":"A. Royer, K. Bousmalis, S. Gouws, F. Bertsch, I. Mosseri, F. Cole, K. Murphy, in:, R. Singh, M. Vatsa, V.M. Patel, N. Ratha (Eds.), Domain Adaptation for Visual Understanding, Springer Nature, 2020, pp. 33–49.","ieee":"A. Royer et al., “XGAN: Unsupervised image-to-image translation for many-to-many mappings,” in Domain Adaptation for Visual Understanding, R. Singh, M. Vatsa, V. M. Patel, and N. Ratha, Eds. Springer Nature, 2020, pp. 33–49.","apa":"Royer, A., Bousmalis, K., Gouws, S., Bertsch, F., Mosseri, I., Cole, F., & Murphy, K. (2020). XGAN: Unsupervised image-to-image translation for many-to-many mappings. In R. Singh, M. Vatsa, V. M. Patel, & N. Ratha (Eds.), Domain Adaptation for Visual Understanding (pp. 33–49). Springer Nature. https://doi.org/10.1007/978-3-030-30671-7_3","ama":"Royer A, Bousmalis K, Gouws S, et al. XGAN: Unsupervised image-to-image translation for many-to-many mappings. In: Singh R, Vatsa M, Patel VM, Ratha N, eds. Domain Adaptation for Visual Understanding. Springer Nature; 2020:33-49. 
doi:10.1007/978-3-030-30671-7_3"}},{"status":"public","conference":{"name":"ICLR: International Conference on Learning Representations","location":"Online","end_date":"2020-04-30","start_date":"2020-04-27"},"type":"conference","_id":"7481","department":[{"_id":"ChLa"}],"file_date_updated":"2020-07-14T12:47:59Z","title":"Functional vs. parametric equivalence of ReLU networks","article_processing_charge":"No","author":[{"first_name":"Phuong","id":"3EC6EE64-F248-11E8-B48F-1D18A9856A87","last_name":"Bui Thi Mai","full_name":"Bui Thi Mai, Phuong"},{"last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","ddc":["000"],"date_updated":"2023-09-07T13:29:50Z","citation":{"chicago":"Phuong, Mary, and Christoph Lampert. “Functional vs. Parametric Equivalence of ReLU Networks.” In 8th International Conference on Learning Representations, 2020.","ista":"Phuong M, Lampert C. 2020. Functional vs. parametric equivalence of ReLU networks. 8th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","mla":"Phuong, Mary, and Christoph Lampert. “Functional vs. Parametric Equivalence of ReLU Networks.” 8th International Conference on Learning Representations, 2020.","apa":"Phuong, M., & Lampert, C. (2020). Functional vs. parametric equivalence of ReLU networks. In 8th International Conference on Learning Representations. Online.","ama":"Phuong M, Lampert C. Functional vs. parametric equivalence of ReLU networks. In: 8th International Conference on Learning Representations. ; 2020.","short":"M. Phuong, C. Lampert, in:, 8th International Conference on Learning Representations, 2020.","ieee":"M. Phuong and C. Lampert, “Functional vs. 
parametric equivalence of ReLU networks,” in 8th International Conference on Learning Representations, Online, 2020."},"month":"04","oa":1,"quality_controlled":"1","oa_version":"Published Version","abstract":[{"text":"We address the following question: How redundant is the parameterisation of ReLU networks? Specifically, we consider transformations of the weight space which leave the function implemented by the network intact. Two such transformations are known for feed-forward architectures: permutation of neurons within a layer, and positive scaling of all incoming weights of a neuron coupled with inverse scaling of its outgoing weights. In this work, we show for architectures with non-increasing widths that permutation and scaling are in fact the only function-preserving weight transformations. For any eligible architecture we give an explicit construction of a neural network such that any other network that implements the same function can be obtained from the original one by the application of permutations and rescaling. The proof relies on a geometric understanding of boundaries between linear regions of ReLU networks, and we hope the developed mathematical tools are of independent interest.","lang":"eng"}],"date_created":"2020-02-11T09:07:37Z","related_material":{"record":[{"id":"9418","status":"public","relation":"dissertation_contains"}],"link":[{"url":"https://iclr.cc/virtual_2020/poster_Bylx-TNKvH.html","relation":"supplementary_material"}]},"date_published":"2020-04-26T00:00:00Z","language":[{"iso":"eng"}],"publication":"8th International Conference on Learning Representations","file":[{"file_size":405469,"date_updated":"2020-07-14T12:47:59Z","creator":"bphuong","file_name":"main.pdf","date_created":"2020-02-11T09:07:27Z","content_type":"application/pdf","relation":"main_file","access_level":"open_access","file_id":"7482","checksum":"8d372ea5defd8cb8fdc430111ed754a9"}],"day":"26","year":"2020","publication_status":"published","has_accepted_license":"1"}]