[{"article_processing_charge":"No","external_id":{"arxiv":["1806.10823"],"pmid":["30728300"],"isi":["000459074400013"]},"author":[{"first_name":"Moritz","id":"29E0800A-F248-11E8-B48F-1D18A9856A87","full_name":"Lang, Moritz","last_name":"Lang"},{"last_name":"Shkolnikov","orcid":"0000-0002-4310-178X","full_name":"Shkolnikov, Mikhail","id":"35084A62-F248-11E8-B48F-1D18A9856A87","first_name":"Mikhail"}],"title":"Harmonic dynamics of the Abelian sandpile","citation":{"mla":"Lang, Moritz, and Mikhail Shkolnikov. “Harmonic Dynamics of the Abelian Sandpile.” Proceedings of the National Academy of Sciences, vol. 116, no. 8, National Academy of Sciences, 2019, pp. 2821–30, doi:10.1073/pnas.1812015116.","ama":"Lang M, Shkolnikov M. Harmonic dynamics of the Abelian sandpile. Proceedings of the National Academy of Sciences. 2019;116(8):2821-2830. doi:10.1073/pnas.1812015116","apa":"Lang, M., & Shkolnikov, M. (2019). Harmonic dynamics of the Abelian sandpile. Proceedings of the National Academy of Sciences. National Academy of Sciences. https://doi.org/10.1073/pnas.1812015116","ieee":"M. Lang and M. Shkolnikov, “Harmonic dynamics of the Abelian sandpile,” Proceedings of the National Academy of Sciences, vol. 116, no. 8. National Academy of Sciences, pp. 2821–2830, 2019.","short":"M. Lang, M. Shkolnikov, Proceedings of the National Academy of Sciences 116 (2019) 2821–2830.","chicago":"Lang, Moritz, and Mikhail Shkolnikov. “Harmonic Dynamics of the Abelian Sandpile.” Proceedings of the National Academy of Sciences. National Academy of Sciences, 2019. https://doi.org/10.1073/pnas.1812015116.","ista":"Lang M, Shkolnikov M. 2019. Harmonic dynamics of the Abelian sandpile. Proceedings of the National Academy of Sciences. 116(8), 2821–2830."},"user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","oa":1,"quality_controlled":"1","publisher":"National Academy of Sciences","acknowledgement":"M.L. is grateful to the members of the C Guet and G Tkacik groups for valuable comments and support. \r\nM.S. is grateful to Nikita Kalinin for inspiring communications.\r\n","page":"2821-2830","date_created":"2018-12-11T11:45:08Z","date_published":"2019-02-19T00:00:00Z","doi":"10.1073/pnas.1812015116","year":"2019","isi":1,"publication":"Proceedings of the National Academy of Sciences","day":"19","type":"journal_article","article_type":"original","status":"public","_id":"196","department":[{"_id":"CaGu"},{"_id":"GaTk"},{"_id":"TaHa"}],"date_updated":"2023-09-11T14:09:34Z","main_file_link":[{"open_access":"1","url":"https://doi.org/10.1073/pnas.1812015116"}],"scopus_import":"1","intvolume":" 116","month":"02","abstract":[{"lang":"eng","text":"The abelian sandpile serves as a model to study self-organized criticality, a phenomenon occurring in biological, physical and social processes. The identity of the abelian group is a fractal composed of self-similar patches, and its limit is subject of extensive collaborative research. Here, we analyze the evolution of the sandpile identity under harmonic fields of different orders. We show that this evolution corresponds to periodic cycles through the abelian group characterized by the smooth transformation and apparent conservation of the patches constituting the identity. The dynamics induced by second and third order harmonics resemble smooth stretchings, respectively translations, of the identity, while the ones induced by fourth order harmonics resemble magnifications and rotations. Starting with order three, the dynamics pass through extended regions of seemingly random configurations which spontaneously reassemble into accentuated patterns. We show that the space of harmonic functions projects to the extended analogue of the sandpile group, thus providing a set of universal coordinates identifying configurations between different domains. Since the original sandpile group is a subgroup of the extended one, this directly implies that it admits a natural renormalization. \r\nFurthermore, we show that the harmonic fields can be induced by simple Markov processes, and that the corresponding stochastic dynamics show remarkable robustness over hundreds of periods. Finally, we encode information into seemingly random configurations, and decode this information with an algorithm requiring minimal prior knowledge. Our results suggest that harmonic fields might split the sandpile group into sub-sets showing different critical coefficients, and that it might be possible to extend the fractal structure of the identity beyond the boundaries of its domain. "}],"pmid":1,"oa_version":"Published Version","issue":"8","volume":116,"related_material":{"link":[{"url":"https://ist.ac.at/en/news/famous-sandpile-model-shown-to-move-like-a-traveling-sand-dune/","relation":"press_release","description":"News on IST Webpage"}]},"publication_status":"published","publication_identifier":{"eissn":["1091-6490"]},"language":[{"iso":"eng"}]},{"citation":{"ista":"Locatello F, Tschannen M, Bauer S, Rätsch G, Schölkopf B, Bachem O. 2019. Disentangling factors of variation using few labels. 8th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","chicago":"Locatello, Francesco, Michael Tschannen, Stefan Bauer, Gunnar Rätsch, Bernhard Schölkopf, and Olivier Bachem. “Disentangling Factors of Variation Using Few Labels.” In 8th International Conference on Learning Representations, 2019.","short":"F. Locatello, M. Tschannen, S. Bauer, G. Rätsch, B. Schölkopf, O. Bachem, in:, 8th International Conference on Learning Representations, 2019.","ieee":"F. Locatello, M. Tschannen, S. Bauer, G. Rätsch, B. Schölkopf, and O. Bachem, “Disentangling factors of variation using few labels,” in 8th International Conference on Learning Representations, Virtual, 2019.","ama":"Locatello F, Tschannen M, Bauer S, Rätsch G, Schölkopf B, Bachem O. Disentangling factors of variation using few labels. \r\nIn: 8th International Conference on Learning Representations. ; 2019.","apa":"Locatello, F., Tschannen, M., Bauer, S., Rätsch, G., Schölkopf, B., & Bachem, O. (2019). Disentangling factors of variation using few labels. In 8th International Conference on Learning Representations. Virtual.","mla":"Locatello, Francesco, et al. “Disentangling Factors of Variation Using Few Labels.” 8th International Conference on Learning Representations, 2019."},"date_updated":"2023-09-12T07:01:34Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","extern":"1","external_id":{"arxiv":["1905.01258"]},"article_processing_charge":"No","author":[{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","first_name":"Francesco","last_name":"Locatello","full_name":"Locatello, Francesco","orcid":"0000-0002-4850-0683"},{"first_name":"Michael","last_name":"Tschannen","full_name":"Tschannen, Michael"},{"full_name":"Bauer, Stefan","last_name":"Bauer","first_name":"Stefan"},{"first_name":"Gunnar","last_name":"Rätsch","full_name":"Rätsch, Gunnar"},{"first_name":"Bernhard","last_name":"Schölkopf","full_name":"Schölkopf, Bernhard"},{"last_name":"Bachem","full_name":"Bachem, Olivier","first_name":"Olivier"}],"department":[{"_id":"FrLo"}],"title":"Disentangling factors of variation using few labels","_id":"14184","conference":{"start_date":"2020-04-26","end_date":"2020-05-01","location":"Virtual","name":"ICLR: International Conference on Learning Representations"},"type":"conference","status":"public","publication_status":"published","year":"2019","language":[{"iso":"eng"}],"publication":"8th International Conference on Learning Representations","day":"20","date_created":"2023-08-22T14:06:37Z","date_published":"2019-12-20T00:00:00Z","abstract":[{"lang":"eng","text":"Learning disentangled representations is considered a cornerstone problem in\r\nrepresentation learning. Recently, Locatello et al. \r\n(2019) demonstrated that\r\nunsupervised disentanglement learning without inductive biases is theoretically\r\nimpossible and that existing inductive biases and unsupervised methods do not\r\nallow to consistently learn disentangled representations. However, in many\r\npractical settings, one might have access to a limited amount of supervision,\r\nfor example through manual labeling of (some) factors of variation in a few\r\ntraining examples. In this paper, we investigate the impact of such supervision\r\non state-of-the-art disentanglement methods and perform a large scale study,\r\ntraining over 52000 models under well-defined and reproducible experimental\r\nconditions. We observe that a small number of labeled examples (0.01--0.5\\% of\r\nthe data set), with potentially imprecise and incomplete labels, is sufficient\r\nto perform model selection on state-of-the-art unsupervised models. Further, we\r\ninvestigate the benefit of incorporating supervision into the training process.\r\nOverall, we empirically validate that with little and imprecise supervision it\r\nis possible to reliably learn disentangled representations."}],"oa_version":"Preprint","oa":1,"main_file_link":[{"url":"https://arxiv.org/abs/1905.01258","open_access":"1"}],"quality_controlled":"1","scopus_import":"1","month":"12"},{"publication_status":"published","language":[{"iso":"eng"}],"volume":115,"abstract":[{"text":"We consider the problem of recovering a common latent source with independent\r\ncomponents from multiple views. This applies to settings in which a variable is\r\nmeasured with multiple experimental modalities, and where the goal is to\r\nsynthesize the disparate measurements into a single unified representation. We\r\nconsider the case that the observed views are a nonlinear mixing of\r\ncomponent-wise corruptions of the sources. \r\nWhen the views are considered\r\nseparately, this reduces to nonlinear Independent Component Analysis (ICA) for\r\nwhich it is provably impossible to undo the mixing. We present novel\r\nidentifiability proofs that this is possible when the multiple views are\r\nconsidered jointly, showing that the mixing can theoretically be undone using\r\nfunction approximators such as deep neural networks. In contrast to known\r\nidentifiability results for nonlinear ICA, we prove that independent latent\r\nsources with arbitrary mixing can be recovered as long as multiple,\r\nsufficiently different noisy views are available.","lang":"eng"}],"oa_version":"Preprint","scopus_import":"1","alternative_title":["PMLR"],"main_file_link":[{"url":"https://arxiv.org/abs/1905.06642","open_access":"1"}],"month":"05","intvolume":" 115","date_updated":"2023-09-12T08:07:38Z","extern":"1","department":[{"_id":"FrLo"}],"_id":"14189","type":"conference","conference":{"location":"Tel Aviv, Israel","end_date":"2019-07-25","start_date":"2019-07-22","name":"UAI: Uncertainty in Artificial Intelligence"},"status":"public","year":"2019","day":"16","publication":"Proceedings of the 35th Conference on Uncertainty in Artificial Intelligence","page":"217-227","date_published":"2019-05-16T00:00:00Z","date_created":"2023-08-22T14:08:35Z","quality_controlled":"1","publisher":"ML Research Press","oa":1,"citation":{"chicago":"Gresele, Luigi, Paul K. Rubenstein, Arash Mehrjou, Francesco Locatello, and Bernhard Schölkopf. “The Incomplete Rosetta Stone Problem: Identifiability Results for Multi-View Nonlinear ICA.” In Proceedings of the 35th Conference on Uncertainty in Artificial Intelligence, 115:217–27. ML Research Press, 2019.","ista":"Gresele L, Rubenstein PK, Mehrjou A, Locatello F, Schölkopf B. 2019. The incomplete Rosetta Stone problem: Identifiability results for multi-view nonlinear ICA. Proceedings of the 35th Conference on Uncertainty in Artificial Intelligence. \r\nUAI: Uncertainty in Artificial Intelligence, PMLR, vol. 115, 217–227.","mla":"Gresele, Luigi, et al. “The Incomplete Rosetta Stone Problem: Identifiability Results for Multi-View Nonlinear ICA.” Proceedings of the 35th Conference on Uncertainty in Artificial Intelligence, vol. 115, ML Research Press, 2019, pp. 217–27.","ama":"Gresele L, Rubenstein PK, Mehrjou A, Locatello F, Schölkopf B. The incomplete Rosetta Stone problem: Identifiability results for multi-view nonlinear ICA. In: Proceedings of the 35th Conference on Uncertainty in Artificial Intelligence. Vol 115. ML Research Press; 2019:217-227.","apa":"Gresele, L., Rubenstein, P. K., Mehrjou, A., Locatello, F., & Schölkopf, B. (2019). The incomplete Rosetta Stone problem: Identifiability results for multi-view nonlinear ICA. In Proceedings of the 35th Conference on Uncertainty in Artificial Intelligence (Vol. 115, pp. 217–227). Tel Aviv, Israel: ML Research Press.","short":"L. Gresele, P.K. Rubenstein, A. Mehrjou, F. Locatello, B. Schölkopf, in:, Proceedings of the 35th Conference on Uncertainty in Artificial Intelligence, ML Research Press, 2019, pp. 217–227.","ieee":"L. Gresele, P. K. Rubenstein, A. Mehrjou, F. Locatello, and B. Schölkopf, “The incomplete Rosetta Stone problem: Identifiability results for multi-view nonlinear ICA,” in Proceedings of the 35th Conference on Uncertainty in Artificial Intelligence, Tel Aviv, Israel, 2019, vol. 115, pp. \r\n217–227."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","author":[{"last_name":"Gresele","full_name":"Gresele, Luigi","first_name":"Luigi"},{"last_name":"Rubenstein","full_name":"Rubenstein, Paul K.","first_name":"Paul K."},{"full_name":"Mehrjou, Arash","last_name":"Mehrjou","first_name":"Arash"},{"first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","last_name":"Locatello","full_name":"Locatello, Francesco","orcid":"0000-0002-4850-0683"},{"last_name":"Schölkopf","full_name":"Schölkopf, Bernhard","first_name":"Bernhard"}],"article_processing_charge":"No","external_id":{"arxiv":["1905.06642"]},"title":"The incomplete Rosetta Stone problem: Identifiability results for multi-view nonlinear ICA"},{"_id":"14197","type":"conference","conference":{"name":"NeurIPS: Neural Information Processing Systems","start_date":"2019-12-08","location":"Vancouver, Canada","end_date":"2019-12-14"},"status":"public","date_updated":"2023-09-12T09:37:22Z","extern":"1","department":[{"_id":"FrLo"}],"abstract":[{"text":"Recently there has been a significant interest in learning disentangled\r\nrepresentations, as they promise increased interpretability, generalization to\r\nunseen scenarios and faster learning on downstream tasks. In this paper, we\r\ninvestigate the usefulness of different notions of disentanglement for\r\nimproving the fairness of downstream prediction tasks based on representations.\r\nWe consider the setting where the goal is to predict a target variable based on\r\nthe learned representation of high-dimensional observations (such as images)\r\nthat depend on both the target variable and an \\emph{unobserved} sensitive\r\nvariable. We show that in this setting both the optimal and empirical\r\npredictions can be unfair, even if the target variable and the sensitive\r\nvariable are independent. \r\nAnalyzing the representations of more than\r\n\\num{12600} trained state-of-the-art disentangled models, we observe that\r\nseveral disentanglement scores are consistently correlated with increased\r\nfairness, suggesting that disentanglement may be a useful property to encourage\r\nfairness when sensitive variables are not observed.","lang":"eng"}],"oa_version":"Preprint","scopus_import":"1","main_file_link":[{"url":"https://arxiv.org/abs/1905.13662","open_access":"1"}],"month":"12","intvolume":" 32","publication_identifier":{"isbn":["9781713807933"]},"publication_status":"published","language":[{"iso":"eng"}],"volume":32,"citation":{"chicago":"Locatello, Francesco, Gabriele Abbati, Tom Rainforth, Stefan Bauer, Bernhard Schölkopf, and Olivier Bachem. “On the Fairness of Disentangled Representations.” In Advances in Neural Information Processing Systems, 32:14611–14624, 2019.","ista":"Locatello F, Abbati G, Rainforth T, Bauer S, Schölkopf B, Bachem O. 2019. On the fairness of disentangled representations. Advances in Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems vol. 32, 14611–14624.","mla":"Locatello, Francesco, et al. “On the Fairness of Disentangled Representations.” Advances in Neural Information Processing Systems, vol. 32, 2019, pp. 14611–14624.","short":"F. Locatello, G. Abbati, T. Rainforth, S. Bauer, B. Schölkopf, O. Bachem, in:, Advances in Neural Information Processing Systems, 2019, pp. 14611–14624.","ieee":"F. Locatello, G. Abbati, T. Rainforth, S. Bauer, B. Schölkopf, and O. Bachem, “On the fairness of disentangled representations,” in Advances in Neural Information Processing Systems, Vancouver, Canada, 2019, vol. 32, pp. 14611–14624.","ama":"Locatello F, Abbati G, Rainforth T, Bauer S, Schölkopf B, Bachem O. On the fairness of disentangled representations. In: Advances in Neural Information Processing Systems. Vol 32. \r\n; 2019:14611–14624.","apa":"Locatello, F., Abbati, G., Rainforth, T., Bauer, S., Schölkopf, B., & Bachem, O. (2019). On the fairness of disentangled representations. In Advances in Neural Information Processing Systems (Vol. 32, pp. 14611–14624). Vancouver, Canada."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","author":[{"first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","last_name":"Locatello","orcid":"0000-0002-4850-0683","full_name":"Locatello, Francesco"},{"last_name":"Abbati","full_name":"Abbati, Gabriele","first_name":"Gabriele"},{"first_name":"Tom","last_name":"Rainforth","full_name":"Rainforth, Tom"},{"first_name":"Stefan","full_name":"Bauer, Stefan","last_name":"Bauer"},{"full_name":"Schölkopf, Bernhard","last_name":"Schölkopf","first_name":"Bernhard"},{"first_name":"Olivier","full_name":"Bachem, Olivier","last_name":"Bachem"}],"article_processing_charge":"No","external_id":{"arxiv":["1905.13662"]},"title":"On the fairness of disentangled representations","quality_controlled":"1","oa":1,"year":"2019","day":"08","publication":"Advances in Neural Information Processing Systems","page":"14611–14624","date_published":"2019-12-08T00:00:00Z","date_created":"2023-08-22T14:12:28Z"},{"volume":32,"publication_identifier":{"isbn":["9781713807933"]},"publication_status":"published","language":[{"iso":"eng"}],"scopus_import":"1","main_file_link":[{"url":"https://arxiv.org/abs/1901.10348","open_access":"1"}],"month":"12","intvolume":" 32","abstract":[{"lang":"eng","text":"A broad class of convex optimization problems can be formulated as a semidefinite program (SDP), minimization of a convex function over the positive-semidefinite cone subject to some affine constraints. The majority of classical SDP solvers are designed for the deterministic setting where problem data is readily available. \r\nIn this setting, generalized conditional gradient methods (aka Frank-Wolfe-type methods) provide scalable solutions by leveraging the so-called linear minimization oracle instead of the projection onto the semidefinite cone. Most problems in machine learning and modern engineering applications, however, contain some degree of stochasticity. In this work, we propose the first conditional-gradient-type method for solving stochastic optimization problems under affine constraints. Our method guarantees O(k−1/3) convergence rate in expectation on the objective residual and O(k−5/12) on the feasibility gap."}],"oa_version":"Preprint","department":[{"_id":"FrLo"}],"date_updated":"2023-09-12T08:48:45Z","extern":"1","type":"conference","conference":{"end_date":"2019-12-14","location":"Vancouver, Canada","start_date":"2019-12-08","name":"NeurIPS: Neural Information Processing Systems"},"status":"public","_id":"14191","page":"14291–14301","date_published":"2019-12-29T00:00:00Z","date_created":"2023-08-22T14:09:35Z","year":"2019","day":"29","publication":"Advances in Neural Information Processing Systems","quality_controlled":"1","oa":1,"author":[{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","first_name":"Francesco","orcid":"0000-0002-4850-0683","full_name":"Locatello, Francesco","last_name":"Locatello"},{"last_name":"Yurtsever","full_name":"Yurtsever, Alp","first_name":"Alp"},{"last_name":"Fercoq","full_name":"Fercoq, Olivier","first_name":"Olivier"},{"full_name":"Cevher, Volkan","last_name":"Cevher","first_name":"Volkan"}],"article_processing_charge":"No","external_id":{"arxiv":["1901.10348"]},"title":"Stochastic Frank-Wolfe for composite convex minimization","citation":{"ama":"Locatello F, Yurtsever A, Fercoq O, Cevher V. Stochastic Frank-Wolfe for composite convex minimization. In: Advances in Neural Information Processing Systems. Vol 32. ; 2019:14291–14301.","apa":"Locatello, F., Yurtsever, A., Fercoq, O., & Cevher, V. (2019). \r\nStochastic Frank-Wolfe for composite convex minimization. In Advances in Neural Information Processing Systems (Vol. 32, pp. 14291–14301). Vancouver, Canada.","short":"F. Locatello, A. Yurtsever, O. Fercoq, V. Cevher, in:, Advances in Neural Information Processing Systems, 2019, pp. 14291–14301.","ieee":"F. Locatello, A. Yurtsever, O. Fercoq, and V. Cevher, “Stochastic Frank-Wolfe for composite convex minimization,” in Advances in Neural Information Processing Systems, Vancouver, Canada, 2019, vol. 32, pp. 14291–14301.","mla":"Locatello, Francesco, et al. “Stochastic Frank-Wolfe for Composite Convex Minimization.” Advances in Neural Information Processing Systems, vol. 32, 2019, pp. 14291–14301.","ista":"Locatello F, Yurtsever A, Fercoq O, Cevher V. 2019. Stochastic Frank-Wolfe for composite convex minimization. Advances in Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems vol. 32, 14291–14301.","chicago":"Locatello, Francesco, Alp Yurtsever, Olivier Fercoq, and Volkan Cevher. “Stochastic Frank-Wolfe for Composite Convex Minimization.” In Advances in Neural Information Processing Systems, 32:14291–14301, 2019."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87"}]