[{"scopus_import":"1","day":"20","month":"02","publication_identifier":{"issn":["0018-9448"],"eissn":["1557-9654"]},"article_processing_charge":"No","article_type":"original","quality_controlled":"1","project":[{"_id":"059876FA-7A3F-11EA-A408-12923DDC885E","name":"Prix Lopez-Loretta 2019 - Marco Mondelli"}],"publication":"IEEE Transactions on Information Theory","citation":{"ista":"Esposito AR, Mondelli M. Concentration without independence via information measures. IEEE Transactions on Information Theory.","apa":"Esposito, A. R., & Mondelli, M. (n.d.). Concentration without independence via information measures. IEEE Transactions on Information Theory. IEEE. https://doi.org/10.1109/TIT.2024.3367767","ieee":"A. R. Esposito and M. Mondelli, “Concentration without independence via information measures,” IEEE Transactions on Information Theory. IEEE.","ama":"Esposito AR, Mondelli M. Concentration without independence via information measures. IEEE Transactions on Information Theory. doi:10.1109/TIT.2024.3367767","chicago":"Esposito, Amedeo Roberto, and Marco Mondelli. “Concentration without Independence via Information Measures.” IEEE Transactions on Information Theory. IEEE, n.d. https://doi.org/10.1109/TIT.2024.3367767.","mla":"Esposito, Amedeo Roberto, and Marco Mondelli. “Concentration without Independence via Information Measures.” IEEE Transactions on Information Theory, IEEE, doi:10.1109/TIT.2024.3367767.","short":"A.R. Esposito, M. Mondelli, IEEE Transactions on Information Theory (n.d.)."},"external_id":{"arxiv":["2303.07245"]},"language":[{"iso":"eng"}],"doi":"10.1109/TIT.2024.3367767","date_published":"2024-02-20T00:00:00Z","type":"journal_article","abstract":[{"lang":"eng","text":"We propose a novel approach to concentration for non-independent random variables. The main idea is to “pretend” that the random variables are independent and pay a multiplicative price measuring how far they are from actually being independent. 
This price is encapsulated in the Hellinger integral between the joint and the product of the marginals, which is then upper bounded leveraging tensorisation properties. Our bounds represent a natural generalisation of concentration inequalities in the presence of dependence: we recover exactly the classical bounds (McDiarmid’s inequality) when the random variables are independent. Furthermore, in a “large deviations” regime, we obtain the same decay in the probability as for the independent case, even when the random variables display non-trivial dependencies. To show this, we consider a number of applications of interest. First, we provide a bound for Markov chains with finite state space. Then, we consider the Simple Symmetric Random Walk, which is a non-contracting Markov chain, and a non-Markovian setting in which the stochastic process depends on its entire past. To conclude, we propose an application to Markov Chain Monte Carlo methods, where our approach leads to an improved lower bound on the minimum burn-in period required to reach a certain accuracy. 
In all of these settings, we provide a regime of parameters in which our bound fares better than what the state of the art can provide."}],"publication_status":"inpress","title":"Concentration without independence via information measures","status":"public","publisher":"IEEE","department":[{"_id":"MaMo"}],"year":"2024","_id":"15172","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_created":"2024-03-24T23:01:00Z","date_updated":"2024-03-25T07:15:51Z","oa_version":"None","author":[{"id":"9583e921-e1ad-11ec-9862-cef099626dc9","first_name":"Amedeo Roberto","last_name":"Esposito","full_name":"Esposito, Amedeo Roberto"},{"full_name":"Mondelli, Marco","id":"27EB676C-8706-11E9-9510-7717E6697425","orcid":"0000-0002-3242-7020","first_name":"Marco","last_name":"Mondelli"}],"related_material":{"record":[{"status":"public","relation":"earlier_version","id":"14922"}]}},{"file_date_updated":"2023-08-21T07:23:18Z","article_number":"99","date_updated":"2023-08-21T07:26:01Z","date_created":"2023-08-20T22:01:13Z","volume":261,"author":[{"full_name":"Resch, Nicolas","last_name":"Resch","first_name":"Nicolas"},{"full_name":"Yuan, Chen","last_name":"Yuan","first_name":"Chen"},{"full_name":"Zhang, Yihan","orcid":"0000-0002-6465-6258","id":"2ce5da42-b2ea-11eb-bba5-9f264e9d002c","last_name":"Zhang","first_name":"Yihan"}],"publication_status":"published","department":[{"_id":"MaMo"}],"publisher":"Schloss Dagstuhl - Leibniz-Zentrum für Informatik","year":"2023","acknowledgement":"Nicolas Resch: Research supported in part by ERC H2020 grant No.74079 (ALGSTRONGCRYPTO). 
Chen Yuan: Research supported in part by the National Key Research and Development Projects under Grant 2022YFA1004900 and Grant 2021YFE0109900, the National Natural Science Foundation of China under Grant 12101403 and Grant 12031011.\r\nAcknowledgements YZ is grateful to Shashank Vatedka, Diyuan Wu and Fengxing Zhu for inspiring discussions.","month":"07","publication_identifier":{"isbn":["9783959772785"],"issn":["1868-8969"]},"language":[{"iso":"eng"}],"conference":{"name":"ICALP: International Colloquium on Automata, Languages, and Programming","start_date":"2023-07-10","location":"Paderborn, Germany","end_date":"2023-07-14"},"doi":"10.4230/LIPIcs.ICALP.2023.99","quality_controlled":"1","oa":1,"tmp":{"name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","image":"/images/cc_by.png"},"external_id":{"arxiv":["2210.07754"]},"abstract":[{"text":"In this work we consider the list-decodability and list-recoverability of arbitrary q-ary codes, for all integer values of q ≥ 2. A code is called (p,L)_q-list-decodable if every radius pn Hamming ball contains less than L codewords; (p,𝓁,L)_q-list-recoverability is a generalization where we place radius pn Hamming balls on every point of a combinatorial rectangle with side length 𝓁 and again stipulate that there be less than L codewords.\r\nOur main contribution is to precisely calculate the maximum value of p for which there exist infinite families of positive rate (p,𝓁,L)_q-list-recoverable codes, the quantity we call the zero-rate threshold. Denoting this value by p_*, we in fact show that codes correcting a p_*+ε fraction of errors must have size O_ε(1), i.e., independent of n. Such a result is typically referred to as a \"Plotkin bound.\" To complement this, a standard random code with expurgation construction shows that there exist positive rate codes correcting a p_*-ε fraction of errors. 
We also follow a classical proof template (typically attributed to Elias and Bassalygo) to derive from the zero-rate threshold other tradeoffs between rate and decoding radius for list-decoding and list-recovery.\r\nTechnically, proving the Plotkin bound boils down to demonstrating the Schur convexity of a certain function defined on the q-simplex as well as the convexity of a univariate function derived from it. We remark that an earlier argument claimed similar results for q-ary list-decoding; however, we point out that this earlier proof is flawed.","lang":"eng"}],"alternative_title":["LIPIcs"],"type":"conference","oa_version":"Published Version","file":[{"content_type":"application/pdf","file_size":1141497,"creator":"dernst","access_level":"open_access","file_name":"2023_LIPIcsICALP_Resch.pdf","checksum":"a449143fec3fbebb092cb8ef3b53c226","success":1,"date_created":"2023-08-21T07:23:18Z","date_updated":"2023-08-21T07:23:18Z","relation":"main_file","file_id":"14091"}],"title":"Zero-rate thresholds and new capacity bounds for list-decoding and list-recovery","status":"public","ddc":["000"],"intvolume":" 261","_id":"14083","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","day":"01","has_accepted_license":"1","article_processing_charge":"Yes","scopus_import":"1","date_published":"2023-07-01T00:00:00Z","publication":"50th International Colloquium on Automata, Languages, and Programming","citation":{"ama":"Resch N, Yuan C, Zhang Y. Zero-rate thresholds and new capacity bounds for list-decoding and list-recovery. In: 50th International Colloquium on Automata, Languages, and Programming. Vol 261. Schloss Dagstuhl - Leibniz-Zentrum für Informatik; 2023. doi:10.4230/LIPIcs.ICALP.2023.99","ista":"Resch N, Yuan C, Zhang Y. 2023. Zero-rate thresholds and new capacity bounds for list-decoding and list-recovery. 50th International Colloquium on Automata, Languages, and Programming. ICALP: International Colloquium on Automata, Languages, and Programming, LIPIcs, vol. 
261, 99.","ieee":"N. Resch, C. Yuan, and Y. Zhang, “Zero-rate thresholds and new capacity bounds for list-decoding and list-recovery,” in 50th International Colloquium on Automata, Languages, and Programming, Paderborn, Germany, 2023, vol. 261.","apa":"Resch, N., Yuan, C., & Zhang, Y. (2023). Zero-rate thresholds and new capacity bounds for list-decoding and list-recovery. In 50th International Colloquium on Automata, Languages, and Programming (Vol. 261). Paderborn, Germany: Schloss Dagstuhl - Leibniz-Zentrum für Informatik. https://doi.org/10.4230/LIPIcs.ICALP.2023.99","mla":"Resch, Nicolas, et al. “Zero-Rate Thresholds and New Capacity Bounds for List-Decoding and List-Recovery.” 50th International Colloquium on Automata, Languages, and Programming, vol. 261, 99, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2023, doi:10.4230/LIPIcs.ICALP.2023.99.","short":"N. Resch, C. Yuan, Y. Zhang, in:, 50th International Colloquium on Automata, Languages, and Programming, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2023.","chicago":"Resch, Nicolas, Chen Yuan, and Yihan Zhang. “Zero-Rate Thresholds and New Capacity Bounds for List-Decoding and List-Recovery.” In 50th International Colloquium on Automata, Languages, and Programming, Vol. 261. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2023. 
https://doi.org/10.4230/LIPIcs.ICALP.2023.99."}},{"language":[{"iso":"eng"}],"doi":"10.1073/pnas.2302028120","project":[{"_id":"059876FA-7A3F-11EA-A408-12923DDC885E","name":"Prix Lopez-Loretta 2019 - Marco Mondelli"}],"quality_controlled":"1","oa":1,"tmp":{"name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","image":"/images/cc_by.png"},"external_id":{"pmid":["37463204"]},"publication_identifier":{"eissn":["1091-6490"]},"month":"07","volume":120,"date_created":"2023-07-30T22:01:02Z","date_updated":"2023-10-17T11:44:55Z","related_material":{"link":[{"relation":"software","url":"https://github.com/fcamilli95/Structured-PCA-"}]},"author":[{"last_name":"Barbier","first_name":"Jean","full_name":"Barbier, Jean"},{"full_name":"Camilli, Francesco","first_name":"Francesco","last_name":"Camilli"},{"full_name":"Mondelli, Marco","orcid":"0000-0002-3242-7020","id":"27EB676C-8706-11E9-9510-7717E6697425","last_name":"Mondelli","first_name":"Marco"},{"full_name":"Sáenz, Manuel","first_name":"Manuel","last_name":"Sáenz"}],"department":[{"_id":"MaMo"}],"publisher":"National Academy of Sciences","publication_status":"published","pmid":1,"acknowledgement":"J.B. was funded by the European Union (ERC, CHORAL, project number 101039794). Views and opinions expressed are however those of the author(s) only and do not necessarily reflect those of the European Union or the European Research Council. Neither the European Union nor the granting authority can be held responsible for them. M.M. was supported by the 2019 Lopez-Loreta Prize. 
We would like to thank the reviewers for the insightful comments and, in particular, for suggesting the BAMP-inspired denoisers leading to AMP-AP.","year":"2023","file_date_updated":"2023-07-31T07:30:48Z","article_number":"e2302028120","date_published":"2023-07-25T00:00:00Z","article_type":"original","citation":{"chicago":"Barbier, Jean, Francesco Camilli, Marco Mondelli, and Manuel Sáenz. “Fundamental Limits in Structured Principal Component Analysis and How to Reach Them.” Proceedings of the National Academy of Sciences of the United States of America. National Academy of Sciences, 2023. https://doi.org/10.1073/pnas.2302028120.","mla":"Barbier, Jean, et al. “Fundamental Limits in Structured Principal Component Analysis and How to Reach Them.” Proceedings of the National Academy of Sciences of the United States of America, vol. 120, no. 30, e2302028120, National Academy of Sciences, 2023, doi:10.1073/pnas.2302028120.","short":"J. Barbier, F. Camilli, M. Mondelli, M. Sáenz, Proceedings of the National Academy of Sciences of the United States of America 120 (2023).","ista":"Barbier J, Camilli F, Mondelli M, Sáenz M. 2023. Fundamental limits in structured principal component analysis and how to reach them. Proceedings of the National Academy of Sciences of the United States of America. 120(30), e2302028120.","apa":"Barbier, J., Camilli, F., Mondelli, M., & Sáenz, M. (2023). Fundamental limits in structured principal component analysis and how to reach them. Proceedings of the National Academy of Sciences of the United States of America. National Academy of Sciences. https://doi.org/10.1073/pnas.2302028120","ieee":"J. Barbier, F. Camilli, M. Mondelli, and M. Sáenz, “Fundamental limits in structured principal component analysis and how to reach them,” Proceedings of the National Academy of Sciences of the United States of America, vol. 120, no. 30. National Academy of Sciences, 2023.","ama":"Barbier J, Camilli F, Mondelli M, Sáenz M. 
Fundamental limits in structured principal component analysis and how to reach them. Proceedings of the National Academy of Sciences of the United States of America. 2023;120(30). doi:10.1073/pnas.2302028120"},"publication":"Proceedings of the National Academy of Sciences of the United States of America","has_accepted_license":"1","article_processing_charge":"Yes (in subscription journal)","day":"25","scopus_import":"1","oa_version":"Published Version","file":[{"date_updated":"2023-07-31T07:30:48Z","date_created":"2023-07-31T07:30:48Z","checksum":"1fc06228afdb3aa80cf8e7766bcf9dc5","success":1,"relation":"main_file","file_id":"13323","content_type":"application/pdf","file_size":995933,"creator":"dernst","file_name":"2023_PNAS_Barbier.pdf","access_level":"open_access"}],"intvolume":" 120","title":"Fundamental limits in structured principal component analysis and how to reach them","ddc":["000"],"status":"public","_id":"13315","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","issue":"30","abstract":[{"text":"How do statistical dependencies in measurement noise influence high-dimensional inference? To answer this, we study the paradigmatic spiked matrix model of principal components analysis (PCA), where a rank-one matrix is corrupted by additive noise. We go beyond the usual independence assumption on the noise entries, by drawing the noise from a low-order polynomial orthogonal matrix ensemble. The resulting noise correlations make the setting relevant for applications but analytically challenging. We provide characterization of the Bayes optimal limits of inference in this model. If the spike is rotation invariant, we show that standard spectral PCA is optimal. However, for more general priors, both PCA and the existing approximate message-passing algorithm (AMP) fall short of achieving the information-theoretic limits, which we compute using the replica method from statistical physics. 
We thus propose an AMP, inspired by the theory of adaptive Thouless–Anderson–Palmer equations, which is empirically observed to saturate the conjectured theoretical limit. This AMP comes with a rigorous state evolution analysis tracking its performance. Although we focus on specific noise distributions, our methodology can be generalized to a wide class of trace matrix ensembles at the cost of more involved expressions. Finally, despite the seemingly strong assumption of rotation-invariant noise, our theory empirically predicts algorithmic performance on real data, pointing at strong universality properties.","lang":"eng"}],"type":"journal_article"},{"acknowledgement":"Aleksandr Shevchenko, Kevin Kogler and Marco Mondelli are supported by the 2019 Lopez-Loreta Prize. Hamed Hassani acknowledges the support by the NSF CIF award (1910056) and the NSF Institute for CORE Emerging Methods in Data Science (EnCORE).","year":"2023","department":[{"_id":"MaMo"},{"_id":"DaAl"}],"publisher":"ML Research Press","publication_status":"published","author":[{"first_name":"Aleksandr","last_name":"Shevchenko","id":"F2B06EC2-C99E-11E9-89F0-752EE6697425","full_name":"Shevchenko, Aleksandr"},{"last_name":"Kögler","first_name":"Kevin","id":"94ec913c-dc85-11ea-9058-e5051ab2428b","full_name":"Kögler, Kevin"},{"full_name":"Hassani, Hamed","last_name":"Hassani","first_name":"Hamed"},{"first_name":"Marco","last_name":"Mondelli","id":"27EB676C-8706-11E9-9510-7717E6697425","orcid":"0000-0002-3242-7020","full_name":"Mondelli, Marco"}],"volume":202,"date_updated":"2023-10-31T08:52:28Z","date_created":"2023-10-29T23:01:17Z","external_id":{"arxiv":["2212.13468"]},"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2212.13468"}],"oa":1,"project":[{"name":"Prix Lopez-Loretta 2019 - Marco Mondelli","_id":"059876FA-7A3F-11EA-A408-12923DDC885E"}],"quality_controlled":"1","conference":{"name":"ICML: International Conference on Machine Learning","location":"Honolulu, Hawaii, HI, 
United States","start_date":"2023-07-23","end_date":"2023-07-29"},"language":[{"iso":"eng"}],"publication_identifier":{"eissn":["2640-3498"]},"month":"07","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"14459","intvolume":" 202","title":"Fundamental limits of two-layer autoencoders, and achieving them with gradient methods","status":"public","oa_version":"Preprint","type":"conference","alternative_title":["PMLR"],"abstract":[{"lang":"eng","text":"Autoencoders are a popular model in many branches of machine learning and lossy data compression. However, their fundamental limits, the performance of gradient methods and the features learnt during optimization remain poorly understood, even in the two-layer setting. In fact, earlier work has considered either linear autoencoders or specific training regimes (leading to vanishing or diverging compression rates). Our paper addresses this gap by focusing on non-linear two-layer autoencoders trained in the challenging proportional regime in which the input dimension scales linearly with the size of the representation. Our results characterize the minimizers of the population risk, and show that such minimizers are achieved by gradient methods; their structure is also unveiled, thus leading to a concise description of the features obtained via training. For the special case of a sign activation function, our analysis establishes the fundamental limits for the lossy compression of Gaussian sources via (shallow) autoencoders. Finally, while the results are proved for Gaussian data, numerical simulations on standard datasets display the universality of the theoretical predictions."}],"citation":{"short":"A. Shevchenko, K. Kögler, H. Hassani, M. Mondelli, in:, Proceedings of the 40th International Conference on Machine Learning, ML Research Press, 2023, pp. 31151–31209.","mla":"Shevchenko, Aleksandr, et al. 
“Fundamental Limits of Two-Layer Autoencoders, and Achieving Them with Gradient Methods.” Proceedings of the 40th International Conference on Machine Learning, vol. 202, ML Research Press, 2023, pp. 31151–209.","chicago":"Shevchenko, Aleksandr, Kevin Kögler, Hamed Hassani, and Marco Mondelli. “Fundamental Limits of Two-Layer Autoencoders, and Achieving Them with Gradient Methods.” In Proceedings of the 40th International Conference on Machine Learning, 202:31151–209. ML Research Press, 2023.","ama":"Shevchenko A, Kögler K, Hassani H, Mondelli M. Fundamental limits of two-layer autoencoders, and achieving them with gradient methods. In: Proceedings of the 40th International Conference on Machine Learning. Vol 202. ML Research Press; 2023:31151-31209.","ieee":"A. Shevchenko, K. Kögler, H. Hassani, and M. Mondelli, “Fundamental limits of two-layer autoencoders, and achieving them with gradient methods,” in Proceedings of the 40th International Conference on Machine Learning, Honolulu, Hawaii, HI, United States, 2023, vol. 202, pp. 31151–31209.","apa":"Shevchenko, A., Kögler, K., Hassani, H., & Mondelli, M. (2023). Fundamental limits of two-layer autoencoders, and achieving them with gradient methods. In Proceedings of the 40th International Conference on Machine Learning (Vol. 202, pp. 31151–31209). Honolulu, Hawaii, HI, United States: ML Research Press.","ista":"Shevchenko A, Kögler K, Hassani H, Mondelli M. 2023. Fundamental limits of two-layer autoencoders, and achieving them with gradient methods. Proceedings of the 40th International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 
202, 31151–31209."},"publication":"Proceedings of the 40th International Conference on Machine Learning","page":"31151-31209","date_published":"2023-07-30T00:00:00Z","scopus_import":"1","article_processing_charge":"No","day":"30"},{"article_processing_charge":"No","day":"01","scopus_import":"1","date_published":"2023-07-01T00:00:00Z","citation":{"ama":"Zhang Y, Vatedka S. Multiple packing: Lower bounds via infinite constellations. IEEE Transactions on Information Theory. 2023;69(7):4513-4527. doi:10.1109/TIT.2023.3260950","apa":"Zhang, Y., & Vatedka, S. (2023). Multiple packing: Lower bounds via infinite constellations. IEEE Transactions on Information Theory. IEEE. https://doi.org/10.1109/TIT.2023.3260950","ieee":"Y. Zhang and S. Vatedka, “Multiple packing: Lower bounds via infinite constellations,” IEEE Transactions on Information Theory, vol. 69, no. 7. IEEE, pp. 4513–4527, 2023.","ista":"Zhang Y, Vatedka S. 2023. Multiple packing: Lower bounds via infinite constellations. IEEE Transactions on Information Theory. 69(7), 4513–4527.","short":"Y. Zhang, S. Vatedka, IEEE Transactions on Information Theory 69 (2023) 4513–4527.","mla":"Zhang, Yihan, and Shashank Vatedka. “Multiple Packing: Lower Bounds via Infinite Constellations.” IEEE Transactions on Information Theory, vol. 69, no. 7, IEEE, 2023, pp. 4513–27, doi:10.1109/TIT.2023.3260950.","chicago":"Zhang, Yihan, and Shashank Vatedka. “Multiple Packing: Lower Bounds via Infinite Constellations.” IEEE Transactions on Information Theory. IEEE, 2023. https://doi.org/10.1109/TIT.2023.3260950."},"publication":"IEEE Transactions on Information Theory","page":"4513-4527","article_type":"original","issue":"7","abstract":[{"text":"We study the problem of high-dimensional multiple packing in Euclidean space. Multiple packing is a natural generalization of sphere packing and is defined as follows. Let N > 0 and L ∈ Z ≽2 . 
A multiple packing is a set C of points in R n such that any point in R n lies in the intersection of at most L – 1 balls of radius √ nN around points in C . Given a well-known connection with coding theory, multiple packings can be viewed as the Euclidean analog of list-decodable codes, which are well-studied for finite fields. In this paper, we derive the best known lower bounds on the optimal density of list-decodable infinite constellations for constant L under a stronger notion called average-radius multiple packing. To this end, we apply tools from high-dimensional geometry and large deviation theory.","lang":"eng"}],"type":"journal_article","oa_version":"Preprint","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"12838","intvolume":" 69","status":"public","title":"Multiple packing: Lower bounds via infinite constellations","publication_identifier":{"eissn":["1557-9654"],"issn":["0018-9448"]},"month":"07","doi":"10.1109/TIT.2023.3260950","language":[{"iso":"eng"}],"external_id":{"arxiv":["2211.04407"],"isi":["001017307000023"]},"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2211.04407","open_access":"1"}],"oa":1,"isi":1,"quality_controlled":"1","author":[{"first_name":"Yihan","last_name":"Zhang","id":"2ce5da42-b2ea-11eb-bba5-9f264e9d002c","orcid":"0000-0002-6465-6258","full_name":"Zhang, Yihan"},{"last_name":"Vatedka","first_name":"Shashank","full_name":"Vatedka, Shashank"}],"volume":69,"date_created":"2023-04-16T22:01:09Z","date_updated":"2023-12-13T11:16:46Z","acknowledgement":"YZ thanks Jiajin Li for making the observation given by Equation (23). He also would like to thank Nir Ailon and Ely Porat for several helpful conversations throughout this project, and Alexander Barg for insightful comments on the manuscript.\r\nYZ has received funding from the European Union’s Horizon 2020 research and innovation programme under grant agreement No 682203-ERC-[Inf-Speed-Tradeoff]. 
The work of SV was supported by a seed grant from IIT Hyderabad and the start-up research grant from the Science and Engineering Research Board, India (SRG/2020/000910).","year":"2023","department":[{"_id":"MaMo"}],"publisher":"IEEE","publication_status":"published"},{"article_processing_charge":"No","day":"01","scopus_import":"1","date_published":"2023-05-01T00:00:00Z","page":"294-298","citation":{"short":"Y. Xu, T.Q. Hou, S.S. Liang, M. Mondelli, in:, 2023 IEEE Information Theory Workshop, Institute of Electrical and Electronics Engineers, 2023, pp. 294–298.","mla":"Xu, Yizhou, et al. “Approximate Message Passing for Multi-Layer Estimation in Rotationally Invariant Models.” 2023 IEEE Information Theory Workshop, Institute of Electrical and Electronics Engineers, 2023, pp. 294–98, doi:10.1109/ITW55543.2023.10160238.","chicago":"Xu, Yizhou, Tian Qi Hou, Shan Suo Liang, and Marco Mondelli. “Approximate Message Passing for Multi-Layer Estimation in Rotationally Invariant Models.” In 2023 IEEE Information Theory Workshop, 294–98. Institute of Electrical and Electronics Engineers, 2023. https://doi.org/10.1109/ITW55543.2023.10160238.","ama":"Xu Y, Hou TQ, Liang SS, Mondelli M. Approximate message passing for multi-layer estimation in rotationally invariant models. In: 2023 IEEE Information Theory Workshop. Institute of Electrical and Electronics Engineers; 2023:294-298. doi:10.1109/ITW55543.2023.10160238","ieee":"Y. Xu, T. Q. Hou, S. S. Liang, and M. Mondelli, “Approximate message passing for multi-layer estimation in rotationally invariant models,” in 2023 IEEE Information Theory Workshop, Saint-Malo, France, 2023, pp. 294–298.","apa":"Xu, Y., Hou, T. Q., Liang, S. S., & Mondelli, M. (2023). Approximate message passing for multi-layer estimation in rotationally invariant models. In 2023 IEEE Information Theory Workshop (pp. 294–298). Saint-Malo, France: Institute of Electrical and Electronics Engineers. 
https://doi.org/10.1109/ITW55543.2023.10160238","ista":"Xu Y, Hou TQ, Liang SS, Mondelli M. 2023. Approximate message passing for multi-layer estimation in rotationally invariant models. 2023 IEEE Information Theory Workshop. ITW: Information Theory Workshop, 294–298."},"publication":"2023 IEEE Information Theory Workshop","abstract":[{"lang":"eng","text":"We consider the problem of reconstructing the signal and the hidden variables from observations coming from a multi-layer network with rotationally invariant weight matrices. The multi-layer structure models inference from deep generative priors, and the rotational invariance imposed on the weights generalizes the i.i.d. Gaussian assumption by allowing for a complex correlation structure, which is typical in applications. In this work, we present a new class of approximate message passing (AMP) algorithms and give a state evolution recursion which precisely characterizes their performance in the large system limit. In contrast with the existing multi-layer VAMP (ML-VAMP) approach, our proposed AMP – dubbed multilayer rotationally invariant generalized AMP (ML-RI-GAMP) – provides a natural generalization beyond Gaussian designs, in the sense that it recovers the existing Gaussian AMP as a special case. Furthermore, ML-RI-GAMP exhibits a significantly lower complexity than ML-VAMP, as the computationally intensive singular value decomposition is replaced by an estimation of the moments of the design matrices. 
Finally, our numerical results show that this complexity gain comes at little to no cost in the performance of the algorithm."}],"type":"conference","oa_version":"Preprint","title":"Approximate message passing for multi-layer estimation in rotationally invariant models","status":"public","_id":"13321","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","publication_identifier":{"eissn":["2475-4218"],"isbn":["9798350301496"]},"month":"05","language":[{"iso":"eng"}],"doi":"10.1109/ITW55543.2023.10160238","conference":{"name":"ITW: Information Theory Workshop","end_date":"2023-04-28","location":"Saint-Malo, France","start_date":"2023-04-23"},"project":[{"name":"Prix Lopez-Loretta 2019 - Marco Mondelli","_id":"059876FA-7A3F-11EA-A408-12923DDC885E"}],"quality_controlled":"1","isi":1,"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2212.01572","open_access":"1"}],"oa":1,"external_id":{"isi":["001031733100053"],"arxiv":["2212.01572"]},"date_created":"2023-07-30T22:01:04Z","date_updated":"2023-12-13T11:35:46Z","author":[{"first_name":"Yizhou","last_name":"Xu","full_name":"Xu, Yizhou"},{"full_name":"Hou, Tian Qi","first_name":"Tian Qi","last_name":"Hou"},{"full_name":"Liang, Shan Suo","last_name":"Liang","first_name":"Shan Suo"},{"id":"27EB676C-8706-11E9-9510-7717E6697425","orcid":"0000-0002-3242-7020","first_name":"Marco","last_name":"Mondelli","full_name":"Mondelli, Marco"}],"publisher":"Institute of Electrical and Electronics Engineers","department":[{"_id":"MaMo"}],"publication_status":"published","year":"2023","acknowledgement":"Marco Mondelli was partially supported by the 2019 Lopez-Loreta prize."},{"_id":"14665","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","year":"2023","department":[{"_id":"MaMo"}],"publisher":"IEEE","title":"Multiple packing: Lower bounds via error 
exponents","status":"public","publication_status":"epub_ahead","author":[{"last_name":"Zhang","first_name":"Yihan","orcid":"0000-0002-6465-6258","id":"2ce5da42-b2ea-11eb-bba5-9f264e9d002c","full_name":"Zhang, Yihan"},{"first_name":"Shashank","last_name":"Vatedka","full_name":"Vatedka, Shashank"}],"oa_version":"Preprint","date_updated":"2023-12-18T07:46:45Z","date_created":"2023-12-10T23:01:00Z","type":"journal_article","abstract":[{"text":"We derive lower bounds on the maximal rates for multiple packings in high-dimensional Euclidean spaces. For any N > 0 and L ∈ Z ≥2 , a multiple packing is a set C of points in R n such that any point in R n lies in the intersection of at most L - 1 balls of radius √ nN around points in C . This is a natural generalization of the sphere packing problem. We study the multiple packing problem for both bounded point sets whose points have norm at most √ nP for some constant P > 0, and unbounded point sets whose points are allowed to be anywhere in R n . Given a well-known connection with coding theory, multiple packings can be viewed as the Euclidean analog of list-decodable codes, which are well-studied over finite fields. We derive the best known lower bounds on the optimal multiple packing density. This is accomplished by establishing an inequality which relates the list-decoding error exponent for additive white Gaussian noise channels, a quantity of average-case nature, to the list-decoding radius, a quantity of worst-case nature. We also derive novel bounds on the list-decoding error exponent for infinite constellations and closed-form expressions for the list-decoding error exponents for the power-constrained AWGN channel, which may be of independent interest beyond multiple packing.","lang":"eng"}],"citation":{"mla":"Zhang, Yihan, and Shashank Vatedka. “Multiple Packing: Lower Bounds via Error Exponents.” IEEE Transactions on Information Theory, IEEE, 2023, doi:10.1109/TIT.2023.3334032.","short":"Y. Zhang, S. 
Vatedka, IEEE Transactions on Information Theory (2023).","chicago":"Zhang, Yihan, and Shashank Vatedka. “Multiple Packing: Lower Bounds via Error Exponents.” IEEE Transactions on Information Theory. IEEE, 2023. https://doi.org/10.1109/TIT.2023.3334032.","ama":"Zhang Y, Vatedka S. Multiple packing: Lower bounds via error exponents. IEEE Transactions on Information Theory. 2023. doi:10.1109/TIT.2023.3334032","ista":"Zhang Y, Vatedka S. 2023. Multiple packing: Lower bounds via error exponents. IEEE Transactions on Information Theory.","ieee":"Y. Zhang and S. Vatedka, “Multiple packing: Lower bounds via error exponents,” IEEE Transactions on Information Theory. IEEE, 2023.","apa":"Zhang, Y., & Vatedka, S. (2023). Multiple packing: Lower bounds via error exponents. IEEE Transactions on Information Theory. IEEE. https://doi.org/10.1109/TIT.2023.3334032"},"external_id":{"arxiv":["2211.04408"]},"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2211.04408"}],"oa":1,"publication":"IEEE Transactions on Information Theory","quality_controlled":"1","article_type":"original","doi":"10.1109/TIT.2023.3334032","date_published":"2023-11-16T00:00:00Z","language":[{"iso":"eng"}],"scopus_import":"1","article_processing_charge":"No","publication_identifier":{"eissn":["1557-9654"],"issn":["0018-9448"]},"month":"11","day":"16"},{"month":"07","publication_identifier":{"eissn":["1557-9654"],"issn":["0018-9448"]},"language":[{"iso":"eng"}],"doi":"10.1109/tit.2023.3257239","quality_controlled":"1","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2101.12426","open_access":"1"}],"oa":1,"external_id":{"arxiv":["2101.12426"]},"date_updated":"2024-01-09T08:45:24Z","date_created":"2024-01-08T13:04:54Z","volume":69,"author":[{"first_name":"Yihan","last_name":"Zhang","id":"2ce5da42-b2ea-11eb-bba5-9f264e9d002c","orcid":"0000-0002-6465-6258","full_name":"Zhang, Yihan"}],"publication_status":"published","publisher":"Institute of Electrical and Electronics 
Engineers","department":[{"_id":"MaMo"}],"acknowledgement":"The author would like to thank Amitalok J. Budkuley and Sidharth Jaggi for many helpful discussions at the early stage of this work. He would also like to thank Nir Ailon, Qi Cao, and Chandra Nair for discussions on a related problem regarding zero-error binary adder MACs.\r\nThe work of Yihan Zhang was supported by the European Union’s Horizon 2020 Research and Innovation Programme under Grant 682203-ERC-[Inf-Speed-Tradeoff]","year":"2023","day":"01","article_processing_charge":"No","keyword":["Computer Science Applications","Information Systems"],"scopus_import":"1","date_published":"2023-07-01T00:00:00Z","article_type":"original","page":"4093-4127","publication":"IEEE Transactions on Information Theory","citation":{"ista":"Zhang Y. 2023. Zero-error communication over adversarial MACs. IEEE Transactions on Information Theory. 69(7), 4093–4127.","apa":"Zhang, Y. (2023). Zero-error communication over adversarial MACs. IEEE Transactions on Information Theory. Institute of Electrical and Electronics Engineers. https://doi.org/10.1109/tit.2023.3257239","ieee":"Y. Zhang, “Zero-error communication over adversarial MACs,” IEEE Transactions on Information Theory, vol. 69, no. 7. Institute of Electrical and Electronics Engineers, pp. 4093–4127, 2023.","ama":"Zhang Y. Zero-error communication over adversarial MACs. IEEE Transactions on Information Theory. 2023;69(7):4093-4127. doi:10.1109/tit.2023.3257239","chicago":"Zhang, Yihan. “Zero-Error Communication over Adversarial MACs.” IEEE Transactions on Information Theory. Institute of Electrical and Electronics Engineers, 2023. https://doi.org/10.1109/tit.2023.3257239.","mla":"Zhang, Yihan. “Zero-Error Communication over Adversarial MACs.” IEEE Transactions on Information Theory, vol. 69, no. 7, Institute of Electrical and Electronics Engineers, 2023, pp. 4093–127, doi:10.1109/tit.2023.3257239.","short":"Y. 
Zhang, IEEE Transactions on Information Theory 69 (2023) 4093–4127."},"abstract":[{"lang":"eng","text":"We consider zero-error communication over a two-transmitter deterministic adversarial multiple access channel (MAC) governed by an adversary who has access to the transmissions of both senders (hence called omniscient ) and aims to maliciously corrupt the communication. None of the encoders, jammer and decoder is allowed to randomize using private or public randomness. This enforces a combinatorial nature of the problem. Our model covers a large family of channels studied in the literature, including all deterministic discrete memoryless noisy or noiseless MACs. In this work, given an arbitrary two-transmitter deterministic omniscient adversarial MAC, we characterize when the capacity region: 1) has nonempty interior (in particular, is two-dimensional); 2) consists of two line segments (in particular, has empty interior); 3) consists of one line segment (in particular, is one-dimensional); 4) or only contains (0,0) (in particular, is zero-dimensional). This extends a recent result by Wang et al. (2019) from the point-to-point setting to the multiple access setting. Indeed, our converse arguments build upon their generalized Plotkin bound and involve delicate case analysis. One of the technical challenges is to take care of both “joint confusability” and “marginal confusability”. In particular, the treatment of marginal confusability does not follow from the point-to-point results by Wang et al. Our achievability results follow from random coding with expurgation."}],"issue":"7","type":"journal_article","oa_version":"Preprint","title":"Zero-error communication over adversarial MACs","status":"public","intvolume":" 69","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"14751"},{"article_processing_charge":"No","day":"04","scopus_import":"1","date_published":"2023-07-04T00:00:00Z","citation":{"chicago":"Polyanskii, Nikita, and Yihan Zhang. 
“Codes for the Z-Channel.” IEEE Transactions on Information Theory. Institute of Electrical and Electronics Engineers, 2023. https://doi.org/10.1109/TIT.2023.3292219.","mla":"Polyanskii, Nikita, and Yihan Zhang. “Codes for the Z-Channel.” IEEE Transactions on Information Theory, vol. 69, no. 10, Institute of Electrical and Electronics Engineers, 2023, pp. 6340–57, doi:10.1109/TIT.2023.3292219.","short":"N. Polyanskii, Y. Zhang, IEEE Transactions on Information Theory 69 (2023) 6340–6357.","ista":"Polyanskii N, Zhang Y. 2023. Codes for the Z-channel. IEEE Transactions on Information Theory. 69(10), 6340–6357.","ieee":"N. Polyanskii and Y. Zhang, “Codes for the Z-channel,” IEEE Transactions on Information Theory, vol. 69, no. 10. Institute of Electrical and Electronics Engineers, pp. 6340–6357, 2023.","apa":"Polyanskii, N., & Zhang, Y. (2023). Codes for the Z-channel. IEEE Transactions on Information Theory. Institute of Electrical and Electronics Engineers. https://doi.org/10.1109/TIT.2023.3292219","ama":"Polyanskii N, Zhang Y. Codes for the Z-channel. IEEE Transactions on Information Theory. 2023;69(10):6340-6357. doi:10.1109/TIT.2023.3292219"},"publication":"IEEE Transactions on Information Theory","page":"6340-6357","article_type":"original","issue":"10","abstract":[{"text":"This paper is a collection of results on combinatorial properties of codes for the Z-channel . A Z-channel with error fraction τ takes as input a length- n binary codeword and injects in an adversarial manner up to n τ asymmetric errors, i.e., errors that only zero out bits but do not flip 0’s to 1’s. It is known that the largest ( L - 1)-list-decodable code for the Z-channel with error fraction τ has exponential size (in n ) if τ is less than a critical value that we call the ( L - 1)- list-decoding Plotkin point and has constant size if τ is larger than the threshold. 
The (L-1)-list-decoding Plotkin point is known to be L^(-1/(L-1)) - L^(-L/(L-1)), which equals 1/4 for unique-decoding with L - 1 = 1. In this paper, we derive various results for the size of the largest codes above and below the list-decoding Plotkin point. In particular, we show that the largest (L-1)-list-decodable code ε-above the Plotkin point, for any given sufficiently small positive constant ε > 0, has size Θ_L(ε^(-3/2)) for any L - 1 ≥ 1. We also devise upper and lower bounds on the exponential size of codes below the list-decoding Plotkin point.","lang":"eng"}],"type":"journal_article","oa_version":"Preprint","_id":"13269","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","intvolume":" 69","title":"Codes for the Z-channel","status":"public","publication_identifier":{"issn":["0018-9448"],"eissn":["1557-9654"]},"month":"07","doi":"10.1109/TIT.2023.3292219","language":[{"iso":"eng"}],"external_id":{"isi":["001069680100011"],"arxiv":["2105.01427"]},"oa":1,"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2105.01427","open_access":"1"}],"isi":1,"quality_controlled":"1","author":[{"last_name":"Polyanskii","first_name":"Nikita","full_name":"Polyanskii, Nikita"},{"full_name":"Zhang, Yihan","id":"2ce5da42-b2ea-11eb-bba5-9f264e9d002c","orcid":"0000-0002-6465-6258","first_name":"Yihan","last_name":"Zhang"}],"volume":69,"date_created":"2023-07-23T22:01:14Z","date_updated":"2024-01-29T11:10:54Z","acknowledgement":"Nikita Polyanskii’s research was conducted in part during October 2020 - December 2021 with the Technical University of Munich and the Skolkovo Institute of Science and Technology. His work was supported by the German Research Foundation (Deutsche Forschungsgemeinschaft, DFG) under Grant No. WA3907/1-1 and the Russian Foundation for Basic Research (RFBR)\r\nunder Grant No. 
20-01-00559.\r\nYihan Zhang is supported by funding from the European Union’s Horizon 2020 research and innovation programme under grant agreement No 682203-ERC-[Inf-Speed-Tradeoff].","year":"2023","department":[{"_id":"MaMo"}],"publisher":"Institute of Electrical and Electronics Engineers","publication_status":"published"},{"language":[{"iso":"eng"}],"conference":{"start_date":"2023-07-23","location":"Honolulu, HI, United States","end_date":"2023-07-29","name":"ICML: International Conference on Machine Learning"},"quality_controlled":"1","project":[{"name":"Prix Lopez-Loretta 2019 - Marco Mondelli","_id":"059876FA-7A3F-11EA-A408-12923DDC885E"}],"main_file_link":[{"url":"https://arxiv.org/abs/2302.01629","open_access":"1"}],"external_id":{"arxiv":["2302.01629"]},"oa":1,"month":"10","date_created":"2023-04-23T16:11:03Z","date_updated":"2024-02-06T07:42:00Z","volume":202,"author":[{"id":"ca726dda-de17-11ea-bc14-f9da834f63aa","last_name":"Bombari","first_name":"Simone","full_name":"Bombari, Simone"},{"full_name":"Kiyani, Shayan","id":"f5a2b424-e339-11ed-8435-ff3b4fe70cf8","first_name":"Shayan","last_name":"Kiyani"},{"full_name":"Mondelli, Marco","id":"27EB676C-8706-11E9-9510-7717E6697425","orcid":"0000-0002-3242-7020","first_name":"Marco","last_name":"Mondelli"}],"related_material":{"link":[{"relation":"software","url":"https://github.com/simone-bombari/beyond-universal-robustness"}]},"publication_status":"published","department":[{"_id":"GradSch"},{"_id":"MaMo"}],"publisher":"ML Research Press","acknowledgement":"Simone Bombari and Marco Mondelli were partially supported by the 2019 Lopez-Loreta prize, and\r\nthe authors would like to thank Hamed Hassani for helpful discussions.\r\n","year":"2023","date_published":"2023-10-27T00:00:00Z","page":"2738-2776","publication":"Proceedings of the 40th International Conference on Machine Learning","citation":{"short":"S. Bombari, S. Kiyani, M. 
Mondelli, in:, Proceedings of the 40th International Conference on Machine Learning, ML Research Press, 2023, pp. 2738–2776.","mla":"Bombari, Simone, et al. “Beyond the Universal Law of Robustness: Sharper Laws for Random Features and Neural Tangent Kernels.” Proceedings of the 40th International Conference on Machine Learning, vol. 202, ML Research Press, 2023, pp. 2738–76.","chicago":"Bombari, Simone, Shayan Kiyani, and Marco Mondelli. “Beyond the Universal Law of Robustness: Sharper Laws for Random Features and Neural Tangent Kernels.” In Proceedings of the 40th International Conference on Machine Learning, 202:2738–76. ML Research Press, 2023.","ama":"Bombari S, Kiyani S, Mondelli M. Beyond the universal law of robustness: Sharper laws for random features and neural tangent kernels. In: Proceedings of the 40th International Conference on Machine Learning. Vol 202. ML Research Press; 2023:2738-2776.","ieee":"S. Bombari, S. Kiyani, and M. Mondelli, “Beyond the universal law of robustness: Sharper laws for random features and neural tangent kernels,” in Proceedings of the 40th International Conference on Machine Learning, Honolulu, HI, United States, 2023, vol. 202, pp. 2738–2776.","apa":"Bombari, S., Kiyani, S., & Mondelli, M. (2023). Beyond the universal law of robustness: Sharper laws for random features and neural tangent kernels. In Proceedings of the 40th International Conference on Machine Learning (Vol. 202, pp. 2738–2776). Honolulu, HI, United States: ML Research Press.","ista":"Bombari S, Kiyani S, Mondelli M. 2023. Beyond the universal law of robustness: Sharper laws for random features and neural tangent kernels. Proceedings of the 40th International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 
202, 2738–2776."},"day":"27","article_processing_charge":"No","oa_version":"Preprint","status":"public","title":"Beyond the universal law of robustness: Sharper laws for random features and neural tangent kernels","intvolume":" 202","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"12859","abstract":[{"lang":"eng","text":"Machine learning models are vulnerable to adversarial perturbations, and a thought-provoking paper by Bubeck and Sellke has analyzed this phenomenon through the lens of over-parameterization: interpolating smoothly the data requires significantly more parameters than simply memorizing it. However, this \"universal\" law provides only a necessary condition for robustness, and it is unable to discriminate between models. In this paper, we address these gaps by focusing on empirical risk minimization in two prototypical settings, namely, random features and the neural tangent kernel (NTK). We prove that, for random features, the model is not robust for any degree of over-parameterization, even when the necessary condition coming from the universal law of robustness is satisfied. In contrast, for even activations, the NTK model meets the universal lower bound, and it is robust as soon as the necessary condition on over-parameterization is fulfilled. This also addresses a conjecture in prior work by Bubeck, Li and Nagaraj. Our analysis decouples the effect of the kernel of the model from an \"interaction matrix\", which describes the interaction with the test data and captures the effect of the activation. Our theoretical results are corroborated by numerical evidence on both synthetic and standard datasets (MNIST, CIFAR-10)."}],"alternative_title":["PMLR"],"type":"conference"},{"publication":"37th Annual Conference on Neural Information Processing Systems","citation":{"ama":"Súkeník P, Mondelli M, Lampert C. Deep neural collapse is provably optimal for the deep unconstrained features model. 
In: 37th Annual Conference on Neural Information Processing Systems.","apa":"Súkeník, P., Mondelli, M., & Lampert, C. (n.d.). Deep neural collapse is provably optimal for the deep unconstrained features model. In 37th Annual Conference on Neural Information Processing Systems. New Orleans, LA, United States.","ieee":"P. Súkeník, M. Mondelli, and C. Lampert, “Deep neural collapse is provably optimal for the deep unconstrained features model,” in 37th Annual Conference on Neural Information Processing Systems, New Orleans, LA, United States.","ista":"Súkeník P, Mondelli M, Lampert C. Deep neural collapse is provably optimal for the deep unconstrained features model. 37th Annual Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems, NeurIPS, .","short":"P. Súkeník, M. Mondelli, C. Lampert, in:, 37th Annual Conference on Neural Information Processing Systems, n.d.","mla":"Súkeník, Peter, et al. “Deep Neural Collapse Is Provably Optimal for the Deep Unconstrained Features Model.” 37th Annual Conference on Neural Information Processing Systems.","chicago":"Súkeník, Peter, Marco Mondelli, and Christoph Lampert. “Deep Neural Collapse Is Provably Optimal for the Deep Unconstrained Features Model.” In 37th Annual Conference on Neural Information Processing Systems, n.d."},"oa":1,"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2305.13165"}],"external_id":{"arxiv":["2305.13165"]},"quality_controlled":"1","project":[{"_id":"059876FA-7A3F-11EA-A408-12923DDC885E","name":"Prix Lopez-Loretta 2019 - Marco Mondelli"}],"conference":{"start_date":"2023-12-10","location":"New Orleans, LA, United States","end_date":"2023-12-16","name":"NeurIPS: Neural Information Processing Systems"},"date_published":"2023-12-15T00:00:00Z","language":[{"iso":"eng"}],"day":"15","month":"12","article_processing_charge":"No","year":"2023","_id":"14921","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","acknowledgement":"M. M. 
is partially supported by the 2019 Lopez-Loreta Prize. The authors would like to thank Eugenia Iofinova, Bernd Prach and Simone Bombari for valuable feedback on the manuscript.","publication_status":"inpress","status":"public","title":"Deep neural collapse is provably optimal for the deep unconstrained features model","department":[{"_id":"MaMo"},{"_id":"ChLa"}],"author":[{"full_name":"Súkeník, Peter","last_name":"Súkeník","first_name":"Peter","id":"d64d6a8d-eb8e-11eb-b029-96fd216dec3c"},{"full_name":"Mondelli, Marco","last_name":"Mondelli","first_name":"Marco","orcid":"0000-0002-3242-7020","id":"27EB676C-8706-11E9-9510-7717E6697425"},{"first_name":"Christoph","last_name":"Lampert","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph"}],"date_created":"2024-02-02T11:17:41Z","date_updated":"2024-02-06T07:53:26Z","oa_version":"Preprint","type":"conference","alternative_title":["NeurIPS"],"abstract":[{"lang":"eng","text":"Neural collapse (NC) refers to the surprising structure of the last layer of deep neural networks in the terminal phase of gradient descent training. Recently, an increasing amount of experimental evidence has pointed to the propagation of NC to earlier layers of neural networks. However, while the NC in the last layer is well studied theoretically, much less is known about its multi-layered counterpart - deep neural collapse (DNC). In particular, existing work focuses either on linear layers or only on the last two layers at the price of an extra assumption. Our paper fills this gap by generalizing the established analytical framework for NC - the unconstrained features model - to multiple non-linear layers. Our key technical contribution is to show that, in a deep unconstrained features model, the unique global optimum for binary classification exhibits all the properties typical of DNC. This explains the existing experimental evidence of DNC. 
We also empirically show that (i) by optimizing deep unconstrained features models via gradient descent, the resulting solution agrees well with our theory, and (ii) trained networks recover the unconstrained features suitable for the occurrence of DNC, thus supporting the validity of this modeling principle."}]},{"oa_version":"Published Version","date_created":"2024-02-02T11:21:56Z","date_updated":"2024-02-06T08:53:15Z","author":[{"full_name":"Wu, Diyuan","id":"1a5914c2-896a-11ed-bdf8-fb80621a0635","last_name":"Wu","first_name":"Diyuan"},{"full_name":"Kungurtsev, Vyacheslav","last_name":"Kungurtsev","first_name":"Vyacheslav"},{"full_name":"Mondelli, Marco","orcid":"0000-0002-3242-7020","id":"27EB676C-8706-11E9-9510-7717E6697425","last_name":"Mondelli","first_name":"Marco"}],"publisher":"ML Research Press","department":[{"_id":"MaMo"}],"title":"Mean-field analysis for heavy ball methods: Dropout-stability, connectivity, and global convergence","publication_status":"published","status":"public","_id":"14924","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","acknowledgement":"D. Wu and M. Mondelli are partially supported by the 2019 Lopez-Loreta Prize. V. Kungurtsev was supported by the OP VVV project CZ.02.1.01/0.0/0.0/16_019/0000765 \"Research Center for Informatics\".","year":"2023","abstract":[{"lang":"eng","text":"The stochastic heavy ball method (SHB), also known as stochastic gradient descent (SGD) with Polyak's momentum, is widely used in training neural networks. However, despite the remarkable success of such algorithm in practice, its theoretical characterization remains limited. 
In this paper, we focus on neural networks with two and three layers and provide a rigorous understanding of the properties of the solutions found by SHB: \\emph{(i)} stability after dropping out part of the neurons, \\emph{(ii)} connectivity along a low-loss path, and \\emph{(iii)} convergence to the global optimum.\r\nTo achieve this goal, we take a mean-field view and relate the SHB dynamics to a certain partial differential equation in the limit of large network widths. This mean-field perspective has inspired a recent line of work focusing on SGD while, in contrast, our paper considers an algorithm with momentum. More specifically, after proving existence and uniqueness of the limit differential equations, we show convergence to the global optimum and give a quantitative bound between the mean-field limit and the SHB dynamics of a finite-width network. Armed with this last bound, we are able to establish the dropout-stability and connectivity of SHB solutions."}],"alternative_title":["TMLR"],"type":"conference","language":[{"iso":"eng"}],"date_published":"2023-02-28T00:00:00Z","project":[{"_id":"059876FA-7A3F-11EA-A408-12923DDC885E","name":"Prix Lopez-Loretta 2019 - Marco Mondelli"}],"quality_controlled":"1","tmp":{"name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","image":"/images/cc_by.png"},"oa":1,"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2210.06819","open_access":"1"}],"citation":{"chicago":"Wu, Diyuan, Vyacheslav Kungurtsev, and Marco Mondelli. “Mean-Field Analysis for Heavy Ball Methods: Dropout-Stability, Connectivity, and Global Convergence.” In Transactions on Machine Learning Research. ML Research Press, 2023.","mla":"Wu, Diyuan, et al. 
“Mean-Field Analysis for Heavy Ball Methods: Dropout-Stability, Connectivity, and Global Convergence.” Transactions on Machine Learning Research, ML Research Press, 2023.","short":"D. Wu, V. Kungurtsev, M. Mondelli, in:, Transactions on Machine Learning Research, ML Research Press, 2023.","ista":"Wu D, Kungurtsev V, Mondelli M. 2023. Mean-field analysis for heavy ball methods: Dropout-stability, connectivity, and global convergence. Transactions on Machine Learning Research. , TMLR, .","apa":"Wu, D., Kungurtsev, V., & Mondelli, M. (2023). Mean-field analysis for heavy ball methods: Dropout-stability, connectivity, and global convergence. In Transactions on Machine Learning Research. ML Research Press.","ieee":"D. Wu, V. Kungurtsev, and M. Mondelli, “Mean-field analysis for heavy ball methods: Dropout-stability, connectivity, and global convergence,” in Transactions on Machine Learning Research, 2023.","ama":"Wu D, Kungurtsev V, Mondelli M. Mean-field analysis for heavy ball methods: Dropout-stability, connectivity, and global convergence. In: Transactions on Machine Learning Research. 
ML Research Press; 2023."},"external_id":{"arxiv":["2210.06819"]},"publication":"Transactions on Machine Learning Research","article_processing_charge":"No","has_accepted_license":"1","month":"02","day":"28"},{"department":[{"_id":"MaMo"}],"publisher":"IEEE","publication_status":"inpress","title":"Mismatched estimation of non-symmetric rank-one matrices corrupted by structured noise","status":"public","year":"2023","_id":"14923","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa_version":"Preprint","date_updated":"2024-02-14T14:34:03Z","date_created":"2024-02-02T11:20:39Z","author":[{"full_name":"Fu, Teng","last_name":"Fu","first_name":"Teng"},{"full_name":"Liu, YuHao","first_name":"YuHao","last_name":"Liu"},{"last_name":"Barbier","first_name":"Jean","full_name":"Barbier, Jean"},{"last_name":"Mondelli","first_name":"Marco","orcid":"0000-0002-3242-7020","id":"27EB676C-8706-11E9-9510-7717E6697425","full_name":"Mondelli, Marco"},{"full_name":"Liang, ShanSuo","last_name":"Liang","first_name":"ShanSuo"},{"last_name":"Hou","first_name":"TianQi","full_name":"Hou, TianQi"}],"type":"conference","abstract":[{"text":"We study the performance of a Bayesian statistician who estimates a rank-one signal corrupted by non-symmetric rotationally invariant noise with a generic distribution of singular values. As the signal-to-noise ratio and the noise structure are unknown, a Gaussian setup is incorrectly assumed. We derive the exact analytic expression for the error of the mismatched Bayes estimator and also provide the analysis of an approximate message passing (AMP) algorithm. The first result exploits the asymptotic behavior of spherical integrals for rectangular matrices and of low-rank matrix perturbations; the second one relies on the design and analysis of an auxiliary AMP. 
The numerical experiments show that there is a performance gap between the AMP and Bayes estimators, which is due to the incorrect estimation of the signal norm.","lang":"eng"}],"quality_controlled":"1","external_id":{"arxiv":["2302.03306"]},"oa":1,"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2302.03306"}],"citation":{"mla":"Fu, Teng, et al. “Mismatched Estimation of Non-Symmetric Rank-One Matrices Corrupted by Structured Noise.” Proceedings of 2023 IEEE International Symposium on Information Theory, IEEE, doi:10.1109/isit54713.2023.10206671.","short":"T. Fu, Y. Liu, J. Barbier, M. Mondelli, S. Liang, T. Hou, in:, Proceedings of 2023 IEEE International Symposium on Information Theory, IEEE, n.d.","chicago":"Fu, Teng, YuHao Liu, Jean Barbier, Marco Mondelli, ShanSuo Liang, and TianQi Hou. “Mismatched Estimation of Non-Symmetric Rank-One Matrices Corrupted by Structured Noise.” In Proceedings of 2023 IEEE International Symposium on Information Theory. IEEE, n.d. https://doi.org/10.1109/isit54713.2023.10206671.","ama":"Fu T, Liu Y, Barbier J, Mondelli M, Liang S, Hou T. Mismatched estimation of non-symmetric rank-one matrices corrupted by structured noise. In: Proceedings of 2023 IEEE International Symposium on Information Theory. IEEE. doi:10.1109/isit54713.2023.10206671","ista":"Fu T, Liu Y, Barbier J, Mondelli M, Liang S, Hou T. Mismatched estimation of non-symmetric rank-one matrices corrupted by structured noise. Proceedings of 2023 IEEE International Symposium on Information Theory. ISIT: IEEE International Symposium on Information Theory.","apa":"Fu, T., Liu, Y., Barbier, J., Mondelli, M., Liang, S., & Hou, T. (n.d.). Mismatched estimation of non-symmetric rank-one matrices corrupted by structured noise. In Proceedings of 2023 IEEE International Symposium on Information Theory. Taipei, Taiwan: IEEE. https://doi.org/10.1109/isit54713.2023.10206671","ieee":"T. Fu, Y. Liu, J. Barbier, M. Mondelli, S. Liang, and T. 
Hou, “Mismatched estimation of non-symmetric rank-one matrices corrupted by structured noise,” in Proceedings of 2023 IEEE International Symposium on Information Theory, Taipei, Taiwan."},"publication":"Proceedings of 2023 IEEE International Symposium on Information Theory","language":[{"iso":"eng"}],"date_published":"2023-06-30T00:00:00Z","doi":"10.1109/isit54713.2023.10206671","conference":{"start_date":"2023-06-25","location":"Taipei, Taiwan","end_date":"2023-06-30","name":"ISIT: IEEE International Symposium on Information Theory"},"article_processing_charge":"No","month":"06","day":"30"},{"date_published":"2023-06-30T00:00:00Z","citation":{"mla":"Esposito, Amedeo Roberto, and Marco Mondelli. “Concentration without Independence via Information Measures.” Proceedings of 2023 IEEE International Symposium on Information Theory, IEEE, 2023, pp. 400–05, doi:10.1109/isit54713.2023.10206899.","short":"A.R. Esposito, M. Mondelli, in:, Proceedings of 2023 IEEE International Symposium on Information Theory, IEEE, 2023, pp. 400–405.","chicago":"Esposito, Amedeo Roberto, and Marco Mondelli. “Concentration without Independence via Information Measures.” In Proceedings of 2023 IEEE International Symposium on Information Theory, 400–405. IEEE, 2023. https://doi.org/10.1109/isit54713.2023.10206899.","ama":"Esposito AR, Mondelli M. Concentration without independence via information measures. In: Proceedings of 2023 IEEE International Symposium on Information Theory. IEEE; 2023:400-405. doi:10.1109/isit54713.2023.10206899","ista":"Esposito AR, Mondelli M. 2023. Concentration without independence via information measures. Proceedings of 2023 IEEE International Symposium on Information Theory. ISIT: IEEE International Symposium on Information Theory, 400–405.","ieee":"A. R. Esposito and M. Mondelli, “Concentration without independence via information measures,” in Proceedings of 2023 IEEE International Symposium on Information Theory, Taipei, Taiwan, 2023, pp. 
400–405.","apa":"Esposito, A. R., & Mondelli, M. (2023). Concentration without independence via information measures. In Proceedings of 2023 IEEE International Symposium on Information Theory (pp. 400–405). Taipei, Taiwan: IEEE. https://doi.org/10.1109/isit54713.2023.10206899"},"publication":"Proceedings of 2023 IEEE International Symposium on Information Theory","page":"400-405","article_processing_charge":"No","day":"30","oa_version":"Preprint","_id":"14922","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","status":"public","title":"Concentration without independence via information measures","abstract":[{"text":"We propose a novel approach to concentration for non-independent random variables. The main idea is to ``pretend'' that the random variables are independent and pay a multiplicative price measuring how far they are from actually being independent. This price is encapsulated in the Hellinger integral between the joint and the product of the marginals, which is then upper bounded leveraging tensorisation properties. Our bounds represent a natural generalisation of concentration inequalities in the presence of dependence: we recover exactly the classical bounds (McDiarmid's inequality) when the random variables are independent. Furthermore, in a ``large deviations'' regime, we obtain the same decay in the probability as for the independent case, even when the random variables display non-trivial dependencies. To show this, we consider a number of applications of interest. First, we provide a bound for Markov chains with finite state space. Then, we consider the Simple Symmetric Random Walk, which is a non-contracting Markov chain, and a non-Markovian setting in which the stochastic process depends on its entire past. To conclude, we propose an application to Markov Chain Monte Carlo methods, where our approach leads to an improved lower bound on the minimum burn-in period required to reach a certain accuracy. 
In all of these settings, we provide a regime of parameters in which our bound fares better than what the state of the art can provide.","lang":"eng"}],"type":"conference","doi":"10.1109/isit54713.2023.10206899","conference":{"location":"Taipei, Taiwan","start_date":"2023-06-25","end_date":"2023-06-30","name":"ISIT: IEEE International Symposium on Information Theory"},"language":[{"iso":"eng"}],"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2303.07245"}],"external_id":{"arxiv":["2303.07245"]},"oa":1,"project":[{"_id":"059876FA-7A3F-11EA-A408-12923DDC885E","name":"Prix Lopez-Loretta 2019 - Marco Mondelli"}],"quality_controlled":"1","publication_identifier":{"eissn":["2157-8117"],"eisbn":["9781665475549"]},"month":"06","related_material":{"record":[{"id":"15172","relation":"later_version","status":"public"}]},"author":[{"id":"9583e921-e1ad-11ec-9862-cef099626dc9","last_name":"Esposito","first_name":"Amedeo Roberto","full_name":"Esposito, Amedeo Roberto"},{"last_name":"Mondelli","first_name":"Marco","orcid":"0000-0002-3242-7020","id":"27EB676C-8706-11E9-9510-7717E6697425","full_name":"Mondelli, Marco"}],"date_updated":"2024-03-25T07:15:51Z","date_created":"2024-02-02T11:18:40Z","acknowledgement":"The authors are partially supported by the 2019 Lopez-Loreta Prize. 
They would also like to thank Professor Jan Maas for providing valuable suggestions and comments on an early version of the work.","year":"2023","publisher":"IEEE","department":[{"_id":"MaMo"}],"publication_status":"published"},{"language":[{"iso":"eng"}],"tmp":{"name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","image":"/images/cc_by.png"},"external_id":{"arxiv":["2111.02278"]},"oa":1,"project":[{"name":"Prix Lopez-Loretta 2019 - Marco Mondelli","_id":"059876FA-7A3F-11EA-A408-12923DDC885E"}],"quality_controlled":"1","publication_identifier":{"eissn":["1533-7928"],"issn":["1532-4435"]},"month":"04","related_material":{"link":[{"url":"https://www.jmlr.org/papers/v23/21-1365.html","relation":"other"}]},"author":[{"last_name":"Shevchenko","first_name":"Aleksandr","id":"F2B06EC2-C99E-11E9-89F0-752EE6697425","full_name":"Shevchenko, Aleksandr"},{"last_name":"Kungurtsev","first_name":"Vyacheslav","full_name":"Kungurtsev, Vyacheslav"},{"first_name":"Marco","last_name":"Mondelli","id":"27EB676C-8706-11E9-9510-7717E6697425","orcid":"0000-0002-3242-7020","full_name":"Mondelli, Marco"}],"volume":23,"date_updated":"2022-05-30T08:34:14Z","date_created":"2022-05-29T22:01:54Z","year":"2022","acknowledgement":"We would like to thank Mert Pilanci for several exploratory discussions in the early stage\r\nof the project, Jan Maas for clarifications about Jordan et al. (1998), and Max Zimmer for\r\nsuggestive numerical experiments. A. Shevchenko and M. Mondelli are partially supported\r\nby the 2019 Lopez-Loreta Prize. V. 
Kungurtsev acknowledges support to the OP VVV\r\nproject CZ.02.1.01/0.0/0.0/16 019/0000765 Research Center for Informatics.\r\n","department":[{"_id":"MaMo"},{"_id":"DaAl"}],"publisher":"Journal of Machine Learning Research","publication_status":"published","file_date_updated":"2022-05-30T08:22:55Z","date_published":"2022-04-01T00:00:00Z","citation":{"ama":"Shevchenko A, Kungurtsev V, Mondelli M. Mean-field analysis of piecewise linear solutions for wide ReLU networks. Journal of Machine Learning Research. 2022;23(130):1-55.","ieee":"A. Shevchenko, V. Kungurtsev, and M. Mondelli, “Mean-field analysis of piecewise linear solutions for wide ReLU networks,” Journal of Machine Learning Research, vol. 23, no. 130. Journal of Machine Learning Research, pp. 1–55, 2022.","apa":"Shevchenko, A., Kungurtsev, V., & Mondelli, M. (2022). Mean-field analysis of piecewise linear solutions for wide ReLU networks. Journal of Machine Learning Research. Journal of Machine Learning Research.","ista":"Shevchenko A, Kungurtsev V, Mondelli M. 2022. Mean-field analysis of piecewise linear solutions for wide ReLU networks. Journal of Machine Learning Research. 23(130), 1–55.","short":"A. Shevchenko, V. Kungurtsev, M. Mondelli, Journal of Machine Learning Research 23 (2022) 1–55.","mla":"Shevchenko, Aleksandr, et al. “Mean-Field Analysis of Piecewise Linear Solutions for Wide ReLU Networks.” Journal of Machine Learning Research, vol. 23, no. 130, Journal of Machine Learning Research, 2022, pp. 1–55.","chicago":"Shevchenko, Aleksandr, Vyacheslav Kungurtsev, and Marco Mondelli. “Mean-Field Analysis of Piecewise Linear Solutions for Wide ReLU Networks.” Journal of Machine Learning Research. 
Journal of Machine Learning Research, 2022."},"publication":"Journal of Machine Learning Research","page":"1-55","article_type":"original","has_accepted_license":"1","article_processing_charge":"No","day":"01","scopus_import":"1","oa_version":"Published Version","file":[{"file_size":1521701,"content_type":"application/pdf","creator":"cchlebak","file_name":"21-1365.pdf","access_level":"open_access","date_created":"2022-05-30T08:22:55Z","date_updated":"2022-05-30T08:22:55Z","checksum":"d4ff5d1affb34848b5c5e4002483fc62","success":1,"relation":"main_file","file_id":"11422"}],"_id":"11420","user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","intvolume":" 23","ddc":["000"],"title":"Mean-field analysis of piecewise linear solutions for wide ReLU networks","status":"public","issue":"130","abstract":[{"lang":"eng","text":"Understanding the properties of neural networks trained via stochastic gradient descent (SGD) is at the heart of the theory of deep learning. In this work, we take a mean-field view, and consider a two-layer ReLU network trained via noisy-SGD for a univariate regularized regression problem. Our main result is that SGD with vanishingly small noise injected in the gradients is biased towards a simple solution: at convergence, the ReLU network implements a piecewise linear map of the inputs, and the number of “knot” points -- i.e., points where the tangent of the ReLU network estimator changes -- between two consecutive training inputs is at most three. In particular, as the number of neurons of the network grows, the SGD dynamics is captured by the solution of a gradient flow and, at convergence, the distribution of the weights approaches the unique minimizer of a related free energy, which has a Gibbs form. Our key technical contribution consists in the analysis of the estimator resulting from this minimizer: we show that its second derivative vanishes everywhere, except at some specific locations which represent the “knot” points. 
We also provide empirical evidence that knots at locations distinct from the data points might occur, as predicted by our theory."}],"type":"journal_article"},{"type":"conference","abstract":[{"text":"We characterize the capacity for the discrete-time arbitrarily varying channel with discrete inputs, outputs, and states when (a) the encoder and decoder do not share common randomness, (b) the input and state are subject to cost constraints, (c) the transition matrix of the channel is deterministic given the state, and (d) at each time step the adversary can only observe the current and past channel inputs when choosing the state at that time. The achievable strategy involves stochastic encoding together with list decoding and a disambiguation step. The converse uses a two-phase \"babble-and-push\" strategy where the adversary chooses the state randomly in the first phase, list decodes the output, and then chooses state inputs to symmetrize the channel in the second phase. These results generalize prior work on specific channels models (additive, erasure) to general discrete alphabets and models.","lang":"eng"}],"intvolume":" 2022","title":"The capacity of causal adversarial channels","status":"public","_id":"12011","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa_version":"Preprint","scopus_import":"1","article_processing_charge":"No","day":"03","page":"2523-2528","citation":{"chicago":"Zhang, Yihan, Sidharth Jaggi, Michael Langberg, and Anand D. Sarwate. “The Capacity of Causal Adversarial Channels.” In 2022 IEEE International Symposium on Information Theory, 2022:2523–28. IEEE, 2022. https://doi.org/10.1109/ISIT50566.2022.9834709.","mla":"Zhang, Yihan, et al. “The Capacity of Causal Adversarial Channels.” 2022 IEEE International Symposium on Information Theory, vol. 2022, IEEE, 2022, pp. 2523–28, doi:10.1109/ISIT50566.2022.9834709.","short":"Y. Zhang, S. Jaggi, M. Langberg, A.D. 
Sarwate, in:, 2022 IEEE International Symposium on Information Theory, IEEE, 2022, pp. 2523–2528.","ista":"Zhang Y, Jaggi S, Langberg M, Sarwate AD. 2022. The capacity of causal adversarial channels. 2022 IEEE International Symposium on Information Theory. ISIT: International Symposium on Information Theory vol. 2022, 2523–2528.","apa":"Zhang, Y., Jaggi, S., Langberg, M., & Sarwate, A. D. (2022). The capacity of causal adversarial channels. In 2022 IEEE International Symposium on Information Theory (Vol. 2022, pp. 2523–2528). Espoo, Finland: IEEE. https://doi.org/10.1109/ISIT50566.2022.9834709","ieee":"Y. Zhang, S. Jaggi, M. Langberg, and A. D. Sarwate, “The capacity of causal adversarial channels,” in 2022 IEEE International Symposium on Information Theory, Espoo, Finland, 2022, vol. 2022, pp. 2523–2528.","ama":"Zhang Y, Jaggi S, Langberg M, Sarwate AD. The capacity of causal adversarial channels. In: 2022 IEEE International Symposium on Information Theory. Vol 2022. IEEE; 2022:2523-2528. 
doi:10.1109/ISIT50566.2022.9834709"},"publication":"2022 IEEE International Symposium on Information Theory","date_published":"2022-08-03T00:00:00Z","department":[{"_id":"MaMo"}],"publisher":"IEEE","publication_status":"published","acknowledgement":"The work of ADS and ML was supported in part by the US National Science Foundation under awards CCF-1909468 and CCF-1909451.","year":"2022","volume":2022,"date_updated":"2022-09-05T09:09:15Z","date_created":"2022-09-04T22:02:03Z","author":[{"full_name":"Zhang, Yihan","id":"2ce5da42-b2ea-11eb-bba5-9f264e9d002c","first_name":"Yihan","last_name":"Zhang"},{"full_name":"Jaggi, Sidharth","first_name":"Sidharth","last_name":"Jaggi"},{"full_name":"Langberg, Michael","first_name":"Michael","last_name":"Langberg"},{"last_name":"Sarwate","first_name":"Anand D.","full_name":"Sarwate, Anand D."}],"publication_identifier":{"isbn":["9781665421591"],"issn":["2157-8095"]},"month":"08","quality_controlled":"1","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2205.06708"}],"oa":1,"external_id":{"arxiv":["2205.06708"]},"language":[{"iso":"eng"}],"doi":"10.1109/ISIT50566.2022.9834709","conference":{"name":"ISIT: International Symposium on Information Theory","location":"Espoo, Finland","start_date":"2022-06-26","end_date":"2022-07-01"}},{"scopus_import":"1","article_processing_charge":"No","publication_identifier":{"isbn":["9781665421591"],"issn":["2157-8095"]},"day":"03","month":"08","page":"2535-2540","quality_controlled":"1","citation":{"short":"A.K. Yadav, M. Alimohammadi, Y. Zhang, A.J. Budkuley, S. Jaggi, in:, 2022 IEEE International Symposium on Information Theory, Institute of Electrical and Electronics Engineers, 2022, pp. 2535–2540.","mla":"Yadav, Anuj Kumar, et al. “New Results on AVCs with Omniscient and Myopic Adversaries.” 2022 IEEE International Symposium on Information Theory, vol. 2022, Institute of Electrical and Electronics Engineers, 2022, pp. 
2535–40, doi:10.1109/ISIT50566.2022.9834632.","chicago":"Yadav, Anuj Kumar, Mohammadreza Alimohammadi, Yihan Zhang, Amitalok J. Budkuley, and Sidharth Jaggi. “New Results on AVCs with Omniscient and Myopic Adversaries.” In 2022 IEEE International Symposium on Information Theory, 2022:2535–40. Institute of Electrical and Electronics Engineers, 2022. https://doi.org/10.1109/ISIT50566.2022.9834632.","ama":"Yadav AK, Alimohammadi M, Zhang Y, Budkuley AJ, Jaggi S. New results on AVCs with omniscient and myopic adversaries. In: 2022 IEEE International Symposium on Information Theory. Vol 2022. Institute of Electrical and Electronics Engineers; 2022:2535-2540. doi:10.1109/ISIT50566.2022.9834632","ieee":"A. K. Yadav, M. Alimohammadi, Y. Zhang, A. J. Budkuley, and S. Jaggi, “New results on AVCs with omniscient and myopic adversaries,” in 2022 IEEE International Symposium on Information Theory, Espoo, Finland, 2022, vol. 2022, pp. 2535–2540.","apa":"Yadav, A. K., Alimohammadi, M., Zhang, Y., Budkuley, A. J., & Jaggi, S. (2022). New results on AVCs with omniscient and myopic adversaries. In 2022 IEEE International Symposium on Information Theory (Vol. 2022, pp. 2535–2540). Espoo, Finland: Institute of Electrical and Electronics Engineers. https://doi.org/10.1109/ISIT50566.2022.9834632","ista":"Yadav AK, Alimohammadi M, Zhang Y, Budkuley AJ, Jaggi S. 2022. New results on AVCs with omniscient and myopic adversaries. 2022 IEEE International Symposium on Information Theory. ISIT: International Symposium on Information Theory vol. 
2022, 2535–2540."},"publication":"2022 IEEE International Symposium on Information Theory","language":[{"iso":"eng"}],"doi":"10.1109/ISIT50566.2022.9834632","date_published":"2022-08-03T00:00:00Z","conference":{"name":"ISIT: International Symposium on Information Theory","end_date":"2022-07-01","start_date":"2022-06-26","location":"Espoo, Finland"},"type":"conference","abstract":[{"text":"In the classic adversarial communication problem, two parties communicate over a noisy channel in the presence of a malicious jamming adversary. The arbitrarily varying channels (AVCs) offer an elegant framework to study a wide range of interesting adversary models. The optimal throughput or capacity over such AVCs is intimately tied to the underlying adversary model; in some cases, capacity is unknown and the problem is known to be notoriously hard. The omniscient adversary, one which knows the sender’s entire channel transmission a priori, is one of such classic models of interest; the capacity under such an adversary remains an exciting open problem. The myopic adversary is a generalization of that model where the adversary’s observation may be corrupted over a noisy discrete memoryless channel. Through the adversary’s myopicity, one can unify the slew of different adversary models, ranging from the omniscient adversary to one that is completely blind to the transmission (the latter is the well known oblivious model where the capacity is fully characterized). In this work, we present new results on the capacity under both the omniscient and myopic adversary models. We completely characterize the positive capacity threshold over general AVCs with omniscient adversaries. The characterization is in terms of two key combinatorial objects: the set of completely positive distributions and the CP-confusability set. 
For omniscient AVCs with positive capacity, we present non-trivial lower and upper bounds on the capacity; unlike some of the previous bounds, our bounds hold under fairly general input and jamming constraints. Our lower bound improves upon the generalized Gilbert-Varshamov bound for general AVCs while the upper bound generalizes the well known Elias-Bassalygo bound (known for binary and q-ary alphabets). For the myopic AVCs, we build on prior results known for the so-called sufficiently myopic model, and present new results on the positive rate communication threshold over the so-called insufficiently myopic regime (a completely insufficient myopic adversary specializes to an omniscient adversary). We present interesting examples for the widely studied models of adversarial bit-flip and bit-erasure channels. In fact, for the bit-flip AVC with additive adversarial noise as well as random noise, we completely characterize the omniscient model capacity when the random noise is sufficiently large vis-a-vis the adversary’s budget.","lang":"eng"}],"intvolume":" 2022","department":[{"_id":"MaMo"}],"publisher":"Institute of Electrical and Electronics Engineers","title":"New results on AVCs with omniscient and myopic adversaries","status":"public","publication_status":"published","_id":"12017","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","year":"2022","oa_version":"None","volume":2022,"date_updated":"2023-02-13T09:00:14Z","date_created":"2022-09-04T22:02:06Z","author":[{"full_name":"Yadav, Anuj Kumar","last_name":"Yadav","first_name":"Anuj Kumar"},{"last_name":"Alimohammadi","first_name":"Mohammadreza","full_name":"Alimohammadi, Mohammadreza"},{"full_name":"Zhang, Yihan","id":"2ce5da42-b2ea-11eb-bba5-9f264e9d002c","first_name":"Yihan","last_name":"Zhang"},{"full_name":"Budkuley, Amitalok J.","last_name":"Budkuley","first_name":"Amitalok J."},{"last_name":"Jaggi","first_name":"Sidharth","full_name":"Jaggi, 
Sidharth"}]},{"article_processing_charge":"No","publication_identifier":{"isbn":["9781665421591"],"issn":["2157-8095"]},"month":"08","day":"03","scopus_import":"1","date_published":"2022-08-03T00:00:00Z","doi":"10.1109/ISIT50566.2022.9834850","conference":{"name":"ISIT: International Symposium on Information Theory","end_date":"2022-07-01","start_date":"2022-06-26","location":"Espoo, Finland"},"language":[{"iso":"eng"}],"citation":{"chicago":"Joshi, Pranav, Amritakshya Purkayastha, Yihan Zhang, Amitalok J. Budkuley, and Sidharth Jaggi. “On the Capacity of Additive AVCs with Feedback.” In 2022 IEEE International Symposium on Information Theory, 2022:504–9. IEEE, 2022. https://doi.org/10.1109/ISIT50566.2022.9834850.","mla":"Joshi, Pranav, et al. “On the Capacity of Additive AVCs with Feedback.” 2022 IEEE International Symposium on Information Theory, vol. 2022, IEEE, 2022, pp. 504–09, doi:10.1109/ISIT50566.2022.9834850.","short":"P. Joshi, A. Purkayastha, Y. Zhang, A.J. Budkuley, S. Jaggi, in:, 2022 IEEE International Symposium on Information Theory, IEEE, 2022, pp. 504–509.","ista":"Joshi P, Purkayastha A, Zhang Y, Budkuley AJ, Jaggi S. 2022. On the capacity of additive AVCs with feedback. 2022 IEEE International Symposium on Information Theory. ISIT: International Symposium on Information Theory vol. 2022, 504–509.","apa":"Joshi, P., Purkayastha, A., Zhang, Y., Budkuley, A. J., & Jaggi, S. (2022). On the capacity of additive AVCs with feedback. In 2022 IEEE International Symposium on Information Theory (Vol. 2022, pp. 504–509). Espoo, Finland: IEEE. https://doi.org/10.1109/ISIT50566.2022.9834850","ieee":"P. Joshi, A. Purkayastha, Y. Zhang, A. J. Budkuley, and S. Jaggi, “On the capacity of additive AVCs with feedback,” in 2022 IEEE International Symposium on Information Theory, Espoo, Finland, 2022, vol. 2022, pp. 504–509.","ama":"Joshi P, Purkayastha A, Zhang Y, Budkuley AJ, Jaggi S. On the capacity of additive AVCs with feedback. 
In: 2022 IEEE International Symposium on Information Theory. Vol 2022. IEEE; 2022:504-509. doi:10.1109/ISIT50566.2022.9834850"},"publication":"2022 IEEE International Symposium on Information Theory","page":"504-509","quality_controlled":"1","abstract":[{"text":"We consider the problem of communication over adversarial channels with feedback. Two parties comprising sender Alice and receiver Bob seek to communicate reliably. An adversary James observes Alice's channel transmission entirely and chooses, maliciously, its additive channel input or jamming state thereby corrupting Bob's observation. Bob can communicate over a one-way reverse link with Alice; we assume that transmissions over this feedback link cannot be corrupted by James. Our goal in this work is to study the optimum throughput or capacity over such channels with feedback. We first present results for the quadratically-constrained additive channel where communication is known to be impossible when the noise-to-signal (power) ratio (NSR) is at least 1. We present a novel achievability scheme to establish that positive rate communication is possible even when the NSR is as high as 8/9. We also present new converse upper bounds on the capacity of this channel under potentially stochastic encoders and decoders. We also study feedback communication over the more widely studied q-ary alphabet channel under additive noise. For the q -ary channel, where q > 2, it is well known that capacity is positive under full feedback if and only if the adversary can corrupt strictly less than half the transmitted symbols. We generalize this result and show that the same threshold holds for positive rate communication when the noiseless feedback may only be partial; our scheme employs a stochastic decoder. We extend this characterization, albeit partially, to fully deterministic schemes under partial noiseless feedback. 
We also present new converse upper bounds for q-ary channels under full feedback, where the encoder and/or decoder may privately randomize. Our converse results bring to the fore an interesting alternate expression for the well known converse bound for the q-ary channel under full feedback which, when specialized to the binary channel, also equals its known capacity.","lang":"eng"}],"type":"conference","author":[{"last_name":"Joshi","first_name":"Pranav","full_name":"Joshi, Pranav"},{"last_name":"Purkayastha","first_name":"Amritakshya","full_name":"Purkayastha, Amritakshya"},{"full_name":"Zhang, Yihan","last_name":"Zhang","first_name":"Yihan","id":"2ce5da42-b2ea-11eb-bba5-9f264e9d002c"},{"full_name":"Budkuley, Amitalok J.","first_name":"Amitalok J.","last_name":"Budkuley"},{"full_name":"Jaggi, Sidharth","first_name":"Sidharth","last_name":"Jaggi"}],"oa_version":"None","volume":2022,"date_updated":"2022-09-05T10:23:35Z","date_created":"2022-09-04T22:02:04Z","_id":"12013","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","year":"2022","department":[{"_id":"MaMo"}],"intvolume":" 2022","publisher":"IEEE","title":"On the capacity of additive AVCs with feedback","publication_status":"published","status":"public"},{"type":"conference","abstract":[{"lang":"eng","text":"We consider the problem of coded distributed computing using polar codes. The average execution time of a coded computing system is related to the error probability for transmission over the binary erasure channel in recent work by Soleymani, Jamali and Mahdavifar, where the performance of binary linear codes is investigated. In this paper, we focus on polar codes and unveil a connection between the average execution time and the scaling exponent μ of the family of codes. In the finite-length characterization of polar codes, the scaling exponent is a key object capturing the speed of convergence to capacity. 
In particular, we show that (i) the gap between the normalized average execution time of polar codes and that of optimal MDS codes is O(n^(-1/μ)), and (ii) this upper bound can be improved to roughly O(n^(-1/2)) by considering polar codes with large kernels. We conjecture that these bounds could be improved to O(n^(-2/μ)) and O(n^(-1)), respectively, and provide a heuristic argument as well as numerical evidence supporting this view."}],"_id":"12016","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","title":"Polar coded computing: The role of the scaling exponent","status":"public","intvolume":" 2022","oa_version":"Preprint","scopus_import":"1","day":"03","article_processing_charge":"No","publication":"2022 IEEE International Symposium on Information Theory","citation":{"ista":"Fathollahi D, Mondelli M. 2022. Polar coded computing: The role of the scaling exponent. 2022 IEEE International Symposium on Information Theory. ISIT: International Symposium on Information Theory vol. 2022, 2154–2159.","apa":"Fathollahi, D., & Mondelli, M. (2022). Polar coded computing: The role of the scaling exponent. In 2022 IEEE International Symposium on Information Theory (Vol. 2022, pp. 2154–2159). Espoo, Finland: IEEE. https://doi.org/10.1109/ISIT50566.2022.9834712","ieee":"D. Fathollahi and M. Mondelli, “Polar coded computing: The role of the scaling exponent,” in 2022 IEEE International Symposium on Information Theory, Espoo, Finland, 2022, vol. 2022, pp. 2154–2159.","ama":"Fathollahi D, Mondelli M. Polar coded computing: The role of the scaling exponent. In: 2022 IEEE International Symposium on Information Theory. Vol 2022. IEEE; 2022:2154-2159. doi:10.1109/ISIT50566.2022.9834712","chicago":"Fathollahi, Dorsa, and Marco Mondelli. “Polar Coded Computing: The Role of the Scaling Exponent.” In 2022 IEEE International Symposium on Information Theory, 2022:2154–59. IEEE, 2022. https://doi.org/10.1109/ISIT50566.2022.9834712.","mla":"Fathollahi, Dorsa, and Marco Mondelli. 
“Polar Coded Computing: The Role of the Scaling Exponent.” 2022 IEEE International Symposium on Information Theory, vol. 2022, IEEE, 2022, pp. 2154–59, doi:10.1109/ISIT50566.2022.9834712.","short":"D. Fathollahi, M. Mondelli, in:, 2022 IEEE International Symposium on Information Theory, IEEE, 2022, pp. 2154–2159."},"page":"2154-2159","date_published":"2022-08-03T00:00:00Z","acknowledgement":"D. Fathollahi and M. Mondelli were partially supported by the 2019 Lopez-Loreta Prize. The authors thank Hamed Hassani and Hessam Mahdavifar for helpful discussions.","year":"2022","publication_status":"published","department":[{"_id":"MaMo"}],"publisher":"IEEE","author":[{"last_name":"Fathollahi","first_name":"Dorsa","full_name":"Fathollahi, Dorsa"},{"id":"27EB676C-8706-11E9-9510-7717E6697425","orcid":"0000-0002-3242-7020","first_name":"Marco","last_name":"Mondelli","full_name":"Mondelli, Marco"}],"date_created":"2022-09-04T22:02:05Z","date_updated":"2022-09-05T10:35:40Z","volume":2022,"month":"08","publication_identifier":{"issn":["2157-8095"],"isbn":["9781665421591"]},"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2201.10082","open_access":"1"}],"external_id":{"arxiv":["2201.10082"]},"oa":1,"quality_controlled":"1","project":[{"_id":"059876FA-7A3F-11EA-A408-12923DDC885E","name":"Prix Lopez-Loretta 2019 - Marco Mondelli"}],"conference":{"name":"ISIT: International Symposium on Information Theory","start_date":"2022-06-26","location":"Espoo, Finland","end_date":"2022-07-01"},"doi":"10.1109/ISIT50566.2022.9834712","language":[{"iso":"eng"}]},{"article_processing_charge":"No","day":"03","scopus_import":"1","date_published":"2022-08-03T00:00:00Z","citation":{"short":"S. Torkamani, J.B. Ebrahimi, P. Sadeghi, R.G.L. D’Oliveira, M. Médard, in:, 2022 IEEE International Symposium on Information Theory, IEEE, 2022, pp. 1623–1628.","mla":"Torkamani, Sahel, et al. “Heterogeneous Differential Privacy via Graphs.” 2022 IEEE International Symposium on Information Theory, vol. 
2022, IEEE, 2022, pp. 1623–28, doi:10.1109/ISIT50566.2022.9834711.","chicago":"Torkamani, Sahel, Javad B. Ebrahimi, Parastoo Sadeghi, Rafael G.L. D’Oliveira, and Muriel Médard. “Heterogeneous Differential Privacy via Graphs.” In 2022 IEEE International Symposium on Information Theory, 2022:1623–28. IEEE, 2022. https://doi.org/10.1109/ISIT50566.2022.9834711.","ama":"Torkamani S, Ebrahimi JB, Sadeghi P, D’Oliveira RGL, Médard M. Heterogeneous differential privacy via graphs. In: 2022 IEEE International Symposium on Information Theory. Vol 2022. IEEE; 2022:1623-1628. doi:10.1109/ISIT50566.2022.9834711","ieee":"S. Torkamani, J. B. Ebrahimi, P. Sadeghi, R. G. L. D’Oliveira, and M. Médard, “Heterogeneous differential privacy via graphs,” in 2022 IEEE International Symposium on Information Theory, Espoo, Finland, 2022, vol. 2022, pp. 1623–1628.","apa":"Torkamani, S., Ebrahimi, J. B., Sadeghi, P., D’Oliveira, R. G. L., & Médard, M. (2022). Heterogeneous differential privacy via graphs. In 2022 IEEE International Symposium on Information Theory (Vol. 2022, pp. 1623–1628). Espoo, Finland: IEEE. https://doi.org/10.1109/ISIT50566.2022.9834711","ista":"Torkamani S, Ebrahimi JB, Sadeghi P, D’Oliveira RGL, Médard M. 2022. Heterogeneous differential privacy via graphs. 2022 IEEE International Symposium on Information Theory. ISIT: International Symposium on Information Theory vol. 2022, 1623–1628."},"publication":"2022 IEEE International Symposium on Information Theory","page":"1623-1628","abstract":[{"text":"This paper is eligible for the Jack Keil Wolf ISIT Student Paper Award. We generalize a previous framework for designing utility-optimal differentially private (DP) mechanisms via graphs, where datasets are vertices in the graph and edges represent dataset neighborhood. The boundary set contains datasets where an individual’s response changes the binary-valued query compared to its neighbors. 
Previous work was limited to the homogeneous case where the privacy parameter ε across all datasets was the same and the mechanism at boundary datasets was identical. In our work, the mechanism can take different distributions at the boundary and the privacy parameter ε is a function of neighboring datasets, which recovers an earlier definition of personalized DP as special case. The problem is how to extend the mechanism, which is only defined at the boundary set, to other datasets in the graph in a computationally efficient and utility optimal manner. Using the concept of strongest induced DP condition we solve this problem efficiently in polynomial time (in the size of the graph).","lang":"eng"}],"type":"conference","oa_version":"Preprint","_id":"12012","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","intvolume":" 2022","title":"Heterogeneous differential privacy via graphs","status":"public","publication_identifier":{"isbn":["9781665421591"],"issn":["2157-8095"]},"month":"08","doi":"10.1109/ISIT50566.2022.9834711","conference":{"end_date":"2022-07-01","location":"Espoo, Finland","start_date":"2022-06-26","name":"ISIT: Internation Symposium on Information Theory"},"language":[{"iso":"eng"}],"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2203.15429","open_access":"1"}],"external_id":{"arxiv":["2203.15429"]},"oa":1,"quality_controlled":"1","author":[{"full_name":"Torkamani, Sahel","id":"0503e7f8-2d05-11ed-aa17-db0640c720fc","first_name":"Sahel","last_name":"Torkamani"},{"first_name":"Javad B.","last_name":"Ebrahimi","full_name":"Ebrahimi, Javad B."},{"first_name":"Parastoo","last_name":"Sadeghi","full_name":"Sadeghi, Parastoo"},{"first_name":"Rafael G.L.","last_name":"D'Oliveira","full_name":"D'Oliveira, Rafael G.L."},{"last_name":"Médard","first_name":"Muriel","full_name":"Médard, 
Muriel"}],"volume":2022,"date_updated":"2022-09-05T10:28:35Z","date_created":"2022-09-04T22:02:04Z","year":"2022","publisher":"IEEE","department":[{"_id":"MaMo"}],"publication_status":"published"}]