[{"oa_version":"Preprint","_id":"15011","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","intvolume":" 234","status":"public","title":"How to prune your language model: Recovering accuracy on the \"Sparsity May Cry\" benchmark","abstract":[{"text":"Pruning large language models (LLMs) from the BERT family has emerged as a standard compression benchmark, and several pruning methods have been proposed for this task. The recent “Sparsity May Cry” (SMC) benchmark put into question the validity of all existing methods, exhibiting a more complex setup where many known pruning methods appear to fail. We revisit the question of accurate BERT-pruning during fine-tuning on downstream datasets, and propose a set of general guidelines for successful pruning, even on the challenging SMC benchmark. First, we perform a cost-vs-benefits analysis of pruning model components, such as the embeddings and the classification head; second, we provide a simple-yet-general way of scaling training, sparsification and learning rate schedules relative to the desired target sparsity; finally, we investigate the importance of proper parametrization for Knowledge Distillation in the context of LLMs. Our simple insights lead to state-of-the-art results, both on classic BERT-pruning benchmarks, as well as on the SMC benchmark, showing that even classic gradual magnitude pruning (GMP) can yield competitive results, with the right approach.","lang":"eng"}],"type":"conference","alternative_title":["PMLR"],"date_published":"2024-01-08T00:00:00Z","citation":{"mla":"Kurtic, Eldar, et al. “How to Prune Your Language Model: Recovering Accuracy on the ‘Sparsity May Cry’ Benchmark.” Proceedings of Machine Learning Research, vol. 234, ML Research Press, 2024, pp. 542–53.","short":"E. Kurtic, T. Hoefler, D.-A. Alistarh, in:, Proceedings of Machine Learning Research, ML Research Press, 2024, pp. 542–553.","chicago":"Kurtic, Eldar, Torsten Hoefler, and Dan-Adrian Alistarh. 
“How to Prune Your Language Model: Recovering Accuracy on the ‘Sparsity May Cry’ Benchmark.” In Proceedings of Machine Learning Research, 234:542–53. ML Research Press, 2024.","ama":"Kurtic E, Hoefler T, Alistarh D-A. How to prune your language model: Recovering accuracy on the “Sparsity May Cry” benchmark. In: Proceedings of Machine Learning Research. Vol 234. ML Research Press; 2024:542-553.","ista":"Kurtic E, Hoefler T, Alistarh D-A. 2024. How to prune your language model: Recovering accuracy on the ‘Sparsity May Cry’ benchmark. Proceedings of Machine Learning Research. CPAL: Conference on Parsimony and Learning, PMLR, vol. 234, 542–553.","apa":"Kurtic, E., Hoefler, T., & Alistarh, D.-A. (2024). How to prune your language model: Recovering accuracy on the “Sparsity May Cry” benchmark. In Proceedings of Machine Learning Research (Vol. 234, pp. 542–553). Hongkong, China: ML Research Press.","ieee":"E. Kurtic, T. Hoefler, and D.-A. Alistarh, “How to prune your language model: Recovering accuracy on the ‘Sparsity May Cry’ benchmark,” in Proceedings of Machine Learning Research, Hongkong, China, 2024, vol. 234, pp. 
542–553."},"publication":"Proceedings of Machine Learning Research","page":"542-553","article_processing_charge":"No","day":"08","scopus_import":"1","author":[{"full_name":"Kurtic, Eldar","first_name":"Eldar","last_name":"Kurtic","id":"47beb3a5-07b5-11eb-9b87-b108ec578218"},{"full_name":"Hoefler, Torsten","last_name":"Hoefler","first_name":"Torsten"},{"last_name":"Alistarh","first_name":"Dan-Adrian","orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","full_name":"Alistarh, Dan-Adrian"}],"volume":234,"date_updated":"2024-02-26T10:30:52Z","date_created":"2024-02-18T23:01:03Z","year":"2024","department":[{"_id":"DaAl"}],"publisher":"ML Research Press","publication_status":"published","conference":{"start_date":"2024-01-03","location":"Hongkong, China","end_date":"2024-01-06","name":"CPAL: Conference on Parsimony and Learning"},"language":[{"iso":"eng"}],"oa":1,"main_file_link":[{"url":"https://proceedings.mlr.press/v234/kurtic24a","open_access":"1"}],"external_id":{"arxiv":["2312.13547"]},"quality_controlled":"1","publication_identifier":{"eissn":["2640-3498"]},"month":"01"},{"doi":"10.1145/3572848.3577481","date_published":"2023-02-25T00:00:00Z","conference":{"location":"Montreal, QC, Canada","start_date":"2023-02-25","end_date":"2023-03-01","name":"PPoPP: Symposium on Principles and Practice of Parallel Programming"},"language":[{"iso":"eng"}],"citation":{"ama":"Koval N, Alistarh D-A, Elizarov R. Fast and scalable channels in Kotlin Coroutines. In: Proceedings of the ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming. Association for Computing Machinery; 2023:107-118. doi:10.1145/3572848.3577481","apa":"Koval, N., Alistarh, D.-A., & Elizarov, R. (2023). Fast and scalable channels in Kotlin Coroutines. In Proceedings of the ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming (pp. 107–118). Montreal, QC, Canada: Association for Computing Machinery. https://doi.org/10.1145/3572848.3577481","ieee":"N. 
Koval, D.-A. Alistarh, and R. Elizarov, “Fast and scalable channels in Kotlin Coroutines,” in Proceedings of the ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming, Montreal, QC, Canada, 2023, pp. 107–118.","ista":"Koval N, Alistarh D-A, Elizarov R. 2023. Fast and scalable channels in Kotlin Coroutines. Proceedings of the ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming. PPoPP: Symposium on Principles and Practice of Parallel Programming, 107–118.","short":"N. Koval, D.-A. Alistarh, R. Elizarov, in:, Proceedings of the ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming, Association for Computing Machinery, 2023, pp. 107–118.","mla":"Koval, Nikita, et al. “Fast and Scalable Channels in Kotlin Coroutines.” Proceedings of the ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming, Association for Computing Machinery, 2023, pp. 107–18, doi:10.1145/3572848.3577481.","chicago":"Koval, Nikita, Dan-Adrian Alistarh, and Roman Elizarov. “Fast and Scalable Channels in Kotlin Coroutines.” In Proceedings of the ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming, 107–18. Association for Computing Machinery, 2023. 
https://doi.org/10.1145/3572848.3577481."},"external_id":{"arxiv":["2211.04986"]},"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2211.04986","open_access":"1"}],"oa":1,"publication":"Proceedings of the ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming","page":"107-118","quality_controlled":"1","article_processing_charge":"No","publication_identifier":{"isbn":["9798400700156"]},"day":"25","month":"02","scopus_import":"1","author":[{"full_name":"Koval, Nikita","first_name":"Nikita","last_name":"Koval","id":"2F4DB10C-F248-11E8-B48F-1D18A9856A87"},{"last_name":"Alistarh","first_name":"Dan-Adrian","orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","full_name":"Alistarh, Dan-Adrian"},{"last_name":"Elizarov","first_name":"Roman","full_name":"Elizarov, Roman"}],"oa_version":"Preprint","date_updated":"2023-03-20T07:29:28Z","date_created":"2023-03-19T23:00:58Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"12735","year":"2023","department":[{"_id":"DaAl"}],"publisher":"Association for Computing Machinery","status":"public","publication_status":"published","title":"Fast and scalable channels in Kotlin Coroutines","abstract":[{"text":"Asynchronous programming has gained significant popularity over the last decade: support for this programming pattern is available in many popular languages via libraries and native language implementations, typically in the form of coroutines or the async/await construct. Instead of programming via shared memory, this concept assumes implicit synchronization through message passing. The key data structure enabling such communication is the rendezvous channel. Roughly, a rendezvous channel is a blocking queue of size zero, so both send(e) and receive() operations wait for each other, performing a rendezvous when they meet. 
To optimize the message passing pattern, channels are usually equipped with a fixed-size buffer, so sends do not suspend and put elements into the buffer until its capacity is exceeded. This primitive is known as a buffered channel.\r\n\r\nThis paper presents a fast and scalable algorithm for both rendezvous and buffered channels. Similarly to modern queues, our solution is based on an infinite array with two positional counters for send(e) and receive() operations, leveraging the unconditional Fetch-And-Add instruction to update them. Yet, the algorithm requires non-trivial modifications of this classic pattern, in order to support the full channel semantics, such as buffering and cancellation of waiting requests. We compare the performance of our solution to that of the Kotlin implementation, as well as against other academic proposals, showing up to 9.8× speedup. To showcase its expressiveness and performance, we also integrated the proposed algorithm into the standard Kotlin Coroutines library, replacing the previous channel implementations.","lang":"eng"}],"type":"conference"},{"day":"25","month":"02","publication_identifier":{"isbn":["9798400700156"]},"article_processing_charge":"No","publication":"Proceedings of the ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming","oa":1,"main_file_link":[{"url":"https://doi.org/10.1145/3572848.3577512","open_access":"1"}],"citation":{"mla":"Aksenov, Vitaly, et al. “Unexpected Scaling in Path Copying Trees.” Proceedings of the ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming, Association for Computing Machinery, 2023, pp. 438–40, doi:10.1145/3572848.3577512.","short":"V. Aksenov, T.A. Brown, A. Fedorov, I. Kokorin, Unexpected Scaling in Path Copying Trees, Association for Computing Machinery, 2023.","chicago":"Aksenov, Vitaly, Trevor A Brown, Alexander Fedorov, and Ilya Kokorin. Unexpected Scaling in Path Copying Trees. 
Proceedings of the ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming. Association for Computing Machinery, 2023. https://doi.org/10.1145/3572848.3577512.","ama":"Aksenov V, Brown TA, Fedorov A, Kokorin I. Unexpected Scaling in Path Copying Trees. Association for Computing Machinery; 2023:438-440. doi:10.1145/3572848.3577512","ista":"Aksenov V, Brown TA, Fedorov A, Kokorin I. 2023. Unexpected scaling in path copying trees, Association for Computing Machinery, p. 438–440.","ieee":"V. Aksenov, T. A. Brown, A. Fedorov, and I. Kokorin, Unexpected scaling in path copying trees. Association for Computing Machinery, 2023, pp. 438–440.","apa":"Aksenov, V., Brown, T. A., Fedorov, A., & Kokorin, I. (2023). Unexpected scaling in path copying trees. Proceedings of the ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming (pp. 438–440). Montreal, QC, Canada: Association for Computing Machinery. https://doi.org/10.1145/3572848.3577512"},"quality_controlled":"1","page":"438-440","conference":{"end_date":"2023-03-01","start_date":"2023-02-25","location":"Montreal, QC, Canada","name":"PPoPP: Symposium on Principles and Practice of Parallel Programming"},"date_published":"2023-02-25T00:00:00Z","doi":"10.1145/3572848.3577512","language":[{"iso":"eng"}],"type":"conference_poster","abstract":[{"lang":"eng","text":"Although a wide variety of handcrafted concurrent data structures have been proposed, there is considerable interest in universal approaches (Universal Constructions or UCs) for building concurrent data structures. UCs (semi-)automatically convert a sequential data structure into a concurrent one. The simplest approach uses locks [3, 6] that protect a sequential data structure and allow only one process to access it at a time. However, the resulting data structure is blocking. Most work on UCs instead focuses on obtaining non-blocking progress guarantees such as obstruction-freedom, lock-freedom or wait-freedom. 
Many non-blocking UCs have appeared. Key examples include the seminal wait-free UC [2] by Herlihy, a NUMA-aware UC [10] by Yi et al., and an efficient UC for large objects [1] by Fatourou et al."}],"acknowledgement":"This work was supported by: the Natural Sciences and Engineering Research Council of Canada (NSERC) Discovery Program grant: RGPIN-2019-04227, and the Canada Foundation for Innovation John R. Evans Leaders Fund (CFI-JELF) with equal support from the Ontario Research Fund CFI Leaders Opportunity Fund: 38512.","_id":"12736","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","year":"2023","publication_status":"published","title":"Unexpected scaling in path copying trees","status":"public","department":[{"_id":"DaAl"},{"_id":"GradSch"}],"publisher":"Association for Computing Machinery","author":[{"last_name":"Aksenov","first_name":"Vitaly","full_name":"Aksenov, Vitaly"},{"full_name":"Brown, Trevor A","id":"3569F0A0-F248-11E8-B48F-1D18A9856A87","first_name":"Trevor A","last_name":"Brown"},{"full_name":"Fedorov, Alexander","id":"2e711909-896a-11ed-bdf8-eb0f5a2984c6","last_name":"Fedorov","first_name":"Alexander"},{"first_name":"Ilya","last_name":"Kokorin","full_name":"Kokorin, Ilya"}],"date_created":"2023-03-19T23:00:58Z","date_updated":"2023-03-20T07:57:27Z","oa_version":"Published Version"},{"author":[{"id":"32D78294-F248-11E8-B48F-1D18A9856A87","first_name":"Elena-Alexandra","last_name":"Peste","full_name":"Peste, Elena-Alexandra"},{"last_name":"Vladu","first_name":"Adrian","full_name":"Vladu, Adrian"},{"last_name":"Kurtic","first_name":"Eldar","id":"47beb3a5-07b5-11eb-9b87-b108ec578218","full_name":"Kurtic, Eldar"},{"full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","last_name":"Lampert","first_name":"Christoph"},{"id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X","first_name":"Dan-Adrian","last_name":"Alistarh","full_name":"Alistarh, 
Dan-Adrian"}],"related_material":{"record":[{"status":"public","relation":"dissertation_contains","id":"13074"}]},"date_updated":"2023-06-01T12:54:45Z","date_created":"2023-05-23T11:36:18Z","oa_version":"Preprint","_id":"13053","year":"2023","acknowledgement":"AP, EK, DA received funding from the European Research Council (ERC) under the European\r\nUnion’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML). AV acknowledges the support of the French Agence Nationale de la Recherche (ANR), under grant ANR-21-CE48-0016 (project COMCOPT). We further acknowledge the support from the Scientific Service Units (SSU) of ISTA through resources provided by Scientific Computing (SciComp)-","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","status":"public","title":"CrAM: A Compression-Aware Minimizer","publication_status":"accepted","department":[{"_id":"GradSch"},{"_id":"DaAl"},{"_id":"ChLa"}],"abstract":[{"text":"Deep neural networks (DNNs) often have to be compressed, via pruning and/or quantization, before they can be deployed in practical settings. In this work we propose a new compression-aware minimizer dubbed CrAM that modifies the optimization step in a principled way, in order to produce models whose local loss behavior is stable under compression operations such as pruning. Thus, dense models trained via CrAM should be compressible post-training, in a single step, without significant accuracy loss. Experimental results on standard benchmarks, such as residual networks for ImageNet classification and BERT models for language modelling, show that CrAM produces dense models that can be more accurate than the standard SGD/Adam-based baselines, but which are stable under weight pruning: specifically, we can prune models in one-shot to 70-80% sparsity with almost no accuracy loss, and to 90% with reasonable (∼1%) accuracy loss, which is competitive with gradual compression methods. 
Additionally, CrAM can produce sparse models which perform well for transfer learning, and it also works for semi-structured 2:4 pruning patterns supported by GPU hardware. The code for reproducing the results is available at this https URL .","lang":"eng"}],"ec_funded":1,"type":"conference","conference":{"end_date":"2023-05-05","start_date":"2023-05-01","location":"Kigali, Rwanda ","name":"ICLR: International Conference on Learning Representations"},"date_published":"2023-05-01T00:00:00Z","acknowledged_ssus":[{"_id":"ScienComp"}],"language":[{"iso":"eng"}],"publication":"11th International Conference on Learning Representations ","oa":1,"main_file_link":[{"open_access":"1","url":"https://openreview.net/pdf?id=_eTZBs-yedr"}],"external_id":{"arxiv":["2207.14200"]},"citation":{"apa":"Peste, E.-A., Vladu, A., Kurtic, E., Lampert, C., & Alistarh, D.-A. (n.d.). CrAM: A Compression-Aware Minimizer. In 11th International Conference on Learning Representations . Kigali, Rwanda .","ieee":"E.-A. Peste, A. Vladu, E. Kurtic, C. Lampert, and D.-A. Alistarh, “CrAM: A Compression-Aware Minimizer,” in 11th International Conference on Learning Representations , Kigali, Rwanda .","ista":"Peste E-A, Vladu A, Kurtic E, Lampert C, Alistarh D-A. CrAM: A Compression-Aware Minimizer. 11th International Conference on Learning Representations . ICLR: International Conference on Learning Representations.","ama":"Peste E-A, Vladu A, Kurtic E, Lampert C, Alistarh D-A. CrAM: A Compression-Aware Minimizer. In: 11th International Conference on Learning Representations .","chicago":"Peste, Elena-Alexandra, Adrian Vladu, Eldar Kurtic, Christoph Lampert, and Dan-Adrian Alistarh. “CrAM: A Compression-Aware Minimizer.” In 11th International Conference on Learning Representations , n.d.","short":"E.-A. Peste, A. Vladu, E. Kurtic, C. Lampert, D.-A. Alistarh, in:, 11th International Conference on Learning Representations , n.d.","mla":"Peste, Elena-Alexandra, et al. 
“CrAM: A Compression-Aware Minimizer.” 11th International Conference on Learning Representations ."},"quality_controlled":"1","project":[{"call_identifier":"H2020","name":"Elastic Coordination for Scalable Machine Learning","grant_number":"805223","_id":"268A44D6-B435-11E9-9278-68D0E5697425"}],"month":"05","article_processing_charge":"No"},{"citation":{"mla":"Koval, Nikita, et al. “CQS: A Formally-Verified Framework for Fair and Abortable Synchronization.” Proceedings of the ACM on Programming Languages, vol. 7, 116, Association for Computing Machinery , 2023, doi:10.1145/3591230.","short":"N. Koval, D. Khalanskiy, D.-A. Alistarh, Proceedings of the ACM on Programming Languages 7 (2023).","chicago":"Koval, Nikita, Dmitry Khalanskiy, and Dan-Adrian Alistarh. “CQS: A Formally-Verified Framework for Fair and Abortable Synchronization.” Proceedings of the ACM on Programming Languages. Association for Computing Machinery , 2023. https://doi.org/10.1145/3591230.","ama":"Koval N, Khalanskiy D, Alistarh D-A. CQS: A formally-verified framework for fair and abortable synchronization. Proceedings of the ACM on Programming Languages. 2023;7. doi:10.1145/3591230","ista":"Koval N, Khalanskiy D, Alistarh D-A. 2023. CQS: A formally-verified framework for fair and abortable synchronization. Proceedings of the ACM on Programming Languages. 7, 116.","apa":"Koval, N., Khalanskiy, D., & Alistarh, D.-A. (2023). CQS: A formally-verified framework for fair and abortable synchronization. Proceedings of the ACM on Programming Languages. Association for Computing Machinery . https://doi.org/10.1145/3591230","ieee":"N. Koval, D. Khalanskiy, and D.-A. Alistarh, “CQS: A formally-verified framework for fair and abortable synchronization,” Proceedings of the ACM on Programming Languages, vol. 7. 
Association for Computing Machinery , 2023."},"publication":"Proceedings of the ACM on Programming Languages","article_type":"original","date_published":"2023-06-06T00:00:00Z","scopus_import":"1","has_accepted_license":"1","article_processing_charge":"No","day":"06","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"13179","intvolume":" 7","ddc":["000"],"title":"CQS: A formally-verified framework for fair and abortable synchronization","status":"public","oa_version":"Published Version","file":[{"relation":"main_file","file_id":"13187","checksum":"5dba6e73f0ed79adbdae14d165bc2f68","success":1,"date_created":"2023-07-03T13:09:39Z","date_updated":"2023-07-03T13:09:39Z","access_level":"open_access","file_name":"2023_ACMProgram.Lang._Koval.pdf","file_size":1266773,"content_type":"application/pdf","creator":"alisjak"}],"type":"journal_article","abstract":[{"lang":"eng","text":"Writing concurrent code that is both correct and efficient is notoriously difficult. Thus, programmers often prefer to use synchronization abstractions, which render code simpler and easier to reason about. Despite a wealth of work on this topic, there is still a gap between the rich semantics provided by synchronization abstractions in modern programming languages—specifically, fair FIFO ordering of synchronization requests and support for abortable operations—and frameworks for implementing it correctly and efficiently. Supporting such semantics is critical given the rising popularity of constructs for asynchronous programming, such as coroutines, which abort frequently and are cheaper to suspend and resume compared to native threads.\r\n\r\nThis paper introduces a new framework called CancellableQueueSynchronizer (CQS), which enables simple yet efficient implementations of a wide range of fair and abortable synchronization primitives: mutexes, semaphores, barriers, count-down latches, and blocking pools. 
Our main contribution is algorithmic, as implementing both fairness and abortability efficiently at this level of generality is non-trivial. Importantly, all our algorithms, including the CQS framework and the primitives built on top of it, come with formal proofs in the Iris framework for Coq for many of their properties. These proofs are modular, so it is easy to show correctness for new primitives implemented on top of CQS. From a practical perspective, implementation of CQS for native threads on the JVM improves throughput by up to two orders of magnitude over Java’s AbstractQueuedSynchronizer, the only practical abstraction offering similar semantics. Further, we successfully integrated CQS as a core component of the popular Kotlin Coroutines library, validating the framework’s practical impact and expressiveness in a real-world environment. In sum, CancellableQueueSynchronizer is the first framework to combine expressiveness with formal guarantees and solid practical performance. Our approach should be extensible to other languages and families of synchronization primitives."}],"oa":1,"tmp":{"name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","image":"/images/cc_by.png"},"quality_controlled":"1","doi":"10.1145/3591230","language":[{"iso":"eng"}],"publication_identifier":{"eissn":["2475-1421"]},"month":"06","year":"2023","department":[{"_id":"DaAl"}],"publisher":"Association for Computing Machinery ","publication_status":"published","author":[{"id":"2F4DB10C-F248-11E8-B48F-1D18A9856A87","first_name":"Nikita","last_name":"Koval","full_name":"Koval, Nikita"},{"full_name":"Khalanskiy, Dmitry","last_name":"Khalanskiy","first_name":"Dmitry"},{"id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X","first_name":"Dan-Adrian","last_name":"Alistarh","full_name":"Alistarh, 
Dan-Adrian"}],"volume":7,"date_updated":"2023-07-17T08:43:19Z","date_created":"2023-07-02T22:00:43Z","article_number":"116","file_date_updated":"2023-07-03T13:09:39Z","license":"https://creativecommons.org/licenses/by/4.0/"},{"abstract":[{"lang":"eng","text":"Determining the degree of inherent parallelism in classical sequential algorithms and leveraging it for fast parallel execution is a key topic in parallel computing, and detailed analyses are known for a wide range of classical algorithms. In this paper, we perform the first such analysis for the fundamental Union-Find problem, in which we are given a graph as a sequence of edges, and must maintain its connectivity structure under edge additions. We prove that classic sequential algorithms for this problem are well-parallelizable under reasonable assumptions, addressing a conjecture by [Blelloch, 2017]. More precisely, we show via a new potential argument that, under uniform random edge ordering, parallel union-find operations are unlikely to interfere: T concurrent threads processing the graph in parallel will encounter memory contention O(T2 · log |V| · log |E|) times in expectation, where |E| and |V| are the number of edges and nodes in the graph, respectively. We leverage this result to design a new parallel Union-Find algorithm that is both internally deterministic, i.e., its results are guaranteed to match those of a sequential execution, but also work-efficient and scalable, as long as the number of threads T is O(|E|1 over 3 - ε), for an arbitrarily small constant ε > 0, which holds for most large real-world graphs. 
We present lower bounds which show that our analysis is close to optimal, and experimental results suggesting that the performance cost of internal determinism is limited."}],"type":"conference","file":[{"file_id":"13334","relation":"main_file","success":1,"checksum":"72e312aabf0c5248c99b5cd3a88e4c88","date_updated":"2023-07-31T10:53:08Z","date_created":"2023-07-31T10:53:08Z","access_level":"open_access","file_name":"2023_SPAA_Fedorov.pdf","creator":"dernst","content_type":"application/pdf","file_size":2087937}],"oa_version":"Published Version","status":"public","ddc":["000"],"title":"Provably-efficient and internally-deterministic parallel Union-Find","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"13262","day":"17","has_accepted_license":"1","article_processing_charge":"Yes (in subscription journal)","scopus_import":"1","date_published":"2023-06-17T00:00:00Z","page":"261-271","publication":"Proceedings of the 35th ACM Symposium on Parallelism in Algorithms and Architectures","citation":{"ista":"Fedorov A, Hashemi D, Nadiradze G, Alistarh D-A. 2023. Provably-efficient and internally-deterministic parallel Union-Find. Proceedings of the 35th ACM Symposium on Parallelism in Algorithms and Architectures. SPAA: Symposium on Parallelism in Algorithms and Architectures, 261–271.","ieee":"A. Fedorov, D. Hashemi, G. Nadiradze, and D.-A. Alistarh, “Provably-efficient and internally-deterministic parallel Union-Find,” in Proceedings of the 35th ACM Symposium on Parallelism in Algorithms and Architectures, Orlando, FL, United States, 2023, pp. 261–271.","apa":"Fedorov, A., Hashemi, D., Nadiradze, G., & Alistarh, D.-A. (2023). Provably-efficient and internally-deterministic parallel Union-Find. In Proceedings of the 35th ACM Symposium on Parallelism in Algorithms and Architectures (pp. 261–271). Orlando, FL, United States: Association for Computing Machinery. https://doi.org/10.1145/3558481.3591082","ama":"Fedorov A, Hashemi D, Nadiradze G, Alistarh D-A. 
Provably-efficient and internally-deterministic parallel Union-Find. In: Proceedings of the 35th ACM Symposium on Parallelism in Algorithms and Architectures. Association for Computing Machinery; 2023:261-271. doi:10.1145/3558481.3591082","chicago":"Fedorov, Alexander, Diba Hashemi, Giorgi Nadiradze, and Dan-Adrian Alistarh. “Provably-Efficient and Internally-Deterministic Parallel Union-Find.” In Proceedings of the 35th ACM Symposium on Parallelism in Algorithms and Architectures, 261–71. Association for Computing Machinery, 2023. https://doi.org/10.1145/3558481.3591082.","mla":"Fedorov, Alexander, et al. “Provably-Efficient and Internally-Deterministic Parallel Union-Find.” Proceedings of the 35th ACM Symposium on Parallelism in Algorithms and Architectures, Association for Computing Machinery, 2023, pp. 261–71, doi:10.1145/3558481.3591082.","short":"A. Fedorov, D. Hashemi, G. Nadiradze, D.-A. Alistarh, in:, Proceedings of the 35th ACM Symposium on Parallelism in Algorithms and Architectures, Association for Computing Machinery, 2023, pp. 
261–271."},"file_date_updated":"2023-07-31T10:53:08Z","date_updated":"2023-07-31T10:54:32Z","date_created":"2023-07-23T22:01:12Z","author":[{"full_name":"Fedorov, Alexander","id":"2e711909-896a-11ed-bdf8-eb0f5a2984c6","last_name":"Fedorov","first_name":"Alexander"},{"full_name":"Hashemi, Diba","id":"ed9595ea-2f8f-11ee-ba95-d2b546540783","first_name":"Diba","last_name":"Hashemi"},{"last_name":"Nadiradze","first_name":"Giorgi","id":"3279A00C-F248-11E8-B48F-1D18A9856A87","full_name":"Nadiradze, Giorgi"},{"id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X","first_name":"Dan-Adrian","last_name":"Alistarh","full_name":"Alistarh, Dan-Adrian"}],"publication_status":"published","department":[{"_id":"DaAl"},{"_id":"GradSch"}],"publisher":"Association for Computing Machinery","year":"2023","month":"06","publication_identifier":{"isbn":["9781450395458"]},"language":[{"iso":"eng"}],"conference":{"name":"SPAA: Symposium on Parallelism in Algorithms and Architectures","end_date":"2023-06-19","location":"Orlando, FL, United States","start_date":"2023-06-17"},"doi":"10.1145/3558481.3591082","quality_controlled":"1","external_id":{"arxiv":["2304.09331"]},"tmp":{"name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","image":"/images/cc_by.png"},"oa":1},{"oa_version":"Published Version","file":[{"checksum":"b27c5290f2f1500c403494364ee39c9f","success":1,"date_updated":"2023-02-20T07:30:20Z","date_created":"2023-02-20T07:30:20Z","relation":"main_file","file_id":"12570","content_type":"application/pdf","file_size":602333,"creator":"dernst","access_level":"open_access","file_name":"2023_TheoreticalCompScience_Alistarh.pdf"}],"intvolume":" 948","ddc":["000"],"title":"Wait-free approximate agreement on graphs","status":"public","_id":"12566","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","issue":"2","abstract":[{"lang":"eng","text":"Approximate 
agreement is one of the few variants of consensus that can be solved in a wait-free manner in asynchronous systems where processes communicate by reading and writing to shared memory. In this work, we consider a natural generalisation of approximate agreement on arbitrary undirected connected graphs. Each process is given a node of the graph as input and, if non-faulty, must output a node such that\r\n– all the outputs are within distance 1 of one another, and\r\n– each output value lies on a shortest path between two input values.\r\nFrom prior work, it is known that there is no wait-free algorithm among processes for this problem on any cycle of length , by reduction from 2-set agreement (Castañeda et al., 2018).\r\n\r\nIn this work, we investigate the solvability of this task on general graphs. We give a new, direct proof of the impossibility of approximate agreement on cycles of length , via a generalisation of Sperner's Lemma to convex polygons. We also extend the reduction from 2-set agreement to a larger class of graphs, showing that approximate agreement on these graphs is unsolvable. On the positive side, we present a wait-free algorithm for a different class of graphs, which properly contains the class of chordal graphs."}],"type":"journal_article","date_published":"2023-02-28T00:00:00Z","article_type":"original","citation":{"ama":"Alistarh D-A, Ellen F, Rybicki J. Wait-free approximate agreement on graphs. Theoretical Computer Science. 2023;948(2). doi:10.1016/j.tcs.2023.113733","ista":"Alistarh D-A, Ellen F, Rybicki J. 2023. Wait-free approximate agreement on graphs. Theoretical Computer Science. 948(2), 113733.","ieee":"D.-A. Alistarh, F. Ellen, and J. Rybicki, “Wait-free approximate agreement on graphs,” Theoretical Computer Science, vol. 948, no. 2. Elsevier, 2023.","apa":"Alistarh, D.-A., Ellen, F., & Rybicki, J. (2023). Wait-free approximate agreement on graphs. Theoretical Computer Science. Elsevier. 
https://doi.org/10.1016/j.tcs.2023.113733","mla":"Alistarh, Dan-Adrian, et al. “Wait-Free Approximate Agreement on Graphs.” Theoretical Computer Science, vol. 948, no. 2, 113733, Elsevier, 2023, doi:10.1016/j.tcs.2023.113733.","short":"D.-A. Alistarh, F. Ellen, J. Rybicki, Theoretical Computer Science 948 (2023).","chicago":"Alistarh, Dan-Adrian, Faith Ellen, and Joel Rybicki. “Wait-Free Approximate Agreement on Graphs.” Theoretical Computer Science. Elsevier, 2023. https://doi.org/10.1016/j.tcs.2023.113733."},"publication":"Theoretical Computer Science","article_processing_charge":"Yes (via OA deal)","has_accepted_license":"1","day":"28","scopus_import":"1","volume":948,"date_updated":"2023-08-01T13:17:20Z","date_created":"2023-02-19T23:00:55Z","author":[{"full_name":"Alistarh, Dan-Adrian","orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","last_name":"Alistarh","first_name":"Dan-Adrian"},{"first_name":"Faith","last_name":"Ellen","full_name":"Ellen, Faith"},{"full_name":"Rybicki, Joel","orcid":"0000-0002-6432-6646","id":"334EFD2E-F248-11E8-B48F-1D18A9856A87","last_name":"Rybicki","first_name":"Joel"}],"department":[{"_id":"DaAl"}],"publisher":"Elsevier","publication_status":"published","year":"2023","acknowledgement":"This project has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No. 805223 ScaleML) and under the Marie Skłodowska-Curie grant agreement No. 840605 and from the Natural Sciences and Engineering Research Council of Canada grant RGPIN-2020-04178. 
Part of this work was done while Faith Ellen was visiting IST Austria.","ec_funded":1,"file_date_updated":"2023-02-20T07:30:20Z","article_number":"113733","language":[{"iso":"eng"}],"doi":"10.1016/j.tcs.2023.113733","project":[{"_id":"268A44D6-B435-11E9-9278-68D0E5697425","grant_number":"805223","call_identifier":"H2020","name":"Elastic Coordination for Scalable Machine Learning"},{"_id":"26A5D39A-B435-11E9-9278-68D0E5697425","grant_number":"840605","name":"Coordination in constrained and natural distributed systems","call_identifier":"H2020"}],"isi":1,"quality_controlled":"1","external_id":{"isi":["000934262700001"]},"tmp":{"name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","image":"/images/cc_by.png"},"oa":1,"publication_identifier":{"issn":["0304-3975"]},"month":"02"},{"month":"05","publication_identifier":{"issn":["2663-337X"]},"supervisor":[{"last_name":"Lampert","first_name":"Christoph","orcid":"0000-0001-8622-7887","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","full_name":"Lampert, Christoph"},{"id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X","first_name":"Dan-Adrian","last_name":"Alistarh","full_name":"Alistarh, Dan-Adrian"}],"degree_awarded":"PhD","acknowledged_ssus":[{"_id":"ScienComp"}],"language":[{"iso":"eng"}],"doi":"10.15479/at:ista:13074","project":[{"name":"International IST Doctoral Program","call_identifier":"H2020","grant_number":"665385","_id":"2564DBCA-B435-11E9-9278-68D0E5697425"},{"call_identifier":"H2020","name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425","grant_number":"805223"}],"oa":1,"file_date_updated":"2023-05-24T16:12:59Z","ec_funded":1,"date_updated":"2023-08-04T10:33:27Z","date_created":"2023-05-23T17:07:53Z","author":[{"full_name":"Peste, 
Elena-Alexandra","last_name":"Peste","first_name":"Elena-Alexandra","id":"32D78294-F248-11E8-B48F-1D18A9856A87"}],"related_material":{"record":[{"id":"11458","relation":"part_of_dissertation","status":"public"},{"id":"13053","relation":"part_of_dissertation","status":"public"},{"id":"12299","relation":"part_of_dissertation","status":"public"}]},"publication_status":"published","publisher":"Institute of Science and Technology Austria","department":[{"_id":"GradSch"},{"_id":"DaAl"},{"_id":"ChLa"}],"year":"2023","day":"23","article_processing_charge":"No","has_accepted_license":"1","date_published":"2023-05-23T00:00:00Z","page":"147","citation":{"ista":"Peste E-A. 2023. Efficiency and generalization of sparse neural networks. Institute of Science and Technology Austria.","apa":"Peste, E.-A. (2023). Efficiency and generalization of sparse neural networks. Institute of Science and Technology Austria. https://doi.org/10.15479/at:ista:13074","ieee":"E.-A. Peste, “Efficiency and generalization of sparse neural networks,” Institute of Science and Technology Austria, 2023.","ama":"Peste E-A. Efficiency and generalization of sparse neural networks. 2023. doi:10.15479/at:ista:13074","chicago":"Peste, Elena-Alexandra. “Efficiency and Generalization of Sparse Neural Networks.” Institute of Science and Technology Austria, 2023. https://doi.org/10.15479/at:ista:13074.","mla":"Peste, Elena-Alexandra. Efficiency and Generalization of Sparse Neural Networks. Institute of Science and Technology Austria, 2023, doi:10.15479/at:ista:13074.","short":"E.-A. Peste, Efficiency and Generalization of Sparse Neural Networks, Institute of Science and Technology Austria, 2023."},"abstract":[{"text":"Deep learning has become an integral part of a large number of important applications, and many of the recent breakthroughs have been enabled by the ability to train very large models, capable to capture complex patterns and relationships from the data. 
At the same time, the massive sizes of modern deep learning models have made their deployment to smaller devices more challenging; this is particularly important, as in many applications the users rely on accurate deep learning predictions, but they only have access to devices with limited memory and compute power. One solution to this problem is to prune neural networks, by setting as many of their parameters as possible to zero, to obtain accurate sparse models with lower memory footprint. Despite the great research progress in obtaining sparse models that preserve accuracy, while satisfying memory and computational constraints, there are still many challenges associated with efficiently training sparse models, as well as understanding their generalization properties.\r\n\r\nThe focus of this thesis is to investigate how the training process of sparse models can be made more efficient, and to understand the differences between sparse and dense models in terms of how well they can generalize to changes in the data distribution. We first study a method for co-training sparse and dense models, at a lower cost compared to regular training. With our method we can obtain very accurate sparse networks, and dense models that can recover the baseline accuracy. Furthermore, we are able to more easily analyze the differences, at prediction level, between the sparse-dense model pairs. Next, we investigate the generalization properties of sparse neural networks in more detail, by studying how well different sparse models trained on a larger task can adapt to smaller, more specialized tasks, in a transfer learning scenario. Our analysis across multiple pruning methods and sparsity levels reveals that sparse models provide features that can transfer similarly to or better than the dense baseline. 
However, the choice of the pruning method plays an important role, and can influence the results when the features are fixed (linear finetuning), or when they are allowed to adapt to the new task (full finetuning). Using sparse models with fixed masks for finetuning on new tasks has an important practical advantage, as it enables training neural networks on smaller devices. However, one drawback of current pruning methods is that the entire training cycle has to be repeated to obtain the initial sparse model, for every sparsity target; in consequence, the entire training process is costly and also multiple models need to be stored. In the last part of the thesis we propose a method that can train accurate dense models that are compressible in a single step, to multiple sparsity levels, without additional finetuning. Our method results in sparse models that can be competitive with existing pruning methods, and which can also successfully generalize to new tasks.","lang":"eng"}],"alternative_title":["ISTA Thesis"],"type":"dissertation","file":[{"file_id":"13087","relation":"main_file","success":1,"checksum":"6b3354968403cb9d48cc5a83611fb571","date_created":"2023-05-24T16:11:16Z","date_updated":"2023-05-24T16:11:16Z","access_level":"open_access","file_name":"PhD_Thesis_Alexandra_Peste_final.pdf","creator":"epeste","content_type":"application/pdf","file_size":2152072},{"date_updated":"2023-05-24T16:12:59Z","date_created":"2023-05-24T16:12:59Z","checksum":"8d0df94bbcf4db72c991f22503b3fd60","relation":"source_file","file_id":"13088","content_type":"application/zip","file_size":1658293,"creator":"epeste","file_name":"PhD_Thesis_APeste.zip","access_level":"closed"}],"oa_version":"Published Version","ddc":["000"],"status":"public","title":"Efficiency and generalization of sparse neural networks","user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","_id":"13074"},{"publication_status":"published","publisher":"Springer 
Nature","department":[{"_id":"DaAl"}],"year":"2023","date_updated":"2023-08-14T12:54:32Z","date_created":"2023-01-22T23:00:55Z","volume":36,"author":[{"full_name":"Aksenov, Vitalii","id":"2980135A-F248-11E8-B48F-1D18A9856A87","last_name":"Aksenov","first_name":"Vitalii"},{"full_name":"Alistarh, Dan-Adrian","orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","last_name":"Alistarh","first_name":"Dan-Adrian"},{"last_name":"Drozdova","first_name":"Alexandra","full_name":"Drozdova, Alexandra"},{"full_name":"Mohtashami, Amirkeivan","last_name":"Mohtashami","first_name":"Amirkeivan"}],"month":"09","publication_identifier":{"eissn":["1432-0452"],"issn":["0178-2770"]},"quality_controlled":"1","isi":1,"external_id":{"isi":["000913424000001"],"arxiv":["2008.01009"]},"oa":1,"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2008.01009"}],"language":[{"iso":"eng"}],"doi":"10.1007/s00446-022-00441-x","type":"journal_article","abstract":[{"text":"The design and implementation of efficient concurrent data structures has seen significant attention. However, most of this work has focused on concurrent data structures providing good worst-case guarantees, although, in real workloads, objects are often accessed at different rates. Efficient distribution-adaptive data structures, such as splay-trees, are known in the sequential case; however, they often are hard to translate efficiently to the concurrent case. We investigate distribution-adaptive concurrent data structures, and propose a new design called the splay-list. At a high level, the splay-list is similar to a standard skip-list, with the key distinction that the height of each element adapts dynamically to its access rate: popular elements “move up,” whereas rarely-accessed elements decrease in height. We show that the splay-list provides order-optimal amortized complexity bounds for a subset of operations, while being amenable to efficient concurrent implementation. 
Experiments show that the splay-list can leverage distribution-adaptivity for performance, and can outperform the only previously-known distribution-adaptive concurrent design in certain workloads.","lang":"eng"}],"title":"The splay-list: A distribution-adaptive concurrent skip-list","status":"public","intvolume":" 36","_id":"12330","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa_version":"Preprint","scopus_import":"1","day":"01","article_processing_charge":"No","article_type":"original","page":"395-418","publication":"Distributed Computing","citation":{"ama":"Aksenov V, Alistarh D-A, Drozdova A, Mohtashami A. The splay-list: A distribution-adaptive concurrent skip-list. Distributed Computing. 2023;36:395-418. doi:10.1007/s00446-022-00441-x","ista":"Aksenov V, Alistarh D-A, Drozdova A, Mohtashami A. 2023. The splay-list: A distribution-adaptive concurrent skip-list. Distributed Computing. 36, 395–418.","apa":"Aksenov, V., Alistarh, D.-A., Drozdova, A., & Mohtashami, A. (2023). The splay-list: A distribution-adaptive concurrent skip-list. Distributed Computing. Springer Nature. https://doi.org/10.1007/s00446-022-00441-x","ieee":"V. Aksenov, D.-A. Alistarh, A. Drozdova, and A. Mohtashami, “The splay-list: A distribution-adaptive concurrent skip-list,” Distributed Computing, vol. 36. Springer Nature, pp. 395–418, 2023.","mla":"Aksenov, Vitalii, et al. “The Splay-List: A Distribution-Adaptive Concurrent Skip-List.” Distributed Computing, vol. 36, Springer Nature, 2023, pp. 395–418, doi:10.1007/s00446-022-00441-x.","short":"V. Aksenov, D.-A. Alistarh, A. Drozdova, A. Mohtashami, Distributed Computing 36 (2023) 395–418.","chicago":"Aksenov, Vitalii, Dan-Adrian Alistarh, Alexandra Drozdova, and Amirkeivan Mohtashami. “The Splay-List: A Distribution-Adaptive Concurrent Skip-List.” Distributed Computing. Springer Nature, 2023. 
https://doi.org/10.1007/s00446-022-00441-x."},"date_published":"2023-09-01T00:00:00Z"},{"day":"30","article_processing_charge":"No","scopus_import":"1","date_published":"2023-07-30T00:00:00Z","publication":"Proceedings of the 40th International Conference on Machine Learning","citation":{"chicago":"Markov, Ilia, Adrian Vladu, Qi Guo, and Dan-Adrian Alistarh. “Quantized Distributed Training of Large Models with Convergence Guarantees.” In Proceedings of the 40th International Conference on Machine Learning, 202:24020–44. ML Research Press, 2023.","short":"I. Markov, A. Vladu, Q. Guo, D.-A. Alistarh, in:, Proceedings of the 40th International Conference on Machine Learning, ML Research Press, 2023, pp. 24020–24044.","mla":"Markov, Ilia, et al. “Quantized Distributed Training of Large Models with Convergence Guarantees.” Proceedings of the 40th International Conference on Machine Learning, vol. 202, ML Research Press, 2023, pp. 24020–44.","apa":"Markov, I., Vladu, A., Guo, Q., & Alistarh, D.-A. (2023). Quantized distributed training of large models with convergence guarantees. In Proceedings of the 40th International Conference on Machine Learning (Vol. 202, pp. 24020–24044). Honolulu, Hawaii, HI, United States: ML Research Press.","ieee":"I. Markov, A. Vladu, Q. Guo, and D.-A. Alistarh, “Quantized distributed training of large models with convergence guarantees,” in Proceedings of the 40th International Conference on Machine Learning, Honolulu, Hawaii, HI, United States, 2023, vol. 202, pp. 24020–24044.","ista":"Markov I, Vladu A, Guo Q, Alistarh D-A. 2023. Quantized distributed training of large models with convergence guarantees. Proceedings of the 40th International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 202, 24020–24044.","ama":"Markov I, Vladu A, Guo Q, Alistarh D-A. Quantized distributed training of large models with convergence guarantees. 
In: Proceedings of the 40th International Conference on Machine Learning. Vol 202. ML Research Press; 2023:24020-24044."},"page":"24020-24044","abstract":[{"lang":"eng","text":"Communication-reduction techniques are a popular way to improve scalability in data-parallel training of deep neural networks (DNNs). The recent emergence of large language models such as GPT has created the need for new approaches to exploit data-parallelism. Among these, fully-sharded data parallel (FSDP) training is highly popular, yet it still encounters scalability bottlenecks. One reason is that applying compression techniques to FSDP is challenging: as the vast majority of the communication involves the model’s weights, direct compression alters convergence and leads to accuracy loss. We present QSDP, a variant of FSDP which supports both gradient and weight quantization with theoretical guarantees, is simple to implement and has essentially no overheads. To derive QSDP we prove that a natural modification of SGD achieves convergence even when we only maintain quantized weights, and thus the domain over which we train consists of quantized points and is, therefore, highly non-convex. We validate this approach by training GPT-family models with up to 1.3 billion parameters on a multi-node cluster. 
Experiments show that QSDP preserves model accuracy, while completely removing the communication bottlenecks of FSDP, providing end-to-end speedups of up to 2.2x."}],"type":"conference","alternative_title":["PMLR"],"oa_version":"Preprint","_id":"14461","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","title":"Quantized distributed training of large models with convergence guarantees","status":"public","intvolume":" 202","month":"07","publication_identifier":{"eissn":["2640-3498"]},"conference":{"name":"ICML: International Conference on Machine Learning","location":"Honolulu, Hawaii, HI, United States","start_date":"2023-07-23","end_date":"2023-07-29"},"acknowledged_ssus":[{"_id":"ScienComp"}],"language":[{"iso":"eng"}],"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2302.02390"}],"oa":1,"external_id":{"arxiv":["2302.02390"]},"quality_controlled":"1","project":[{"grant_number":"805223","_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning","call_identifier":"H2020"}],"ec_funded":1,"author":[{"last_name":"Markov","first_name":"Ilia","id":"D0CF4148-C985-11E9-8066-0BDEE5697425","full_name":"Markov, Ilia"},{"first_name":"Adrian","last_name":"Vladu","full_name":"Vladu, Adrian"},{"last_name":"Guo","first_name":"Qi","full_name":"Guo, Qi"},{"full_name":"Alistarh, Dan-Adrian","first_name":"Dan-Adrian","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X"}],"date_created":"2023-10-29T23:01:17Z","date_updated":"2023-10-31T09:40:45Z","volume":202,"year":"2023","acknowledgement":"The authors gratefully acknowledge funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML), as well as experimental support from the IST Austria IT department, in particular Stefano Elefante, Andrei Hornoiu, and Alois Schloegl. 
AV acknowledges the support of the French Agence Nationale de la Recherche (ANR), under grant ANR-21-CE48-0016 (project COMCOPT), the support of Fondation Hadamard with a PRMO grant, and the support of CNRS with a CoopIntEER IEA grant (project ALFRED).","publication_status":"published","publisher":"ML Research Press","department":[{"_id":"DaAl"}]},{"abstract":[{"lang":"eng","text":"Autoencoders are a popular model in many branches of machine learning and lossy data compression. However, their fundamental limits, the performance of gradient methods and the features learnt during optimization remain poorly understood, even in the two-layer setting. In fact, earlier work has considered either linear autoencoders or specific training regimes (leading to vanishing or diverging compression rates). Our paper addresses this gap by focusing on non-linear two-layer autoencoders trained in the challenging proportional regime in which the input dimension scales linearly with the size of the representation. Our results characterize the minimizers of the population risk, and show that such minimizers are achieved by gradient methods; their structure is also unveiled, thus leading to a concise description of the features obtained via training. For the special case of a sign activation function, our analysis establishes the fundamental limits for the lossy compression of Gaussian sources via (shallow) autoencoders. 
Finally, while the results are proved for Gaussian data, numerical simulations on standard datasets display the universality of the theoretical predictions."}],"alternative_title":["PMLR"],"type":"conference","oa_version":"Preprint","title":"Fundamental limits of two-layer autoencoders, and achieving them with gradient methods","status":"public","intvolume":" 202","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"14459","day":"30","article_processing_charge":"No","scopus_import":"1","date_published":"2023-07-30T00:00:00Z","page":"31151-31209","publication":"Proceedings of the 40th International Conference on Machine Learning","citation":{"chicago":"Shevchenko, Aleksandr, Kevin Kögler, Hamed Hassani, and Marco Mondelli. “Fundamental Limits of Two-Layer Autoencoders, and Achieving Them with Gradient Methods.” In Proceedings of the 40th International Conference on Machine Learning, 202:31151–209. ML Research Press, 2023.","mla":"Shevchenko, Aleksandr, et al. “Fundamental Limits of Two-Layer Autoencoders, and Achieving Them with Gradient Methods.” Proceedings of the 40th International Conference on Machine Learning, vol. 202, ML Research Press, 2023, pp. 31151–209.","short":"A. Shevchenko, K. Kögler, H. Hassani, M. Mondelli, in:, Proceedings of the 40th International Conference on Machine Learning, ML Research Press, 2023, pp. 31151–31209.","ista":"Shevchenko A, Kögler K, Hassani H, Mondelli M. 2023. Fundamental limits of two-layer autoencoders, and achieving them with gradient methods. Proceedings of the 40th International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 202, 31151–31209.","apa":"Shevchenko, A., Kögler, K., Hassani, H., & Mondelli, M. (2023). Fundamental limits of two-layer autoencoders, and achieving them with gradient methods. In Proceedings of the 40th International Conference on Machine Learning (Vol. 202, pp. 31151–31209). Honolulu, Hawaii, HI, United States: ML Research Press.","ieee":"A. 
Shevchenko, K. Kögler, H. Hassani, and M. Mondelli, “Fundamental limits of two-layer autoencoders, and achieving them with gradient methods,” in Proceedings of the 40th International Conference on Machine Learning, Honolulu, Hawaii, HI, United States, 2023, vol. 202, pp. 31151–31209.","ama":"Shevchenko A, Kögler K, Hassani H, Mondelli M. Fundamental limits of two-layer autoencoders, and achieving them with gradient methods. In: Proceedings of the 40th International Conference on Machine Learning. Vol 202. ML Research Press; 2023:31151-31209."},"date_created":"2023-10-29T23:01:17Z","date_updated":"2023-10-31T08:52:28Z","volume":202,"author":[{"id":"F2B06EC2-C99E-11E9-89F0-752EE6697425","last_name":"Shevchenko","first_name":"Aleksandr","full_name":"Shevchenko, Aleksandr"},{"full_name":"Kögler, Kevin","id":"94ec913c-dc85-11ea-9058-e5051ab2428b","last_name":"Kögler","first_name":"Kevin"},{"first_name":"Hamed","last_name":"Hassani","full_name":"Hassani, Hamed"},{"last_name":"Mondelli","first_name":"Marco","orcid":"0000-0002-3242-7020","id":"27EB676C-8706-11E9-9510-7717E6697425","full_name":"Mondelli, Marco"}],"publication_status":"published","department":[{"_id":"MaMo"},{"_id":"DaAl"}],"publisher":"ML Research Press","acknowledgement":"Aleksandr Shevchenko, Kevin Kogler and Marco Mondelli are supported by the 2019 Lopez-Loreta Prize. 
Hamed Hassani acknowledges the support by the NSF CIF award (1910056) and the NSF Institute for CORE Emerging Methods in Data Science (EnCORE).","year":"2023","month":"07","publication_identifier":{"eissn":["2640-3498"]},"language":[{"iso":"eng"}],"conference":{"name":"ICML: International Conference on Machine Learning","start_date":"2023-07-23","location":"Honolulu, Hawaii, HI, United States","end_date":"2023-07-29"},"quality_controlled":"1","project":[{"_id":"059876FA-7A3F-11EA-A408-12923DDC885E","name":"Prix Lopez-Loretta 2019 - Marco Mondelli"}],"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2212.13468"}],"external_id":{"arxiv":["2212.13468"]},"oa":1},{"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"14460","intvolume":" 202","status":"public","title":"SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge","oa_version":"Preprint","type":"conference","alternative_title":["PMLR"],"abstract":[{"text":"We provide an efficient implementation of the backpropagation algorithm, specialized to the case where the weights of the neural network being trained are sparse. Our algorithm is general, as it applies to arbitrary (unstructured) sparsity and common layer types (e.g., convolutional or linear). We provide a fast vectorized implementation on commodity CPUs, and show that it can yield speedups in end-to-end runtime experiments, both in transfer learning using already-sparsified networks, and in training sparse networks from scratch. Thus, our results provide the first support for sparse training on commodity hardware.","lang":"eng"}],"citation":{"mla":"Nikdan, Mahdi, et al. “SparseProp: Efficient Sparse Backpropagation for Faster Training of Neural Networks at the Edge.” Proceedings of the 40th International Conference on Machine Learning, vol. 202, ML Research Press, 2023, pp. 26215–27.","short":"M. Nikdan, T. Pegolotti, E.B. Iofinova, E. Kurtic, D.-A. 
Alistarh, in:, Proceedings of the 40th International Conference on Machine Learning, ML Research Press, 2023, pp. 26215–26227.","chicago":"Nikdan, Mahdi, Tommaso Pegolotti, Eugenia B Iofinova, Eldar Kurtic, and Dan-Adrian Alistarh. “SparseProp: Efficient Sparse Backpropagation for Faster Training of Neural Networks at the Edge.” In Proceedings of the 40th International Conference on Machine Learning, 202:26215–27. ML Research Press, 2023.","ama":"Nikdan M, Pegolotti T, Iofinova EB, Kurtic E, Alistarh D-A. SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge. In: Proceedings of the 40th International Conference on Machine Learning. Vol 202. ML Research Press; 2023:26215-26227.","ista":"Nikdan M, Pegolotti T, Iofinova EB, Kurtic E, Alistarh D-A. 2023. SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge. Proceedings of the 40th International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 202, 26215–26227.","ieee":"M. Nikdan, T. Pegolotti, E. B. Iofinova, E. Kurtic, and D.-A. Alistarh, “SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge,” in Proceedings of the 40th International Conference on Machine Learning, Honolulu, Hawaii, HI, United States, 2023, vol. 202, pp. 26215–26227.","apa":"Nikdan, M., Pegolotti, T., Iofinova, E. B., Kurtic, E., & Alistarh, D.-A. (2023). SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge. In Proceedings of the 40th International Conference on Machine Learning (Vol. 202, pp. 26215–26227). 
Honolulu, Hawaii, HI, United States: ML Research Press."},"publication":"Proceedings of the 40th International Conference on Machine Learning","page":"26215-26227","date_published":"2023-07-30T00:00:00Z","scopus_import":"1","article_processing_charge":"No","day":"30","year":"2023","acknowledgement":"We would like to thank Elias Frantar for his valuable assistance and support at the outset of this project, and the anonymous ICML and SNN reviewers for very constructive feedback. EI was supported in part by the FWF DK VGSCO, grant agreement number W1260-N35. DA acknowledges generous ERC support, via Starting Grant 805223 ScaleML. ","department":[{"_id":"DaAl"}],"publisher":"ML Research Press","publication_status":"published","author":[{"id":"66374281-f394-11eb-9cf6-869147deecc0","first_name":"Mahdi","last_name":"Nikdan","full_name":"Nikdan, Mahdi"},{"full_name":"Pegolotti, Tommaso","last_name":"Pegolotti","first_name":"Tommaso"},{"full_name":"Iofinova, Eugenia B","id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117","orcid":"0000-0002-7778-3221","first_name":"Eugenia B","last_name":"Iofinova"},{"full_name":"Kurtic, Eldar","last_name":"Kurtic","first_name":"Eldar","id":"47beb3a5-07b5-11eb-9b87-b108ec578218"},{"full_name":"Alistarh, Dan-Adrian","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X","first_name":"Dan-Adrian","last_name":"Alistarh"}],"volume":202,"date_updated":"2023-10-31T09:33:51Z","date_created":"2023-10-29T23:01:17Z","ec_funded":1,"oa":1,"external_id":{"arxiv":["2302.04852"]},"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2302.04852","open_access":"1"}],"project":[{"call_identifier":"H2020","name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425","grant_number":"805223"}],"quality_controlled":"1","conference":{"name":"ICML: International Conference on Machine Learning","location":"Honolulu, Hawaii, HI, United 
States","start_date":"2023-07-23","end_date":"2023-07-29"},"language":[{"iso":"eng"}],"publication_identifier":{"eissn":["2640-3498"]},"month":"07"},{"page":"10323-10337","citation":{"ista":"Frantar E, Alistarh D-A. 2023. SparseGPT: Massive language models can be accurately pruned in one-shot. Proceedings of the 40th International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 202, 10323–10337.","apa":"Frantar, E., & Alistarh, D.-A. (2023). SparseGPT: Massive language models can be accurately pruned in one-shot. In Proceedings of the 40th International Conference on Machine Learning (Vol. 202, pp. 10323–10337). Honolulu, Hawaii, HI, United States: ML Research Press.","ieee":"E. Frantar and D.-A. Alistarh, “SparseGPT: Massive language models can be accurately pruned in one-shot,” in Proceedings of the 40th International Conference on Machine Learning, Honolulu, Hawaii, HI, United States, 2023, vol. 202, pp. 10323–10337.","ama":"Frantar E, Alistarh D-A. SparseGPT: Massive language models can be accurately pruned in one-shot. In: Proceedings of the 40th International Conference on Machine Learning. Vol 202. ML Research Press; 2023:10323-10337.","chicago":"Frantar, Elias, and Dan-Adrian Alistarh. “SparseGPT: Massive Language Models Can Be Accurately Pruned in One-Shot.” In Proceedings of the 40th International Conference on Machine Learning, 202:10323–37. ML Research Press, 2023.","mla":"Frantar, Elias, and Dan-Adrian Alistarh. “SparseGPT: Massive Language Models Can Be Accurately Pruned in One-Shot.” Proceedings of the 40th International Conference on Machine Learning, vol. 202, ML Research Press, 2023, pp. 10323–37.","short":"E. Frantar, D.-A. Alistarh, in:, Proceedings of the 40th International Conference on Machine Learning, ML Research Press, 2023, pp. 
10323–10337."},"publication":"Proceedings of the 40th International Conference on Machine Learning","date_published":"2023-07-30T00:00:00Z","scopus_import":"1","article_processing_charge":"No","day":"30","intvolume":" 202","title":"SparseGPT: Massive language models can be accurately pruned in one-shot","status":"public","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"14458","oa_version":"Preprint","alternative_title":["PMLR"],"type":"conference","abstract":[{"lang":"eng","text":"We show for the first time that large-scale generative pretrained transformer (GPT) family models can be pruned to at least 50% sparsity in one-shot, without any retraining, at minimal loss of accuracy. This is achieved via a new pruning method called SparseGPT, specifically designed to work efficiently and accurately on massive GPT-family models. We can execute SparseGPT on the largest available open-source models, OPT-175B and BLOOM-176B, in under 4.5 hours, and can reach 60% unstructured sparsity with negligible increase in perplexity: remarkably, more than 100 billion weights from these models can be ignored at inference time. SparseGPT generalizes to semi-structured (2:4 and 4:8) patterns, and is compatible with weight quantization approaches. 
The code is available at: https://github.com/IST-DASLab/sparsegpt."}],"project":[{"grant_number":"805223","_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning","call_identifier":"H2020"}],"quality_controlled":"1","oa":1,"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2301.00774"}],"external_id":{"arxiv":["2301.00774"]},"language":[{"iso":"eng"}],"acknowledged_ssus":[{"_id":"ScienComp"}],"conference":{"name":"ICML: International Conference on Machine Learning","end_date":"2023-07-29","start_date":"2023-07-23","location":"Honolulu, Hawaii, HI, United States"},"publication_identifier":{"eissn":["2640-3498"]},"month":"07","department":[{"_id":"DaAl"}],"publisher":"ML Research Press","publication_status":"published","year":"2023","acknowledgement":"The authors gratefully acknowledge funding from the European Research Council (ERC) under the European Union’s Horizon 2020 programme (grant agreement No. 805223 ScaleML), as well as experimental support from Eldar Kurtic, and from the IST Austria IT department, in particular Stefano Elefante, Andrei Hornoiu, and Alois Schloegl.","volume":202,"date_created":"2023-10-29T23:01:16Z","date_updated":"2023-10-31T09:59:42Z","author":[{"full_name":"Frantar, Elias","first_name":"Elias","last_name":"Frantar","id":"09a8f98d-ec99-11ea-ae11-c063a7b7fe5f"},{"first_name":"Dan-Adrian","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X","full_name":"Alistarh, Dan-Adrian"}],"ec_funded":1},{"scopus_import":"1","article_processing_charge":"No","day":"25","page":"913-944","article_type":"original","citation":{"ama":"Alistarh D-A, Aspnes J, Ellen F, Gelashvili R, Zhu L. Why extension-based proofs fail. SIAM Journal on Computing. 2023;52(4):913-944. doi:10.1137/20M1375851","apa":"Alistarh, D.-A., Aspnes, J., Ellen, F., Gelashvili, R., & Zhu, L. (2023). Why extension-based proofs fail. SIAM Journal on Computing. 
Society for Industrial and Applied Mathematics. https://doi.org/10.1137/20M1375851","ieee":"D.-A. Alistarh, J. Aspnes, F. Ellen, R. Gelashvili, and L. Zhu, “Why extension-based proofs fail,” SIAM Journal on Computing, vol. 52, no. 4. Society for Industrial and Applied Mathematics, pp. 913–944, 2023.","ista":"Alistarh D-A, Aspnes J, Ellen F, Gelashvili R, Zhu L. 2023. Why extension-based proofs fail. SIAM Journal on Computing. 52(4), 913–944.","short":"D.-A. Alistarh, J. Aspnes, F. Ellen, R. Gelashvili, L. Zhu, SIAM Journal on Computing 52 (2023) 913–944.","mla":"Alistarh, Dan-Adrian, et al. “Why Extension-Based Proofs Fail.” SIAM Journal on Computing, vol. 52, no. 4, Society for Industrial and Applied Mathematics, 2023, pp. 913–44, doi:10.1137/20M1375851.","chicago":"Alistarh, Dan-Adrian, James Aspnes, Faith Ellen, Rati Gelashvili, and Leqi Zhu. “Why Extension-Based Proofs Fail.” SIAM Journal on Computing. Society for Industrial and Applied Mathematics, 2023. https://doi.org/10.1137/20M1375851."},"publication":"SIAM Journal on Computing","date_published":"2023-07-25T00:00:00Z","type":"journal_article","issue":"4","abstract":[{"text":"We introduce extension-based proofs, a class of impossibility proofs that includes valency arguments. They are modelled as an interaction between a prover and a protocol. Using proofs based on combinatorial topology, it has been shown that it is impossible to deterministically solve k-set agreement among n > k ≥ 2 processes or approximate agreement on a cycle of length 4 among n ≥ 3 processes in a wait-free manner in asynchronous models where processes communicate using objects that can be constructed from shared registers. However, it was unknown whether proofs based on simpler techniques were possible. 
We show that these impossibility results cannot be obtained by extension-based proofs in the iterated snapshot model and, hence, extension-based proofs are limited in power.","lang":"eng"}],"intvolume":" 52","title":"Why extension-based proofs fail","status":"public","_id":"14364","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa_version":"Preprint","publication_identifier":{"eissn":["1095-7111"],"issn":["0097-5397"]},"month":"07","project":[{"name":"Elastic Coordination for Scalable Machine Learning","call_identifier":"H2020","_id":"268A44D6-B435-11E9-9278-68D0E5697425","grant_number":"805223"}],"quality_controlled":"1","isi":1,"main_file_link":[{"url":"https://arxiv.org/abs/1811.01421","open_access":"1"}],"oa":1,"external_id":{"arxiv":["1811.01421"],"isi":["001082972300004"]},"language":[{"iso":"eng"}],"doi":"10.1137/20M1375851","ec_funded":1,"publisher":"Society for Industrial and Applied Mathematics","department":[{"_id":"DaAl"}],"publication_status":"published","acknowledgement":"We would like to thank Valerie King, Toniann Pitassi, and Michael Saks for helpful discussions and Shi Hao Liu for his useful feedback.\r\nThis research was supported by the Natural Science and Engineering Research Council of Canada under grants RGPIN-2015-05080 and RGPIN-2020-04178, a postgraduate scholarship, and a postdoctoral fellowship; a University of Toronto postdoctoral fellowship; the National Science Foundation under grants CCF-1217921, CCF-1301926, CCF-1637385, CCF-1650596, and IIS-1447786; the U.S. Department of Energy under grant ER26116/DE-SC0008923; the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme grant agreement 805223 ScaleML; and the Oracle and Intel corporations. 
Some of the work on this paper was done while Faith Ellen was visiting IST Austria.","year":"2023","volume":52,"date_updated":"2023-12-13T12:28:29Z","date_created":"2023-09-24T22:01:11Z","related_material":{"record":[{"id":"6676","status":"public","relation":"earlier_version"}]},"author":[{"full_name":"Alistarh, Dan-Adrian","last_name":"Alistarh","first_name":"Dan-Adrian","orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87"},{"last_name":"Aspnes","first_name":"James","full_name":"Aspnes, James"},{"full_name":"Ellen, Faith","first_name":"Faith","last_name":"Ellen"},{"last_name":"Gelashvili","first_name":"Rati","full_name":"Gelashvili, Rati"},{"full_name":"Zhu, Leqi","first_name":"Leqi","last_name":"Zhu","id":"a2117c59-cee4-11ed-b9d0-874ecf0f8ac5"}]},{"ec_funded":1,"publication_status":"published","department":[{"_id":"DaAl"},{"_id":"ChLa"}],"publisher":"IEEE","acknowledgement":"The authors would like to sincerely thank Sara Hooker for her feedback during the development of this work. EI was supported in part by the FWF DK VGSCO, grant agreement number W1260-N35. 
AP and DA acknowledge generous ERC support, via Starting Grant 805223 ScaleML.","year":"2023","date_updated":"2024-01-10T08:59:26Z","date_created":"2024-01-10T08:42:40Z","author":[{"full_name":"Iofinova, Eugenia B","first_name":"Eugenia B","last_name":"Iofinova","id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117","orcid":"0000-0002-7778-3221"},{"last_name":"Peste","first_name":"Elena-Alexandra","id":"32D78294-F248-11E8-B48F-1D18A9856A87","full_name":"Peste, Elena-Alexandra"},{"first_name":"Dan-Adrian","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X","full_name":"Alistarh, Dan-Adrian"}],"related_material":{"link":[{"relation":"software","url":"https://github.com/IST-DASLab/pruned-vision-model-bias"}]},"month":"08","publication_identifier":{"eisbn":["9798350301298"],"eissn":["2575-7075"]},"isi":1,"quality_controlled":"1","project":[{"_id":"9B9290DE-BA93-11EA-9121-9846C619BF3A","grant_number":" W1260-N35","name":"Vienna Graduate School on Computational Optimization"},{"call_identifier":"H2020","name":"Elastic Coordination for Scalable Machine Learning","grant_number":"805223","_id":"268A44D6-B435-11E9-9278-68D0E5697425"}],"external_id":{"isi":["001062531308068"],"arxiv":["2304.12622"]},"oa":1,"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2304.12622","open_access":"1"}],"language":[{"iso":"eng"}],"conference":{"name":"CVPR: Conference on Computer Vision and Pattern Recognition","end_date":"2023-06-24","start_date":"2023-06-17","location":"Vancouver, BC, Canada"},"doi":"10.1109/cvpr52729.2023.02334","type":"conference","abstract":[{"lang":"eng","text":"Pruning—that is, setting a significant subset of the parameters of a neural network to zero—is one of the most popular methods of model compression. Yet, several recent works have raised the issue that pruning may induce or exacerbate bias in the output of the compressed model. 
Despite existing evidence for this phenomenon, the relationship between neural network pruning and induced bias is not well-understood. In this work, we systematically investigate and characterize this phenomenon in Convolutional Neural Networks for computer vision. First, we show that it is in fact possible to obtain highly-sparse models, e.g. with less than 10% remaining weights, which do not decrease in accuracy nor substantially increase in bias when compared to dense models. At the same time, we also find that, at higher sparsities, pruned models exhibit higher uncertainty in their outputs, as well as increased correlations, which we directly link to increased bias. We propose easy-to-use criteria which, based only on the uncompressed model, establish whether bias will increase with pruning, and identify the samples most susceptible to biased predictions post-compression. Our code can be found at https://github.com/IST-DASLab/pruned-vision-model-bias."}],"status":"public","title":"Bias in pruned vision models: In-depth analysis and countermeasures","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"14771","oa_version":"Preprint","day":"22","article_processing_charge":"No","page":"24364-24373","publication":"2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition","citation":{"ama":"Iofinova EB, Peste E-A, Alistarh D-A. Bias in pruned vision models: In-depth analysis and countermeasures. In: 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition. IEEE; 2023:24364-24373. doi:10.1109/cvpr52729.2023.02334","ista":"Iofinova EB, Peste E-A, Alistarh D-A. 2023. Bias in pruned vision models: In-depth analysis and countermeasures. 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition. CVPR: Conference on Computer Vision and Pattern Recognition, 24364–24373.","apa":"Iofinova, E. B., Peste, E.-A., & Alistarh, D.-A. (2023). Bias in pruned vision models: In-depth analysis and countermeasures. 
In 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 24364–24373). Vancouver, BC, Canada: IEEE. https://doi.org/10.1109/cvpr52729.2023.02334","ieee":"E. B. Iofinova, E.-A. Peste, and D.-A. Alistarh, “Bias in pruned vision models: In-depth analysis and countermeasures,” in 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition, Vancouver, BC, Canada, 2023, pp. 24364–24373.","mla":"Iofinova, Eugenia B., et al. “Bias in Pruned Vision Models: In-Depth Analysis and Countermeasures.” 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition, IEEE, 2023, pp. 24364–73, doi:10.1109/cvpr52729.2023.02334.","short":"E.B. Iofinova, E.-A. Peste, D.-A. Alistarh, in:, 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition, IEEE, 2023, pp. 24364–24373.","chicago":"Iofinova, Eugenia B, Elena-Alexandra Peste, and Dan-Adrian Alistarh. “Bias in Pruned Vision Models: In-Depth Analysis and Countermeasures.” In 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition, 24364–73. IEEE, 2023. https://doi.org/10.1109/cvpr52729.2023.02334."},"date_published":"2023-08-22T00:00:00Z"},{"month":"10","publication_identifier":{"eissn":["1533-7928"]},"tmp":{"name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","image":"/images/cc_by.png"},"external_id":{"isi":["001111578500001"],"arxiv":["2002.12410"]},"oa":1,"isi":1,"quality_controlled":"1","language":[{"iso":"eng"}],"file_date_updated":"2024-01-16T12:13:27Z","year":"2023","acknowledgement":"The work in Sections 1-5 was conducted while A. Beznosikov was a research intern in the Optimizationand Machine Learning Lab of Peter Richtárik at KAUST; this visit was funded by the KAUST Baseline Research Funding Scheme. The work of A. Beznosikov in Section 6 was conducted in Skoltech and was supported by Ministry of Science and Higher Education grant No. 
075-10-2021-068. ","publication_status":"published","department":[{"_id":"DaAl"}],"publisher":"Journal of Machine Learning Research","author":[{"last_name":"Beznosikov","first_name":"Aleksandr","full_name":"Beznosikov, Aleksandr"},{"first_name":"Samuel","last_name":"Horvath","full_name":"Horvath, Samuel"},{"last_name":"Richtarik","first_name":"Peter","full_name":"Richtarik, Peter"},{"id":"dd546b39-0804-11ed-9c55-ef075c39778d","last_name":"Safaryan","first_name":"Mher","full_name":"Safaryan, Mher"}],"date_created":"2024-01-16T12:13:36Z","date_updated":"2024-01-17T09:14:13Z","volume":24,"day":"01","article_processing_charge":"Yes (in subscription journal)","has_accepted_license":"1","publication":"Journal of Machine Learning Research","citation":{"apa":"Beznosikov, A., Horvath, S., Richtarik, P., & Safaryan, M. (2023). On biased compression for distributed learning. Journal of Machine Learning Research. Journal of Machine Learning Research.","ieee":"A. Beznosikov, S. Horvath, P. Richtarik, and M. Safaryan, “On biased compression for distributed learning,” Journal of Machine Learning Research, vol. 24. Journal of Machine Learning Research, pp. 1–50, 2023.","ista":"Beznosikov A, Horvath S, Richtarik P, Safaryan M. 2023. On biased compression for distributed learning. Journal of Machine Learning Research. 24, 1–50.","ama":"Beznosikov A, Horvath S, Richtarik P, Safaryan M. On biased compression for distributed learning. Journal of Machine Learning Research. 2023;24:1-50.","chicago":"Beznosikov, Aleksandr, Samuel Horvath, Peter Richtarik, and Mher Safaryan. “On Biased Compression for Distributed Learning.” Journal of Machine Learning Research. Journal of Machine Learning Research, 2023.","short":"A. Beznosikov, S. Horvath, P. Richtarik, M. Safaryan, Journal of Machine Learning Research 24 (2023) 1–50.","mla":"Beznosikov, Aleksandr, et al. “On Biased Compression for Distributed Learning.” Journal of Machine Learning Research, vol. 
24, Journal of Machine Learning Research, 2023, pp. 1–50."},"article_type":"original","page":"1-50","date_published":"2023-10-01T00:00:00Z","type":"journal_article","abstract":[{"text":"In the last few years, various communication compression techniques have emerged as an indispensable tool helping to alleviate the communication bottleneck in distributed learning. However, despite the fact biased compressors often show superior performance in practice when compared to the much more studied and understood unbiased compressors, very little is known about them. In this work we study three classes of biased compression operators, two of which are new, and their performance when applied to (stochastic) gradient descent and distributed (stochastic) gradient descent. We show for the first time that biased compressors can lead to linear convergence rates both in the single node and distributed settings. We prove that distributed compressed SGD method, employed with error feedback mechanism, enjoys the ergodic rate O(δL exp[−μK/(δL)] + (C+δD)/(Kμ)), where δ≥1 is a compression parameter which grows when more compression is applied, L and μ are the smoothness and strong convexity constants, C captures stochastic gradient noise (C=0 if full gradients are computed on each node) and D captures the variance of the gradients at the optimum (D=0 for over-parameterized models). Further, via a theoretical study of several synthetic and empirical distributions of communicated gradients, we shed light on why and by how much biased compressors outperform their unbiased variants. 
Finally, we propose several new biased compressors with promising theoretical guarantees and practical performance.","lang":"eng"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"14815","title":"On biased compression for distributed learning","ddc":["000"],"status":"public","intvolume":" 24","file":[{"date_updated":"2024-01-16T12:13:27Z","date_created":"2024-01-16T12:13:27Z","checksum":"c50f2b9db53938b755e30a085f464059","success":1,"relation":"main_file","file_id":"14816","file_size":1510993,"content_type":"application/pdf","creator":"dernst","file_name":"2023_JMLR_Beznosikov.pdf","access_level":"open_access"}],"oa_version":"Published Version"},{"date_updated":"2024-02-27T07:46:52Z","date_created":"2023-09-03T22:01:16Z","volume":13964,"author":[{"full_name":"Koval, Nikita","first_name":"Nikita","last_name":"Koval","id":"2F4DB10C-F248-11E8-B48F-1D18A9856A87"},{"full_name":"Fedorov, Alexander","id":"2e711909-896a-11ed-bdf8-eb0f5a2984c6","first_name":"Alexander","last_name":"Fedorov"},{"first_name":"Maria","last_name":"Sokolova","full_name":"Sokolova, Maria"},{"first_name":"Dmitry","last_name":"Tsitelov","full_name":"Tsitelov, Dmitry"},{"full_name":"Alistarh, Dan-Adrian","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X","first_name":"Dan-Adrian","last_name":"Alistarh"}],"related_material":{"record":[{"relation":"research_data","status":"public","id":"14995"}]},"publication_status":"published","department":[{"_id":"DaAl"},{"_id":"GradSch"}],"publisher":"Springer Nature","year":"2023","file_date_updated":"2023-09-06T08:16:25Z","language":[{"iso":"eng"}],"conference":{"start_date":"2023-07-17","location":"Paris, France","end_date":"2023-07-22","name":"CAV: Computer Aided Verification"},"doi":"10.1007/978-3-031-37706-8_8","quality_controlled":"1","tmp":{"name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY 
(4.0)","image":"/images/cc_by.png"},"oa":1,"month":"07","publication_identifier":{"issn":["0302-9743"],"isbn":["9783031377051"],"eissn":["1611-3349"]},"oa_version":"Published Version","file":[{"date_updated":"2023-09-06T08:16:25Z","date_created":"2023-09-06T08:16:25Z","success":1,"checksum":"c346016393123a0a2338ad4d976f61bc","file_id":"14275","relation":"main_file","creator":"dernst","content_type":"application/pdf","file_size":421408,"file_name":"2023_LNCS_Koval.pdf","access_level":"open_access"}],"ddc":["000"],"title":"Lincheck: A practical framework for testing concurrent data structures on JVM","status":"public","intvolume":" 13964","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"14260","abstract":[{"text":"This paper presents Lincheck, a new practical and user-friendly framework for testing concurrent algorithms on the Java Virtual Machine (JVM). Lincheck provides a simple and declarative way to write concurrent tests: instead of describing how to perform the test, users specify what to test by declaring all the operations to examine; the framework automatically handles the rest. As a result, tests written with Lincheck are concise and easy to understand. The framework automatically generates a set of concurrent scenarios, examines them using stress-testing or bounded model checking, and verifies that the results of each invocation are correct. Notably, if an error is detected via model checking, Lincheck provides an easy-to-follow trace to reproduce it, significantly simplifying the bug investigation.\r\n\r\nTo the best of our knowledge, Lincheck is the first production-ready tool on the JVM that offers such a simple way of writing concurrent tests, without requiring special skills or expertise. 
We successfully integrated Lincheck in the development process of several large projects, such as Kotlin Coroutines, and identified new bugs in popular concurrency libraries, such as a race in Java’s standard ConcurrentLinkedDeque and a liveness bug in Java’s AbstractQueuedSynchronizer framework, which is used in most of the synchronization primitives. We believe that Lincheck can significantly improve the quality and productivity of concurrent algorithms research and development and become the state-of-the-art tool for checking their correctness.","lang":"eng"}],"alternative_title":["LNCS"],"type":"conference","date_published":"2023-07-17T00:00:00Z","page":"156-169","publication":"35th International Conference on Computer Aided Verification ","citation":{"mla":"Koval, Nikita, et al. “Lincheck: A Practical Framework for Testing Concurrent Data Structures on JVM.” 35th International Conference on Computer Aided Verification , vol. 13964, Springer Nature, 2023, pp. 156–69, doi:10.1007/978-3-031-37706-8_8.","short":"N. Koval, A. Fedorov, M. Sokolova, D. Tsitelov, D.-A. Alistarh, in:, 35th International Conference on Computer Aided Verification , Springer Nature, 2023, pp. 156–169.","chicago":"Koval, Nikita, Alexander Fedorov, Maria Sokolova, Dmitry Tsitelov, and Dan-Adrian Alistarh. “Lincheck: A Practical Framework for Testing Concurrent Data Structures on JVM.” In 35th International Conference on Computer Aided Verification , 13964:156–69. Springer Nature, 2023. https://doi.org/10.1007/978-3-031-37706-8_8.","ama":"Koval N, Fedorov A, Sokolova M, Tsitelov D, Alistarh D-A. Lincheck: A practical framework for testing concurrent data structures on JVM. In: 35th International Conference on Computer Aided Verification . Vol 13964. Springer Nature; 2023:156-169. doi:10.1007/978-3-031-37706-8_8","ista":"Koval N, Fedorov A, Sokolova M, Tsitelov D, Alistarh D-A. 2023. Lincheck: A practical framework for testing concurrent data structures on JVM. 
35th International Conference on Computer Aided Verification . CAV: Computer Aided Verification, LNCS, vol. 13964, 156–169.","apa":"Koval, N., Fedorov, A., Sokolova, M., Tsitelov, D., & Alistarh, D.-A. (2023). Lincheck: A practical framework for testing concurrent data structures on JVM. In 35th International Conference on Computer Aided Verification (Vol. 13964, pp. 156–169). Paris, France: Springer Nature. https://doi.org/10.1007/978-3-031-37706-8_8","ieee":"N. Koval, A. Fedorov, M. Sokolova, D. Tsitelov, and D.-A. Alistarh, “Lincheck: A practical framework for testing concurrent data structures on JVM,” in 35th International Conference on Computer Aided Verification , Paris, France, 2023, vol. 13964, pp. 156–169."},"day":"17","article_processing_charge":"Yes (in subscription journal)","has_accepted_license":"1","scopus_import":"1"},{"abstract":[{"lang":"eng","text":"Lincheck is a new practical and user-friendly framework for testing concurrent data structures on the Java Virtual Machine (JVM). It provides a simple and declarative way to write concurrent tests. Instead of describing how to perform the test, users specify what to test by declaring all the operations to examine; the framework automatically handles the rest. As a result, tests written with Lincheck are concise and easy to understand. \r\nThe artifact presents a collection of Lincheck tests that discover new bugs in popular libraries and implementations from the concurrency literature -- they are listed in Table 1, Section 3. To evaluate the performance of Lincheck analysis, the collection of tests also includes those which check correct data structures and, thus, always succeed. Similarly to Table 2, Section 3, the experiments demonstrate the reasonable time to perform a test. 
Finally, Lincheck provides user-friendly output with an easy-to-follow trace to reproduce a detected error, significantly simplifying further investigation."}],"type":"research_data_reference","oa_version":"Published Version","date_updated":"2024-02-27T07:46:52Z","date_created":"2024-02-14T15:14:13Z","related_material":{"record":[{"status":"public","relation":"used_in_publication","id":"14260"}]},"author":[{"id":"2F4DB10C-F248-11E8-B48F-1D18A9856A87","last_name":"Koval","first_name":"Nikita","full_name":"Koval, Nikita"},{"full_name":"Fedorov, Alexander","id":"2e711909-896a-11ed-bdf8-eb0f5a2984c6","last_name":"Fedorov","first_name":"Alexander"},{"full_name":"Sokolova, Maria","last_name":"Sokolova","first_name":"Maria"},{"first_name":"Dmitry","last_name":"Tsitelov","full_name":"Tsitelov, Dmitry"},{"orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","last_name":"Alistarh","first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian"}],"publisher":"Zenodo","department":[{"_id":"DaAl"}],"title":"Lincheck: A practical framework for testing concurrent data structures on JVM","ddc":["000"],"status":"public","_id":"14995","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","year":"2023","article_processing_charge":"No","day":"28","month":"04","doi":"10.5281/ZENODO.7877757","date_published":"2023-04-28T00:00:00Z","citation":{"short":"N. Koval, A. Fedorov, M. Sokolova, D. Tsitelov, D.-A. Alistarh, (2023).","mla":"Koval, Nikita, et al. Lincheck: A Practical Framework for Testing Concurrent Data Structures on JVM. Zenodo, 2023, doi:10.5281/ZENODO.7877757.","chicago":"Koval, Nikita, Alexander Fedorov, Maria Sokolova, Dmitry Tsitelov, and Dan-Adrian Alistarh. “Lincheck: A Practical Framework for Testing Concurrent Data Structures on JVM.” Zenodo, 2023. https://doi.org/10.5281/ZENODO.7877757.","ama":"Koval N, Fedorov A, Sokolova M, Tsitelov D, Alistarh D-A. Lincheck: A practical framework for testing concurrent data structures on JVM. 2023. 
doi:10.5281/ZENODO.7877757","ieee":"N. Koval, A. Fedorov, M. Sokolova, D. Tsitelov, and D.-A. Alistarh, “Lincheck: A practical framework for testing concurrent data structures on JVM.” Zenodo, 2023.","apa":"Koval, N., Fedorov, A., Sokolova, M., Tsitelov, D., & Alistarh, D.-A. (2023). Lincheck: A practical framework for testing concurrent data structures on JVM. Zenodo. https://doi.org/10.5281/ZENODO.7877757","ista":"Koval N, Fedorov A, Sokolova M, Tsitelov D, Alistarh D-A. 2023. Lincheck: A practical framework for testing concurrent data structures on JVM, Zenodo, 10.5281/ZENODO.7877757."},"oa":1,"main_file_link":[{"open_access":"1","url":"https://doi.org/10.5281/zenodo.7877757"}]},{"alternative_title":["LIPIcs"],"type":"conference","abstract":[{"lang":"eng","text":"Let G be a graph on n nodes. In the stochastic population protocol model, a collection of n indistinguishable, resource-limited nodes collectively solve tasks via pairwise interactions. In each interaction, two randomly chosen neighbors first read each other’s states, and then update their local states. A rich line of research has established tight upper and lower bounds on the complexity of fundamental tasks, such as majority and leader election, in this model, when G is a clique. Specifically, in the clique, these tasks can be solved fast, i.e., in n polylog n pairwise interactions, with high probability, using at most polylog n states per node.\r\nIn this work, we consider the more general setting where G is an arbitrary regular graph, and present a technique for simulating protocols designed for fully-connected networks in any connected regular graph. Our main result is a simulation that is efficient on many interesting graph families: roughly, the simulation overhead is polylogarithmic in the number of nodes, and quadratic in the conductance of the graph. 
As a sample application, we show that, in any regular graph with conductance φ, both leader election and exact majority can be solved in φ^{-2} ⋅ n polylog n pairwise interactions, with high probability, using at most φ^{-2} ⋅ polylog n states per node. This shows that there are fast and space-efficient population protocols for leader election and exact majority on graphs with good expansion properties. We believe our results will prove generally useful, as they allow efficient technology transfer between the well-mixed (clique) case, and the under-explored spatial setting."}],"intvolume":" 217","title":"Fast graphical population protocols","ddc":["510"],"status":"public","_id":"11184","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa_version":"Published Version","file":[{"content_type":"application/pdf","file_size":959406,"creator":"dernst","access_level":"open_access","file_name":"2022_LIPICs_Alistarh.pdf","checksum":"2c7c982174c6f98c4ca6e92539d15086","success":1,"date_updated":"2022-05-02T08:06:33Z","date_created":"2022-05-02T08:06:33Z","relation":"main_file","file_id":"11346"}],"scopus_import":"1","has_accepted_license":"1","article_processing_charge":"No","day":"01","citation":{"ama":"Alistarh D-A, Gelashvili R, Rybicki J. Fast graphical population protocols. In: Bramas Q, Gramoli V, Milani A, eds. 25th International Conference on Principles of Distributed Systems. Vol 217. Schloss Dagstuhl - Leibniz-Zentrum für Informatik; 2022. doi:10.4230/LIPIcs.OPODIS.2021.14","ieee":"D.-A. Alistarh, R. Gelashvili, and J. Rybicki, “Fast graphical population protocols,” in 25th International Conference on Principles of Distributed Systems, Strasbourg, France, 2022, vol. 217.","apa":"Alistarh, D.-A., Gelashvili, R., & Rybicki, J. (2022). Fast graphical population protocols. In Q. Bramas, V. Gramoli, & A. Milani (Eds.), 25th International Conference on Principles of Distributed Systems (Vol. 217). Strasbourg, France: Schloss Dagstuhl - Leibniz-Zentrum für Informatik. 
https://doi.org/10.4230/LIPIcs.OPODIS.2021.14","ista":"Alistarh D-A, Gelashvili R, Rybicki J. 2022. Fast graphical population protocols. 25th International Conference on Principles of Distributed Systems. OPODIS, LIPIcs, vol. 217, 14.","short":"D.-A. Alistarh, R. Gelashvili, J. Rybicki, in:, Q. Bramas, V. Gramoli, A. Milani (Eds.), 25th International Conference on Principles of Distributed Systems, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2022.","mla":"Alistarh, Dan-Adrian, et al. “Fast Graphical Population Protocols.” 25th International Conference on Principles of Distributed Systems, edited by Quentin Bramas et al., vol. 217, 14, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2022, doi:10.4230/LIPIcs.OPODIS.2021.14.","chicago":"Alistarh, Dan-Adrian, Rati Gelashvili, and Joel Rybicki. “Fast Graphical Population Protocols.” In 25th International Conference on Principles of Distributed Systems, edited by Quentin Bramas, Vincent Gramoli, and Alessia Milani, Vol. 217. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2022. 
https://doi.org/10.4230/LIPIcs.OPODIS.2021.14."},"publication":"25th International Conference on Principles of Distributed Systems","date_published":"2022-02-01T00:00:00Z","article_number":"14","ec_funded":1,"file_date_updated":"2022-05-02T08:06:33Z","editor":[{"full_name":"Bramas, Quentin","first_name":"Quentin","last_name":"Bramas"},{"full_name":"Gramoli, Vincent","first_name":"Vincent","last_name":"Gramoli"},{"last_name":"Milani","first_name":"Alessia","full_name":"Milani, Alessia"}],"department":[{"_id":"DaAl"}],"publisher":"Schloss Dagstuhl - Leibniz-Zentrum für Informatik","publication_status":"published","acknowledgement":"Dan Alistarh: This project has received funding from the European Research Council (ERC)\r\nunder the European Union’s Horizon 2020 research and innovation programme (grant agreement No.805223 ScaleML).\r\nJoel Rybicki: This project has received from the European Union’s Horizon 2020 research and\r\ninnovation programme under the Marie Skłodowska-Curie grant agreement No. 840605.\r\nAcknowledgements We grateful to Giorgi Nadiradze for pointing out a generalisation of the phase clock construction to non-regular graphs. 
We also thank anonymous reviewers for their useful comments on earlier versions of this manuscript.","year":"2022","volume":217,"date_created":"2022-04-17T22:01:47Z","date_updated":"2022-05-02T08:09:39Z","author":[{"full_name":"Alistarh, Dan-Adrian","first_name":"Dan-Adrian","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X"},{"first_name":"Rati","last_name":"Gelashvili","full_name":"Gelashvili, Rati"},{"full_name":"Rybicki, Joel","orcid":"0000-0002-6432-6646","id":"334EFD2E-F248-11E8-B48F-1D18A9856A87","last_name":"Rybicki","first_name":"Joel"}],"publication_identifier":{"isbn":["9783959772198"],"issn":["1868-8969"]},"month":"02","project":[{"call_identifier":"H2020","name":"Elastic Coordination for Scalable Machine Learning","grant_number":"805223","_id":"268A44D6-B435-11E9-9278-68D0E5697425"},{"grant_number":"840605","_id":"26A5D39A-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","name":"Coordination in constrained and natural distributed systems"}],"quality_controlled":"1","tmp":{"name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","image":"/images/cc_by.png"},"oa":1,"external_id":{"arxiv":["2102.08808"]},"language":[{"iso":"eng"}],"doi":"10.4230/LIPIcs.OPODIS.2021.14","conference":{"name":"OPODIS","end_date":"2021-12-15","location":"Strasbourg, France","start_date":"2021-12-13"}},{"date_published":"2022-02-01T00:00:00Z","citation":{"chicago":"Nikabadi, Amir, and Janne Korhonen. “Beyond Distributed Subgraph Detection: Induced Subgraphs, Multicolored Problems and Graph Parameters.” In 25th International Conference on Principles of Distributed Systems, edited by Quentin Bramas, Vincent Gramoli, and Alessia Milani, Vol. 217. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2022. https://doi.org/10.4230/LIPIcs.OPODIS.2021.15.","mla":"Nikabadi, Amir, and Janne Korhonen. “Beyond Distributed Subgraph Detection: Induced Subgraphs, Multicolored Problems and Graph Parameters.” 25th International Conference on Principles of Distributed Systems, edited by Quentin Bramas et al., vol. 217, 15, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2022, doi:10.4230/LIPIcs.OPODIS.2021.15.","short":"A. Nikabadi, J. Korhonen, in:, Q. Bramas, V. Gramoli, A. Milani (Eds.), 25th International Conference on Principles of Distributed Systems, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2022.","ista":"Nikabadi A, Korhonen J. 2022. Beyond distributed subgraph detection: Induced subgraphs, multicolored problems and graph parameters. 25th International Conference on Principles of Distributed Systems. OPODIS, LIPIcs, vol. 217, 15.","ieee":"A. Nikabadi and J. Korhonen, “Beyond distributed subgraph detection: Induced subgraphs, multicolored problems and graph parameters,” in 25th International Conference on Principles of Distributed Systems, Strasbourg, France, 2022, vol. 217.","apa":"Nikabadi, A., & Korhonen, J. (2022). Beyond distributed subgraph detection: Induced subgraphs, multicolored problems and graph parameters. In Q. Bramas, V. Gramoli, & A. Milani (Eds.), 25th International Conference on Principles of Distributed Systems (Vol. 217). Strasbourg, France: Schloss Dagstuhl - Leibniz-Zentrum für Informatik. https://doi.org/10.4230/LIPIcs.OPODIS.2021.15","ama":"Nikabadi A, Korhonen J. Beyond distributed subgraph detection: Induced subgraphs, multicolored problems and graph parameters. In: Bramas Q, Gramoli V, Milani A, eds. 25th International Conference on Principles of Distributed Systems. Vol 217. Schloss Dagstuhl - Leibniz-Zentrum für Informatik; 2022. doi:10.4230/LIPIcs.OPODIS.2021.15"},"publication":"25th International Conference on Principles of Distributed Systems","has_accepted_license":"1","article_processing_charge":"No","day":"01","scopus_import":"1","oa_version":"Published Version","file":[{"content_type":"application/pdf","file_size":790396,"creator":"dernst","access_level":"open_access","file_name":"2022_LIPICs_Nikabadi.pdf","checksum":"626551c14de5d4091573200ed0535752","success":1,"date_updated":"2022-05-02T07:53:00Z","date_created":"2022-05-02T07:53:00Z","relation":"main_file","file_id":"11345"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"11183","intvolume":" 217","ddc":["510"],"title":"Beyond distributed subgraph detection: Induced subgraphs, multicolored problems and graph parameters","status":"public","abstract":[{"text":"Subgraph detection has recently been one of the most studied problems in the CONGEST model of distributed computing. In this work, we study the distributed complexity of problems closely related to subgraph detection, mainly focusing on induced subgraph detection. The main line of this work presents lower bounds and parameterized algorithms w.r.t structural parameters of the input graph:\r\n- On general graphs, we give unconditional lower bounds for induced detection of cycles and patterns of treewidth 2 in CONGEST. Moreover, by adapting reductions from centralized parameterized complexity, we prove lower bounds in CONGEST for detecting patterns with a 4-clique, and for induced path detection conditional on the hardness of triangle detection in the congested clique.\r\n- On graphs of bounded degeneracy, we show that induced paths can be detected fast in CONGEST using techniques from parameterized algorithms, while detecting cycles and patterns of treewidth 2 is hard.\r\n- On graphs of bounded vertex cover number, we show that induced subgraph detection is easy in CONGEST for any pattern graph. More specifically, we adapt a centralized parameterized algorithm for a more general maximum common induced subgraph detection problem to the distributed setting. In addition to these induced subgraph detection results, we study various related problems in the CONGEST and congested clique models, including for multicolored versions of subgraph-detection-like problems.","lang":"eng"}],"type":"conference","alternative_title":["LIPIcs"],"doi":"10.4230/LIPIcs.OPODIS.2021.15","conference":{"name":"OPODIS","end_date":"2021-12-15","location":"Strasbourg, France","start_date":"2021-12-13"},"language":[{"iso":"eng"}],"oa":1,"tmp":{"name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","short":"CC BY (4.0)","image":"/images/cc_by.png"},"project":[{"grant_number":"805223","_id":"268A44D6-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","name":"Elastic Coordination for Scalable Machine Learning"}],"quality_controlled":"1","publication_identifier":{"isbn":["9783959772198"],"issn":["1868-8969"]},"month":"02","author":[{"full_name":"Nikabadi, Amir","first_name":"Amir","last_name":"Nikabadi"},{"full_name":"Korhonen, Janne","last_name":"Korhonen","first_name":"Janne","id":"C5402D42-15BC-11E9-A202-CA2BE6697425"}],"volume":217,"date_created":"2022-04-17T22:01:47Z","date_updated":"2022-05-02T07:56:35Z","year":"2022","acknowledgement":"Amir Nikabadi: Supported by the LABEX MILYON (ANR-10-LABX-0070) of Université de Lyon, within the program “Investissements d’Avenir” (ANR-11-IDEX-0007) operated by the French National Research Agency (ANR). Janne H. Korhonen: Supported by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML).\r\nWe thank François Le Gall and Masayuki Miyamoto for sharing their work on lower bounds for induced subgraph detection [36].","editor":[{"first_name":"Quentin","last_name":"Bramas","full_name":"Bramas, Quentin"},{"first_name":"Vincent","last_name":"Gramoli","full_name":"Gramoli, Vincent"},{"full_name":"Milani, Alessia","first_name":"Alessia","last_name":"Milani"}],"department":[{"_id":"DaAl"}],"publisher":"Schloss Dagstuhl - Leibniz-Zentrum für Informatik","publication_status":"published","ec_funded":1,"file_date_updated":"2022-05-02T07:53:00Z","article_number":"15"}]