[{"series_title":"Handbook of Model Checking","_id":"60","type":"book_chapter","status":"public","date_updated":"2021-01-12T08:05:35Z","citation":{"chicago":"Clarke, Edmund, Thomas A Henzinger, and Helmut Veith. “Introduction to Model Checking.” In Handbook of Model Checking, edited by Thomas A Henzinger, 1–26. Handbook of Model Checking. Springer, 2018. https://doi.org/10.1007/978-3-319-10575-8_1.","ista":"Clarke E, Henzinger TA, Veith H. 2018.Introduction to model checking. In: Handbook of Model Checking. , 1–26.","mla":"Clarke, Edmund, et al. “Introduction to Model Checking.” Handbook of Model Checking, edited by Thomas A Henzinger, Springer, 2018, pp. 1–26, doi:10.1007/978-3-319-10575-8_1.","apa":"Clarke, E., Henzinger, T. A., & Veith, H. (2018). Introduction to model checking. In T. A. Henzinger (Ed.), Handbook of Model Checking (pp. 1–26). Springer. https://doi.org/10.1007/978-3-319-10575-8_1","ama":"Clarke E, Henzinger TA, Veith H. Introduction to model checking. In: Henzinger TA, ed. Handbook of Model Checking. Handbook of Model Checking. Springer; 2018:1-26. doi:10.1007/978-3-319-10575-8_1","ieee":"E. Clarke, T. A. Henzinger, and H. Veith, “Introduction to model checking,” in Handbook of Model Checking, T. A. Henzinger, Ed. Springer, 2018, pp. 1–26.","short":"E. Clarke, T.A. Henzinger, H. Veith, in:, T.A. Henzinger (Ed.), Handbook of Model Checking, Springer, 2018, pp. 
1–26."},"user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","author":[{"first_name":"Edmund","full_name":"Clarke, Edmund","last_name":"Clarke"},{"last_name":"Henzinger","orcid":"0000-0002-2985-7724","full_name":"Henzinger, Thomas A","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","first_name":"Thomas A"},{"full_name":"Veith, Helmut","last_name":"Veith","first_name":"Helmut"}],"publist_id":"7994","editor":[{"first_name":"Thomas A","last_name":"Henzinger","full_name":"Henzinger, Thomas A"}],"title":"Introduction to model checking","department":[{"_id":"ToHe"}],"abstract":[{"text":"Model checking is a computer-assisted method for the analysis of dynamical systems that can be modeled by state-transition systems. Drawing from research traditions in mathematical logic, programming languages, hardware design, and theoretical computer science, model checking is now widely used for the verification of hardware and software in industry. This chapter is an introduction and short survey of model checking. 
The chapter aims to motivate and link the individual chapters of the handbook, and to provide context for readers who are not familiar with model checking.","lang":"eng"}],"oa_version":"None","quality_controlled":"1","scopus_import":1,"publisher":"Springer","month":"05","publication_status":"published","year":"2018","day":"19","publication":"Handbook of Model Checking","language":[{"iso":"eng"}],"page":"1 - 26","doi":"10.1007/978-3-319-10575-8_1","date_published":"2018-05-19T00:00:00Z","date_created":"2018-12-11T11:44:25Z"},{"oa":1,"publisher":"Bio-Protocol","quality_controlled":"1","acknowledgement":" FöFoLe project 947 (F.G.), the Friedrich-Baur-Stiftung project 41/16 (F.G.)","date_created":"2019-04-29T09:40:33Z","date_published":"2018-09-20T00:00:00Z","doi":"10.21769/bioprotoc.3018","publication":"Bio-Protocol","day":"20","year":"2018","has_accepted_license":"1","project":[{"_id":"260AA4E2-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"747687","name":"Mechanical Adaptation of Lamellipodial Actin Networks in Migrating Cells"}],"article_number":"e3018","title":"Platelet migration and bacterial trapping assay under flow","author":[{"first_name":"Shuxia","full_name":"Fan, Shuxia","last_name":"Fan"},{"last_name":"Lorenz","full_name":"Lorenz, Michael","first_name":"Michael"},{"first_name":"Steffen","full_name":"Massberg, Steffen","last_name":"Massberg"},{"orcid":"0000-0001-6120-3723","full_name":"Gärtner, Florian R","last_name":"Gärtner","id":"397A88EE-F248-11E8-B48F-1D18A9856A87","first_name":"Florian R"}],"user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","citation":{"apa":"Fan, S., Lorenz, M., Massberg, S., & Gärtner, F. R. (2018). Platelet migration and bacterial trapping assay under flow. Bio-Protocol. Bio-Protocol. https://doi.org/10.21769/bioprotoc.3018","ama":"Fan S, Lorenz M, Massberg S, Gärtner FR. Platelet migration and bacterial trapping assay under flow. Bio-Protocol. 2018;8(18). doi:10.21769/bioprotoc.3018","short":"S. Fan, M. 
Lorenz, S. Massberg, F.R. Gärtner, Bio-Protocol 8 (2018).","ieee":"S. Fan, M. Lorenz, S. Massberg, and F. R. Gärtner, “Platelet migration and bacterial trapping assay under flow,” Bio-Protocol, vol. 8, no. 18. Bio-Protocol, 2018.","mla":"Fan, Shuxia, et al. “Platelet Migration and Bacterial Trapping Assay under Flow.” Bio-Protocol, vol. 8, no. 18, e3018, Bio-Protocol, 2018, doi:10.21769/bioprotoc.3018.","ista":"Fan S, Lorenz M, Massberg S, Gärtner FR. 2018. Platelet migration and bacterial trapping assay under flow. Bio-Protocol. 8(18), e3018.","chicago":"Fan, Shuxia, Michael Lorenz, Steffen Massberg, and Florian R Gärtner. “Platelet Migration and Bacterial Trapping Assay under Flow.” Bio-Protocol. Bio-Protocol, 2018. https://doi.org/10.21769/bioprotoc.3018."},"intvolume":" 8","month":"09","oa_version":"Published Version","abstract":[{"text":"Blood platelets are critical for hemostasis and thrombosis, but also play diverse roles during immune responses. We have recently reported that platelets migrate at sites of infection in vitro and in vivo. Importantly, platelets use their ability to migrate to collect and bundle fibrin (ogen)-bound bacteria accomplishing efficient intravascular bacterial trapping. Here, we describe a method that allows analyzing platelet migration in vitro, focusing on their ability to collect bacteria and trap bacteria under flow.","lang":"eng"}],"ec_funded":1,"issue":"18","volume":8,"language":[{"iso":"eng"}],"file":[{"creator":"dernst","file_size":2928337,"date_updated":"2020-07-14T12:47:28Z","file_name":"2018_BioProtocol_Fan.pdf","date_created":"2019-04-30T08:04:33Z","relation":"main_file","access_level":"open_access","content_type":"application/pdf","checksum":"d4588377e789da7f360b553ae02c5119","file_id":"6360"}],"publication_status":"published","publication_identifier":{"issn":["2331-8325"]},"keyword":["Platelets","Cell migration","Bacteria","Shear flow","Fibrinogen","E. 
coli"],"status":"public","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"type":"journal_article","_id":"6354","department":[{"_id":"MiSi"}],"file_date_updated":"2020-07-14T12:47:28Z","ddc":["570"],"date_updated":"2021-01-12T08:07:12Z"},{"date_updated":"2020-07-14T23:06:21Z","citation":{"chicago":"Petritsch, Barbara. Open Access at IST Austria 2009-2017. IST Austria, 2018. https://doi.org/10.5281/zenodo.1410279.","ista":"Petritsch B. 2018. Open Access at IST Austria 2009-2017, IST Austria,p.","mla":"Petritsch, Barbara. Open Access at IST Austria 2009-2017. IST Austria, 2018, doi:10.5281/zenodo.1410279.","ama":"Petritsch B. Open Access at IST Austria 2009-2017. IST Austria; 2018. doi:10.5281/zenodo.1410279","apa":"Petritsch, B. (2018). Open Access at IST Austria 2009-2017. Presented at the Open-Access-Tage, Graz, Austria: IST Austria. https://doi.org/10.5281/zenodo.1410279","ieee":"B. Petritsch, Open Access at IST Austria 2009-2017. IST Austria, 2018.","short":"B. 
Petritsch, Open Access at IST Austria 2009-2017, IST Austria, 2018."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","ddc":["020"],"author":[{"id":"406048EC-F248-11E8-B48F-1D18A9856A87","first_name":"Barbara","orcid":"0000-0003-2724-4614","full_name":"Petritsch, Barbara","last_name":"Petritsch"}],"file_date_updated":"2020-07-14T12:47:30Z","department":[{"_id":"E-Lib"}],"title":"Open Access at IST Austria 2009-2017","_id":"6459","conference":{"name":"Open-Access-Tage","end_date":"2018-09-26","location":"Graz, Austria","start_date":"2018-09-24"},"tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"type":"conference_poster","keyword":["Open Access","Publication Analysis"],"status":"public","year":"2018","publication_status":"published","has_accepted_license":"1","language":[{"iso":"eng"}],"file":[{"file_id":"6460","checksum":"9063ab4d10ea93353c3a03bbf53fbcf1","content_type":"application/pdf","relation":"main_file","access_level":"open_access","file_name":"Poster_Beitrag_125_Petritsch.pdf","date_created":"2019-05-16T07:26:25Z","file_size":1967778,"date_updated":"2020-07-14T12:47:30Z","creator":"dernst"}],"day":"24","date_created":"2019-05-16T07:27:14Z","doi":"10.5281/zenodo.1410279","date_published":"2018-09-24T00:00:00Z","oa_version":"Published Version","oa":1,"publisher":"IST Austria","month":"09"},{"oa_version":"None","abstract":[{"text":"This chapter finds an agreement of equivariant indices of semi-classical homomorphisms between pairwise mirror branes in the GL2 Higgs moduli space on a Riemann surface. On one side of the agreement, components of the Lagrangian brane of U(1,1) Higgs bundles, whose mirror was proposed by Hitchin to be certain even exterior powers of the hyperholomorphic Dirac bundle on the SL2 Higgs moduli space, are present. The agreement arises from a mysterious functional equation. 
This gives strong computational evidence for Hitchin’s proposal.","lang":"eng"}],"month":"01","publisher":"Oxford University Press","scopus_import":1,"quality_controlled":"1","language":[{"iso":"eng"}],"publication":"Geometry and Physics: Volume I","day":"01","year":"2018","publication_status":"published","publication_identifier":{"isbn":["9780198802013","9780191840500"]},"date_created":"2019-06-06T12:42:01Z","date_published":"2018-01-01T00:00:00Z","doi":"10.1093/oso/9780198802013.003.0009","page":"189-218","_id":"6525","status":"public","type":"book_chapter","user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","citation":{"chicago":"Hausel, Tamás, Anton Mellit, and Du Pei. “Mirror Symmetry with Branes by Equivariant Verlinde Formulas.” In Geometry and Physics: Volume I, 189–218. Oxford University Press, 2018. https://doi.org/10.1093/oso/9780198802013.003.0009.","ista":"Hausel T, Mellit A, Pei D. 2018.Mirror symmetry with branes by equivariant verlinde formulas. In: Geometry and Physics: Volume I. , 189–218.","mla":"Hausel, Tamás, et al. “Mirror Symmetry with Branes by Equivariant Verlinde Formulas.” Geometry and Physics: Volume I, Oxford University Press, 2018, pp. 189–218, doi:10.1093/oso/9780198802013.003.0009.","ieee":"T. Hausel, A. Mellit, and D. Pei, “Mirror symmetry with branes by equivariant verlinde formulas,” in Geometry and Physics: Volume I, Oxford University Press, 2018, pp. 189–218.","short":"T. Hausel, A. Mellit, D. Pei, in:, Geometry and Physics: Volume I, Oxford University Press, 2018, pp. 189–218.","ama":"Hausel T, Mellit A, Pei D. Mirror symmetry with branes by equivariant verlinde formulas. In: Geometry and Physics: Volume I. Oxford University Press; 2018:189-218. doi:10.1093/oso/9780198802013.003.0009","apa":"Hausel, T., Mellit, A., & Pei, D. (2018). Mirror symmetry with branes by equivariant verlinde formulas. In Geometry and Physics: Volume I (pp. 189–218). Oxford University Press. 
https://doi.org/10.1093/oso/9780198802013.003.0009"},"date_updated":"2021-01-12T08:07:52Z","title":"Mirror symmetry with branes by equivariant verlinde formulas","department":[{"_id":"TaHa"}],"author":[{"last_name":"Hausel","full_name":"Hausel, Tamás","id":"4A0666D8-F248-11E8-B48F-1D18A9856A87","first_name":"Tamás"},{"last_name":"Mellit","full_name":"Mellit, Anton","first_name":"Anton","id":"388D3134-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Du","last_name":"Pei","full_name":"Pei, Du"}]},{"article_number":"543-616","project":[{"call_identifier":"FP7","_id":"258DCDE6-B435-11E9-9278-68D0E5697425","name":"Random matrices, universality and disordered quantum systems","grant_number":"338804"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"short":"J. Lee, K. Schnelli, Probability Theory and Related Fields 171 (2018).","ieee":"J. Lee and K. Schnelli, “Local law and Tracy–Widom limit for sparse random matrices,” Probability Theory and Related Fields, vol. 171, no. 1–2. Springer, 2018.","apa":"Lee, J., & Schnelli, K. (2018). Local law and Tracy–Widom limit for sparse random matrices. Probability Theory and Related Fields. Springer. https://doi.org/10.1007/s00440-017-0787-8","ama":"Lee J, Schnelli K. Local law and Tracy–Widom limit for sparse random matrices. Probability Theory and Related Fields. 2018;171(1-2). doi:10.1007/s00440-017-0787-8","mla":"Lee, Jii, and Kevin Schnelli. “Local Law and Tracy–Widom Limit for Sparse Random Matrices.” Probability Theory and Related Fields, vol. 171, no. 1–2, 543–616, Springer, 2018, doi:10.1007/s00440-017-0787-8.","ista":"Lee J, Schnelli K. 2018. Local law and Tracy–Widom limit for sparse random matrices. Probability Theory and Related Fields. 171(1–2), 543–616.","chicago":"Lee, Jii, and Kevin Schnelli. “Local Law and Tracy–Widom Limit for Sparse Random Matrices.” Probability Theory and Related Fields. Springer, 2018. 
https://doi.org/10.1007/s00440-017-0787-8."},"title":"Local law and Tracy–Widom limit for sparse random matrices","publist_id":"7017","author":[{"last_name":"Lee","full_name":"Lee, Jii","first_name":"Jii"},{"orcid":"0000-0003-0954-3231","full_name":"Schnelli, Kevin","last_name":"Schnelli","id":"434AD0AE-F248-11E8-B48F-1D18A9856A87","first_name":"Kevin"}],"external_id":{"arxiv":["1605.08767"]},"quality_controlled":"1","publisher":"Springer","oa":1,"day":"14","publication":"Probability Theory and Related Fields","year":"2018","date_published":"2018-06-14T00:00:00Z","doi":"10.1007/s00440-017-0787-8","date_created":"2018-12-11T11:47:56Z","_id":"690","status":"public","type":"journal_article","date_updated":"2021-01-12T08:09:33Z","department":[{"_id":"LaEr"}],"oa_version":"Preprint","abstract":[{"text":"We consider spectral properties and the edge universality of sparse random matrices, the class of random matrices that includes the adjacency matrices of the Erdős–Rényi graph model G(N, p). We prove a local law for the eigenvalue density up to the spectral edges. Under a suitable condition on the sparsity, we also prove that the rescaled extremal eigenvalues exhibit GOE Tracy–Widom fluctuations if a deterministic shift of the spectral edge due to the sparsity is included. 
For the adjacency matrix of the Erdős–Rényi graph this establishes the Tracy–Widom fluctuations of the second largest eigenvalue when p is much larger than N−2/3 with a deterministic shift of order (Np)−1.","lang":"eng"}],"month":"06","intvolume":" 171","scopus_import":1,"main_file_link":[{"url":"https://arxiv.org/abs/1605.08767","open_access":"1"}],"language":[{"iso":"eng"}],"publication_status":"published","volume":171,"issue":"1-2","ec_funded":1},{"department":[{"_id":"VlKo"}],"date_updated":"2021-01-12T08:11:32Z","type":"journal_article","status":"public","_id":"703","volume":40,"issue":"7","publication_status":"published","publication_identifier":{"issn":["01628828"]},"language":[{"iso":"eng"}],"main_file_link":[{"url":"https://arxiv.org/abs/1508.07902","open_access":"1"}],"scopus_import":1,"intvolume":" 40","month":"07","abstract":[{"text":"We consider the NP-hard problem of MAP-inference for undirected discrete graphical models. We propose a polynomial time and practically efficient algorithm for finding a part of its optimal solution. Specifically, our algorithm marks some labels of the considered graphical model either as (i) optimal, meaning that they belong to all optimal solutions of the inference problem; (ii) non-optimal if they provably do not belong to any solution. With access to an exact solver of a linear programming relaxation to the MAP-inference problem, our algorithm marks the maximal possible (in a specified sense) number of labels. We also present a version of the algorithm, which has access to a suboptimal dual solver only and still can ensure the (non-)optimality for the marked labels, although the overall number of the marked labels may decrease. We propose an efficient implementation, which runs in time comparable to a single run of a suboptimal dual solver. 
Our method is well-scalable and shows state-of-the-art results on computational benchmarks from machine learning and computer vision.","lang":"eng"}],"oa_version":"Preprint","external_id":{"arxiv":["1508.07902"]},"author":[{"first_name":"Alexander","full_name":"Shekhovtsov, Alexander","last_name":"Shekhovtsov"},{"full_name":"Swoboda, Paul","last_name":"Swoboda","first_name":"Paul","id":"446560C6-F248-11E8-B48F-1D18A9856A87"},{"last_name":"Savchynskyy","full_name":"Savchynskyy, Bogdan","first_name":"Bogdan"}],"publist_id":"6992","title":"Maximum persistency via iterative relaxed inference with graphical models","citation":{"ama":"Shekhovtsov A, Swoboda P, Savchynskyy B. Maximum persistency via iterative relaxed inference with graphical models. IEEE Transactions on Pattern Analysis and Machine Intelligence. 2018;40(7):1668-1682. doi:10.1109/TPAMI.2017.2730884","apa":"Shekhovtsov, A., Swoboda, P., & Savchynskyy, B. (2018). Maximum persistency via iterative relaxed inference with graphical models. IEEE Transactions on Pattern Analysis and Machine Intelligence. IEEE. https://doi.org/10.1109/TPAMI.2017.2730884","ieee":"A. Shekhovtsov, P. Swoboda, and B. Savchynskyy, “Maximum persistency via iterative relaxed inference with graphical models,” IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 40, no. 7. IEEE, pp. 1668–1682, 2018.","short":"A. Shekhovtsov, P. Swoboda, B. Savchynskyy, IEEE Transactions on Pattern Analysis and Machine Intelligence 40 (2018) 1668–1682.","mla":"Shekhovtsov, Alexander, et al. “Maximum Persistency via Iterative Relaxed Inference with Graphical Models.” IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 40, no. 7, IEEE, 2018, pp. 1668–82, doi:10.1109/TPAMI.2017.2730884.","ista":"Shekhovtsov A, Swoboda P, Savchynskyy B. 2018. Maximum persistency via iterative relaxed inference with graphical models. IEEE Transactions on Pattern Analysis and Machine Intelligence. 
40(7), 1668–1682.","chicago":"Shekhovtsov, Alexander, Paul Swoboda, and Bogdan Savchynskyy. “Maximum Persistency via Iterative Relaxed Inference with Graphical Models.” IEEE Transactions on Pattern Analysis and Machine Intelligence. IEEE, 2018. https://doi.org/10.1109/TPAMI.2017.2730884."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","page":"1668-1682","date_created":"2018-12-11T11:48:01Z","doi":"10.1109/TPAMI.2017.2730884","date_published":"2018-07-01T00:00:00Z","year":"2018","publication":"IEEE Transactions on Pattern Analysis and Machine Intelligence","day":"01","oa":1,"publisher":"IEEE","quality_controlled":"1"},{"type":"conference","conference":{"name":"EDBT: Conference on Extending Database Technology","end_date":"2018-03-29","location":"Vienna, Austria","start_date":"2018-03-26"},"tmp":{"short":"CC BY-NC-ND (4.0)","name":"Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND 4.0)","legal_code_url":"https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode","image":"/images/cc_by_nc_nd.png"},"status":"public","_id":"7116","file_date_updated":"2020-07-14T12:47:49Z","department":[{"_id":"DaAl"}],"date_updated":"2023-02-23T12:59:17Z","ddc":["000"],"scopus_import":1,"month":"03","abstract":[{"lang":"eng","text":"Training deep learning models has received tremendous research interest recently. In particular, there has been intensive research on reducing the communication cost of training when using multiple computational devices, through reducing the precision of the underlying data representation. Naturally, such methods induce system trade-offs—lowering communication precision could de-crease communication overheads and improve scalability; but, on the other hand, it can also reduce the accuracy of training. 
In this paper, we study this trade-off space, and ask:Can low-precision communication consistently improve the end-to-end performance of training modern neural networks, with no accuracy loss?From the performance point of view, the answer to this question may appear deceptively easy: compressing communication through low precision should help when the ratio between communication and computation is high. However, this answer is less straightforward when we try to generalize this principle across various neural network architectures (e.g., AlexNet vs. ResNet),number of GPUs (e.g., 2 vs. 8 GPUs), machine configurations(e.g., EC2 instances vs. NVIDIA DGX-1), communication primitives (e.g., MPI vs. NCCL), and even different GPU architectures(e.g., Kepler vs. Pascal). Currently, it is not clear how a realistic realization of all these factors maps to the speed up provided by low-precision communication. In this paper, we conduct an empirical study to answer this question and report the insights."}],"oa_version":"Published Version","publication_identifier":{"isbn":["9783893180783"],"issn":["2367-2005"]},"publication_status":"published","file":[{"date_created":"2019-11-26T14:23:04Z","file_name":"2018_OpenProceedings_Grubic.pdf","creator":"dernst","date_updated":"2020-07-14T12:47:49Z","file_size":1603204,"file_id":"7118","checksum":"ec979b56abc71016d6e6adfdadbb4afe","access_level":"open_access","relation":"main_file","content_type":"application/pdf"}],"language":[{"iso":"eng"}],"author":[{"first_name":"Demjan","last_name":"Grubic","full_name":"Grubic, Demjan"},{"last_name":"Tam","full_name":"Tam, Leo","first_name":"Leo"},{"id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","first_name":"Dan-Adrian","orcid":"0000-0003-3650-940X","full_name":"Alistarh, Dan-Adrian","last_name":"Alistarh"},{"first_name":"Ce","full_name":"Zhang, Ce","last_name":"Zhang"}],"article_processing_charge":"No","title":"Synchronous multi-GPU training for deep learning with low-precision communications: An 
empirical study","citation":{"short":"D. Grubic, L. Tam, D.-A. Alistarh, C. Zhang, in:, Proceedings of the 21st International Conference on Extending Database Technology, OpenProceedings, 2018, pp. 145–156.","ieee":"D. Grubic, L. Tam, D.-A. Alistarh, and C. Zhang, “Synchronous multi-GPU training for deep learning with low-precision communications: An empirical study,” in Proceedings of the 21st International Conference on Extending Database Technology, Vienna, Austria, 2018, pp. 145–156.","ama":"Grubic D, Tam L, Alistarh D-A, Zhang C. Synchronous multi-GPU training for deep learning with low-precision communications: An empirical study. In: Proceedings of the 21st International Conference on Extending Database Technology. OpenProceedings; 2018:145-156. doi:10.5441/002/EDBT.2018.14","apa":"Grubic, D., Tam, L., Alistarh, D.-A., & Zhang, C. (2018). Synchronous multi-GPU training for deep learning with low-precision communications: An empirical study. In Proceedings of the 21st International Conference on Extending Database Technology (pp. 145–156). Vienna, Austria: OpenProceedings. https://doi.org/10.5441/002/EDBT.2018.14","mla":"Grubic, Demjan, et al. “Synchronous Multi-GPU Training for Deep Learning with Low-Precision Communications: An Empirical Study.” Proceedings of the 21st International Conference on Extending Database Technology, OpenProceedings, 2018, pp. 145–56, doi:10.5441/002/EDBT.2018.14.","ista":"Grubic D, Tam L, Alistarh D-A, Zhang C. 2018. Synchronous multi-GPU training for deep learning with low-precision communications: An empirical study. Proceedings of the 21st International Conference on Extending Database Technology. EDBT: Conference on Extending Database Technology, 145–156.","chicago":"Grubic, Demjan, Leo Tam, Dan-Adrian Alistarh, and Ce Zhang. “Synchronous Multi-GPU Training for Deep Learning with Low-Precision Communications: An Empirical Study.” In Proceedings of the 21st International Conference on Extending Database Technology, 145–56. 
OpenProceedings, 2018. https://doi.org/10.5441/002/EDBT.2018.14."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","publisher":"OpenProceedings","quality_controlled":"1","oa":1,"page":"145-156","doi":"10.5441/002/EDBT.2018.14","date_published":"2018-03-26T00:00:00Z","date_created":"2019-11-26T14:19:11Z","has_accepted_license":"1","year":"2018","day":"26","publication":"Proceedings of the 21st International Conference on Extending Database Technology"},{"main_file_link":[{"open_access":"1","url":"https://eprint.iacr.org/2018/194"}],"scopus_import":1,"alternative_title":["LIPIcs"],"intvolume":" 124","month":"12","abstract":[{"lang":"eng","text":"Proofs of space (PoS) [Dziembowski et al., CRYPTO'15] are proof systems where a prover can convince a verifier that he \"wastes\" disk space. PoS were introduced as a more ecological and economical replacement for proofs of work which are currently used to secure blockchains like Bitcoin. In this work we investigate extensions of PoS which allow the prover to embed useful data into the dedicated space, which later can be recovered. Our first contribution is a security proof for the original PoS from CRYPTO'15 in the random oracle model (the original proof only applied to a restricted class of adversaries which can store a subset of the data an honest prover would store). When this PoS is instantiated with recent constructions of maximally depth robust graphs, our proof implies basically optimal security. As a second contribution we show three different extensions of this PoS where useful data can be embedded into the space required by the prover. Our security proof for the PoS extends (non-trivially) to these constructions. We discuss how some of these variants can be used as proofs of catalytic space (PoCS), a notion we put forward in this work, and which basically is a PoS where most of the space required by the prover can be used to backup useful data. 
Finally we discuss how one of the extensions is a candidate construction for a proof of replication (PoR), a proof system recently suggested in the Filecoin whitepaper. "}],"oa_version":"Published Version","ec_funded":1,"volume":124,"publication_status":"published","publication_identifier":{"issn":["1868-8969"],"isbn":["978-3-95977-095-8"]},"language":[{"iso":"eng"}],"file":[{"file_size":822884,"date_updated":"2020-07-14T12:47:57Z","creator":"dernst","file_name":"2018_LIPIcs_Pietrzak.pdf","date_created":"2020-02-04T08:17:52Z","content_type":"application/pdf","relation":"main_file","access_level":"open_access","file_id":"7443","checksum":"5cebb7f7849a3beda898f697d755dd96"}],"conference":{"end_date":"2019-01-12","location":"San Diego, CA, United States","start_date":"2019-01-10","name":"ITCS: Innovations in theoretical Computer Science Conference"},"tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"type":"conference","status":"public","_id":"7407","department":[{"_id":"KrPi"}],"file_date_updated":"2020-07-14T12:47:57Z","date_updated":"2021-01-12T08:13:26Z","ddc":["000"],"oa":1,"publisher":"Schloss Dagstuhl - Leibniz-Zentrum für Informatik","quality_controlled":"1","page":"59:1-59:25","date_created":"2020-01-30T09:16:05Z","doi":"10.4230/LIPICS.ITCS.2019.59","date_published":"2018-12-31T00:00:00Z","year":"2018","has_accepted_license":"1","publication":"10th Innovations in Theoretical Computer Science Conference (ITCS 2019)","day":"31","project":[{"grant_number":"682815","name":"Teaching Old Crypto New Tricks","_id":"258AA5B2-B435-11E9-9278-68D0E5697425","call_identifier":"H2020"}],"article_processing_charge":"No","author":[{"first_name":"Krzysztof Z","id":"3E04A7AA-F248-11E8-B48F-1D18A9856A87","last_name":"Pietrzak","full_name":"Pietrzak, Krzysztof Z","orcid":"0000-0002-9139-1654"}],"title":"Proofs of 
catalytic space","citation":{"mla":"Pietrzak, Krzysztof Z. “Proofs of Catalytic Space.” 10th Innovations in Theoretical Computer Science Conference (ITCS 2019), vol. 124, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2018, p. 59:1-59:25, doi:10.4230/LIPICS.ITCS.2019.59.","apa":"Pietrzak, K. Z. (2018). Proofs of catalytic space. In 10th Innovations in Theoretical Computer Science Conference (ITCS 2019) (Vol. 124, p. 59:1-59:25). San Diego, CA, United States: Schloss Dagstuhl - Leibniz-Zentrum für Informatik. https://doi.org/10.4230/LIPICS.ITCS.2019.59","ama":"Pietrzak KZ. Proofs of catalytic space. In: 10th Innovations in Theoretical Computer Science Conference (ITCS 2019). Vol 124. Schloss Dagstuhl - Leibniz-Zentrum für Informatik; 2018:59:1-59:25. doi:10.4230/LIPICS.ITCS.2019.59","ieee":"K. Z. Pietrzak, “Proofs of catalytic space,” in 10th Innovations in Theoretical Computer Science Conference (ITCS 2019), San Diego, CA, United States, 2018, vol. 124, p. 59:1-59:25.","short":"K.Z. Pietrzak, in:, 10th Innovations in Theoretical Computer Science Conference (ITCS 2019), Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2018, p. 59:1-59:25.","chicago":"Pietrzak, Krzysztof Z. “Proofs of Catalytic Space.” In 10th Innovations in Theoretical Computer Science Conference (ITCS 2019), 124:59:1-59:25. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2018. https://doi.org/10.4230/LIPICS.ITCS.2019.59.","ista":"Pietrzak KZ. 2018. Proofs of catalytic space. 10th Innovations in Theoretical Computer Science Conference (ITCS 2019). ITCS: Innovations in theoretical Computer Science Conference, LIPIcs, vol. 124, 59:1-59:25."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87"},{"_id":"6001","article_number":"18","type":"journal_article","status":"public","citation":{"mla":"Alistarh, Dan-Adrian, et al. “ThreadScan: Automatic and Scalable Memory Reclamation.” ACM Transactions on Parallel Computing, vol. 4, no. 
4, 18, Association for Computing Machinery, 2018, doi:10.1145/3201897.","apa":"Alistarh, D.-A., Leiserson, W., Matveev, A., & Shavit, N. (2018). ThreadScan: Automatic and scalable memory reclamation. ACM Transactions on Parallel Computing. Association for Computing Machinery. https://doi.org/10.1145/3201897","ama":"Alistarh D-A, Leiserson W, Matveev A, Shavit N. ThreadScan: Automatic and scalable memory reclamation. ACM Transactions on Parallel Computing. 2018;4(4). doi:10.1145/3201897","ieee":"D.-A. Alistarh, W. Leiserson, A. Matveev, and N. Shavit, “ThreadScan: Automatic and scalable memory reclamation,” ACM Transactions on Parallel Computing, vol. 4, no. 4. Association for Computing Machinery, 2018.","short":"D.-A. Alistarh, W. Leiserson, A. Matveev, N. Shavit, ACM Transactions on Parallel Computing 4 (2018).","chicago":"Alistarh, Dan-Adrian, William Leiserson, Alexander Matveev, and Nir Shavit. “ThreadScan: Automatic and Scalable Memory Reclamation.” ACM Transactions on Parallel Computing. Association for Computing Machinery, 2018. https://doi.org/10.1145/3201897.","ista":"Alistarh D-A, Leiserson W, Matveev A, Shavit N. 2018. ThreadScan: Automatic and scalable memory reclamation. ACM Transactions on Parallel Computing. 
4(4), 18."},"date_updated":"2023-02-23T13:17:54Z","user_id":"3E5EF7F0-F248-11E8-B48F-1D18A9856A87","author":[{"id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian","orcid":"0000-0003-3650-940X","last_name":"Alistarh"},{"full_name":"Leiserson, William","last_name":"Leiserson","first_name":"William"},{"full_name":"Matveev, Alexander","last_name":"Matveev","first_name":"Alexander"},{"first_name":"Nir","last_name":"Shavit","full_name":"Shavit, Nir"}],"department":[{"_id":"DaAl"}],"title":"ThreadScan: Automatic and scalable memory reclamation","abstract":[{"lang":"eng","text":"The concurrent memory reclamation problem is that of devising a way for a deallocating thread to verify that no other concurrent threads hold references to a memory block being deallocated. To date, in the absence of automatic garbage collection, there is no satisfactory solution to this problem; existing tracking methods like hazard pointers, reference counters, or epoch-based techniques like RCU are either prohibitively expensive or require significant programming expertise to the extent that implementing them efficiently can be worthy of a publication. None of the existing techniques are automatic or even semi-automated.\r\nIn this article, we take a new approach to concurrent memory reclamation. 
Instead of manually tracking access to memory locations as done in techniques like hazard pointers, or restricting shared accesses to specific epoch boundaries as in RCU, our algorithm, called ThreadScan, leverages operating system signaling to automatically detect which memory locations are being accessed by concurrent threads.\r\nInitial empirical evidence shows that ThreadScan scales surprisingly well and requires negligible programming effort beyond the standard use of Malloc and Free."}],"oa_version":"None","scopus_import":1,"quality_controlled":"1","publisher":"Association for Computing Machinery","month":"09","intvolume":" 4","publication_identifier":{"issn":["2329-4949"]},"publication_status":"published","year":"2018","day":"01","publication":"ACM Transactions on Parallel Computing","language":[{"iso":"eng"}],"date_published":"2018-09-01T00:00:00Z","issue":"4","doi":"10.1145/3201897","volume":4,"related_material":{"record":[{"id":"779","status":"public","relation":"earlier_version"}]},"date_created":"2019-02-14T13:24:11Z"},{"file_date_updated":"2020-07-14T12:48:03Z","title":"Model compression via distillation and quantization","department":[{"_id":"DaAl"}],"external_id":{"arxiv":["1802.05668"]},"article_processing_charge":"No","author":[{"last_name":"Polino","full_name":"Polino, Antonio","first_name":"Antonio"},{"first_name":"Razvan","last_name":"Pascanu","full_name":"Pascanu, Razvan"},{"first_name":"Dan-Adrian","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","last_name":"Alistarh","orcid":"0000-0003-3650-940X","full_name":"Alistarh, Dan-Adrian"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","ddc":["000"],"date_updated":"2023-02-23T13:18:41Z","citation":{"ista":"Polino A, Pascanu R, Alistarh D-A. 2018. Model compression via distillation and quantization. 6th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","chicago":"Polino, Antonio, Razvan Pascanu, and Dan-Adrian Alistarh. 
“Model Compression via Distillation and Quantization.” In 6th International Conference on Learning Representations, 2018.","apa":"Polino, A., Pascanu, R., & Alistarh, D.-A. (2018). Model compression via distillation and quantization. In 6th International Conference on Learning Representations. Vancouver, Canada.","ama":"Polino A, Pascanu R, Alistarh D-A. Model compression via distillation and quantization. In: 6th International Conference on Learning Representations. ; 2018.","ieee":"A. Polino, R. Pascanu, and D.-A. Alistarh, “Model compression via distillation and quantization,” in 6th International Conference on Learning Representations, Vancouver, Canada, 2018.","short":"A. Polino, R. Pascanu, D.-A. Alistarh, in:, 6th International Conference on Learning Representations, 2018.","mla":"Polino, Antonio, et al. “Model Compression via Distillation and Quantization.” 6th International Conference on Learning Representations, 2018."},"status":"public","conference":{"name":"ICLR: International Conference on Learning Representations","location":"Vancouver, Canada","end_date":"2018-05-03","start_date":"2018-04-30"},"type":"conference","_id":"7812","date_created":"2020-05-10T22:00:51Z","date_published":"2018-05-01T00:00:00Z","language":[{"iso":"eng"}],"publication":"6th International Conference on Learning Representations","file":[{"relation":"main_file","access_level":"open_access","content_type":"application/pdf","checksum":"a4336c167978e81891970e4e4517a8c3","file_id":"7894","creator":"dernst","file_size":308339,"date_updated":"2020-07-14T12:48:03Z","file_name":"2018_ICLR_Polino.pdf","date_created":"2020-05-26T13:02:00Z"}],"day":"01","publication_status":"published","year":"2018","has_accepted_license":"1","month":"05","oa":1,"quality_controlled":"1","scopus_import":1,"oa_version":"Published Version","abstract":[{"lang":"eng","text":"Deep neural networks (DNNs) continue to make significant advances, solving tasks from image classification to translation or reinforcement 
learning. One aspect of the field receiving considerable attention is efficiently executing deep models in resource-constrained environments, such as mobile or embedded devices. This paper focuses on this problem, and proposes two new compression methods, which jointly leverage weight quantization and distillation of larger teacher networks into smaller student networks. The first method we propose is called quantized distillation and leverages distillation during the training process, by incorporating distillation loss, expressed with respect to the teacher, into the training of a student network whose weights are quantized to a limited set of levels. The second method, differentiable quantization, optimizes the location of quantization points through stochastic gradient descent, to better fit the behavior of the teacher model. We validate both methods through experiments on convolutional and recurrent architectures. We show that quantized shallow students can reach similar accuracy levels to full-precision teacher models, while providing order of magnitude compression, and inference speedup that is linear in the depth reduction. In sum, our results enable DNNs for resource-constrained environments to leverage architecture and accuracy advances developed on more powerful devices."}]}]