[{"file_date_updated":"2022-01-26T07:38:08Z","year":"2021","acknowledgement":"The authors would like to thank the reviewers for their insightful comments. RH and RG were partially supported by\r\nHorizon-2020 ECSEL Project grant No. 783163 (iDev40). RH was partially supported by Boeing. ML was supported\r\nin part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award). SG was funded by FWF\r\nproject W1255-N23. JC was partially supported by NAWA Polish Returns grant PPN/PPO/2018/1/00029. SS was supported by NSF awards DCL-2040599, CCF-1918225, and CPS-1446832.\r\n","publication_status":"published","publisher":"AAAI Press","department":[{"_id":"GradSch"},{"_id":"ToHe"}],"author":[{"full_name":"Grunbacher, Sophie","first_name":"Sophie","last_name":"Grunbacher"},{"last_name":"Hasani","first_name":"Ramin","full_name":"Hasani, Ramin"},{"last_name":"Lechner","first_name":"Mathias","id":"3DC22916-F248-11E8-B48F-1D18A9856A87","full_name":"Lechner, Mathias"},{"first_name":"Jacek","last_name":"Cyranka","full_name":"Cyranka, Jacek"},{"full_name":"Smolka, Scott A","first_name":"Scott A","last_name":"Smolka"},{"full_name":"Grosu, Radu","last_name":"Grosu","first_name":"Radu"}],"date_created":"2022-01-25T15:47:20Z","date_updated":"2022-05-24T06:33:14Z","volume":35,"month":"05","publication_identifier":{"eissn":["2374-3468"],"isbn":["978-1-57735-866-4"],"issn":["2159-5399"]},"oa":1,"main_file_link":[{"url":"https://ojs.aaai.org/index.php/AAAI/article/view/17372","open_access":"1"}],"external_id":{"arxiv":["2012.08863"]},"quality_controlled":"1","project":[{"grant_number":"Z211","_id":"25F42A32-B435-11E9-9278-68D0E5697425","name":"The Wittgenstein Prize","call_identifier":"FWF"}],"conference":{"start_date":"2021-02-02","location":"Virtual","end_date":"2021-02-09","name":"AAAI: Association for the Advancement of Artificial Intelligence"},"language":[{"iso":"eng"}],"type":"conference","alternative_title":["Technical Tracks"],"abstract":[{"text":"We show that 
Neural ODEs, an emerging class of time-continuous neural networks, can be verified by solving a set of global-optimization problems. For this purpose, we introduce Stochastic Lagrangian Reachability (SLR), an\r\nabstraction-based technique for constructing a tight Reachtube (an over-approximation of the set of reachable states\r\nover a given time-horizon), and provide stochastic guarantees in the form of confidence intervals for the Reachtube bounds. SLR inherently avoids the infamous wrapping effect (accumulation of over-approximation errors) by performing local optimization steps to expand safe regions instead of repeatedly forward-propagating them as is done by deterministic reachability methods. To enable fast local optimizations, we introduce a novel forward-mode adjoint sensitivity method to compute gradients without the need for backpropagation. Finally, we establish asymptotic and non-asymptotic convergence rates for SLR.","lang":"eng"}],"issue":"13","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"10669","title":"On the verification of neural ODEs with stochastic guarantees","status":"public","ddc":["000"],"intvolume":" 35","oa_version":"Published Version","file":[{"checksum":"468d07041e282a1d46ffdae92f709630","success":1,"date_updated":"2022-01-26T07:38:08Z","date_created":"2022-01-26T07:38:08Z","relation":"main_file","file_id":"10680","content_type":"application/pdf","file_size":286906,"creator":"mlechner","access_level":"open_access","file_name":"17372-Article Text-20866-1-2-20210518.pdf"}],"day":"28","has_accepted_license":"1","article_processing_charge":"No","publication":"Proceedings of the AAAI Conference on Artificial Intelligence","citation":{"chicago":"Grunbacher, Sophie, Ramin Hasani, Mathias Lechner, Jacek Cyranka, Scott A Smolka, and Radu Grosu. “On the Verification of Neural ODEs with Stochastic Guarantees.” In Proceedings of the AAAI Conference on Artificial Intelligence, 35:11525–35. 
AAAI Press, 2021.","mla":"Grunbacher, Sophie, et al. “On the Verification of Neural ODEs with Stochastic Guarantees.” Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, no. 13, AAAI Press, 2021, pp. 11525–35.","short":"S. Grunbacher, R. Hasani, M. Lechner, J. Cyranka, S.A. Smolka, R. Grosu, in:, Proceedings of the AAAI Conference on Artificial Intelligence, AAAI Press, 2021, pp. 11525–11535.","ista":"Grunbacher S, Hasani R, Lechner M, Cyranka J, Smolka SA, Grosu R. 2021. On the verification of neural ODEs with stochastic guarantees. Proceedings of the AAAI Conference on Artificial Intelligence. AAAI: Association for the Advancement of Artificial Intelligence, Technical Tracks, vol. 35, 11525–11535.","ieee":"S. Grunbacher, R. Hasani, M. Lechner, J. Cyranka, S. A. Smolka, and R. Grosu, “On the verification of neural ODEs with stochastic guarantees,” in Proceedings of the AAAI Conference on Artificial Intelligence, Virtual, 2021, vol. 35, no. 13, pp. 11525–11535.","apa":"Grunbacher, S., Hasani, R., Lechner, M., Cyranka, J., Smolka, S. A., & Grosu, R. (2021). On the verification of neural ODEs with stochastic guarantees. In Proceedings of the AAAI Conference on Artificial Intelligence (Vol. 35, pp. 11525–11535). Virtual: AAAI Press.","ama":"Grunbacher S, Hasani R, Lechner M, Cyranka J, Smolka SA, Grosu R. On the verification of neural ODEs with stochastic guarantees. In: Proceedings of the AAAI Conference on Artificial Intelligence. Vol 35. 
AAAI Press; 2021:11525-11535."},"page":"11525-11535","date_published":"2021-05-28T00:00:00Z"},{"publication_identifier":{"issn":["2159-5399"],"isbn":["978-1-57735-866-4"],"eissn":["2374-3468"]},"month":"05","project":[{"call_identifier":"FWF","name":"The Wittgenstein Prize","_id":"25F42A32-B435-11E9-9278-68D0E5697425","grant_number":"Z211"}],"quality_controlled":"1","main_file_link":[{"open_access":"1","url":"https://ojs.aaai.org/index.php/AAAI/article/view/16936"}],"oa":1,"external_id":{"arxiv":["2006.04439"]},"language":[{"iso":"eng"}],"conference":{"name":"AAAI: Association for the Advancement of Artificial Intelligence","start_date":"2021-02-02","location":"Virtual","end_date":"2021-02-09"},"file_date_updated":"2022-01-26T07:36:03Z","publisher":"AAAI Press","department":[{"_id":"GradSch"},{"_id":"ToHe"}],"publication_status":"published","year":"2021","acknowledgement":"R.H. and D.R. are partially supported by Boeing. R.H. and R.G. were partially supported by the Horizon-2020 ECSEL\r\nProject grant No. 783163 (iDev40). M.L. was supported in part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award). A.A. is supported by the National Science Foundation (NSF) Graduate Research Fellowship Program. This research work is partially drawn from the PhD dissertation of R.H.","volume":35,"date_updated":"2022-05-24T06:36:54Z","date_created":"2022-01-25T15:48:36Z","author":[{"first_name":"Ramin","last_name":"Hasani","full_name":"Hasani, Ramin"},{"full_name":"Lechner, Mathias","id":"3DC22916-F248-11E8-B48F-1D18A9856A87","last_name":"Lechner","first_name":"Mathias"},{"full_name":"Amini, Alexander","first_name":"Alexander","last_name":"Amini"},{"full_name":"Rus, Daniela","last_name":"Rus","first_name":"Daniela"},{"full_name":"Grosu, Radu","first_name":"Radu","last_name":"Grosu"}],"article_processing_charge":"No","has_accepted_license":"1","day":"28","page":"7657-7666","citation":{"short":"R. Hasani, M. Lechner, A. Amini, D. Rus, R. 
Grosu, in:, Proceedings of the AAAI Conference on Artificial Intelligence, AAAI Press, 2021, pp. 7657–7666.","mla":"Hasani, Ramin, et al. “Liquid Time-Constant Networks.” Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, no. 9, AAAI Press, 2021, pp. 7657–66.","chicago":"Hasani, Ramin, Mathias Lechner, Alexander Amini, Daniela Rus, and Radu Grosu. “Liquid Time-Constant Networks.” In Proceedings of the AAAI Conference on Artificial Intelligence, 35:7657–66. AAAI Press, 2021.","ama":"Hasani R, Lechner M, Amini A, Rus D, Grosu R. Liquid time-constant networks. In: Proceedings of the AAAI Conference on Artificial Intelligence. Vol 35. AAAI Press; 2021:7657-7666.","ieee":"R. Hasani, M. Lechner, A. Amini, D. Rus, and R. Grosu, “Liquid time-constant networks,” in Proceedings of the AAAI Conference on Artificial Intelligence, Virtual, 2021, vol. 35, no. 9, pp. 7657–7666.","apa":"Hasani, R., Lechner, M., Amini, A., Rus, D., & Grosu, R. (2021). Liquid time-constant networks. In Proceedings of the AAAI Conference on Artificial Intelligence (Vol. 35, pp. 7657–7666). Virtual: AAAI Press.","ista":"Hasani R, Lechner M, Amini A, Rus D, Grosu R. 2021. Liquid time-constant networks. Proceedings of the AAAI Conference on Artificial Intelligence. AAAI: Association for the Advancement of Artificial Intelligence, Technical Tracks, vol. 35, 7657–7666."},"publication":"Proceedings of the AAAI Conference on Artificial Intelligence","date_published":"2021-05-28T00:00:00Z","alternative_title":["Technical Tracks"],"type":"conference","issue":"9","abstract":[{"lang":"eng","text":"We introduce a new class of time-continuous recurrent neural network models. Instead of declaring a learning system’s dynamics by implicit nonlinearities, we construct networks of linear first-order dynamical systems modulated via nonlinear interlinked gates. 
The resulting models represent dynamical systems with varying (i.e., liquid) time-constants coupled to their hidden state, with outputs being computed by numerical differential equation solvers. These neural networks exhibit stable and bounded behavior, yield superior expressivity within the family of neural ordinary differential equations, and give rise to improved performance on time-series prediction tasks. To demonstrate these properties, we first take a theoretical approach to find bounds over their dynamics, and compute their expressive power by the trajectory length measure in a latent trajectory space. We then conduct a series of time-series prediction experiments to manifest the approximation capability of Liquid Time-Constant Networks (LTCs) compared to classical and modern RNNs."}],"intvolume":" 35","ddc":["000"],"title":"Liquid time-constant networks","status":"public","_id":"10671","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa_version":"Published Version","file":[{"content_type":"application/pdf","file_size":4302669,"creator":"mlechner","file_name":"16936-Article Text-20430-1-2-20210518 (1).pdf","access_level":"open_access","date_updated":"2022-01-26T07:36:03Z","date_created":"2022-01-26T07:36:03Z","checksum":"0f06995fba06dbcfa7ed965fc66027ff","success":1,"relation":"main_file","file_id":"10678"}]},{"date_published":"2021-05-28T00:00:00Z","publication":"Proceedings of the AAAI Conference on Artificial Intelligence","citation":{"ista":"Henzinger TA, Lechner M, Zikelic D. 2021. Scalable verification of quantized neural networks. Proceedings of the AAAI Conference on Artificial Intelligence. AAAI: Association for the Advancement of Artificial Intelligence, Technical Tracks, vol. 35, 3787–3795.","ieee":"T. A. Henzinger, M. Lechner, and D. Zikelic, “Scalable verification of quantized neural networks,” in Proceedings of the AAAI Conference on Artificial Intelligence, Virtual, 2021, vol. 35, no. 5A, pp. 3787–3795.","apa":"Henzinger, T. 
A., Lechner, M., & Zikelic, D. (2021). Scalable verification of quantized neural networks. In Proceedings of the AAAI Conference on Artificial Intelligence (Vol. 35, pp. 3787–3795). Virtual: AAAI Press.","ama":"Henzinger TA, Lechner M, Zikelic D. Scalable verification of quantized neural networks. In: Proceedings of the AAAI Conference on Artificial Intelligence. Vol 35. AAAI Press; 2021:3787-3795.","chicago":"Henzinger, Thomas A, Mathias Lechner, and Dorde Zikelic. “Scalable Verification of Quantized Neural Networks.” In Proceedings of the AAAI Conference on Artificial Intelligence, 35:3787–95. AAAI Press, 2021.","mla":"Henzinger, Thomas A., et al. “Scalable Verification of Quantized Neural Networks.” Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, no. 5A, AAAI Press, 2021, pp. 3787–95.","short":"T.A. Henzinger, M. Lechner, D. Zikelic, in:, Proceedings of the AAAI Conference on Artificial Intelligence, AAAI Press, 2021, pp. 3787–3795."},"page":"3787-3795","day":"28","article_processing_charge":"No","has_accepted_license":"1","scopus_import":"1","file":[{"access_level":"open_access","file_name":"16496-Article Text-19990-1-2-20210518 (1).pdf","content_type":"application/pdf","file_size":137235,"creator":"mlechner","relation":"main_file","file_id":"10684","checksum":"2bc8155b2526a70fba5b7301bc89dbd1","success":1,"date_created":"2022-01-26T07:41:16Z","date_updated":"2022-01-26T07:41:16Z"}],"oa_version":"Published Version","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"10665","status":"public","ddc":["000"],"title":"Scalable verification of quantized neural networks","intvolume":" 35","abstract":[{"lang":"eng","text":"Formal verification of neural networks is an active topic of research, and recent advances have significantly increased the size of the networks that verification tools can handle. 
However, most methods are designed for verification of an idealized model of the actual network which works over real arithmetic and ignores rounding imprecisions. This idealization is in stark contrast to network quantization, which is a technique that trades numerical precision for computational efficiency and is, therefore, often applied in practice. Neglecting rounding errors of such low-bit quantized neural networks has been shown to lead to wrong conclusions about the network’s correctness. Thus, the desired approach for verifying quantized neural networks would be one that takes these rounding errors\r\ninto account. In this paper, we show that verifying the bit-exact implementation of quantized neural networks with bit-vector specifications is PSPACE-hard, even though verifying idealized real-valued networks and satisfiability of bit-vector specifications alone are each in NP. Furthermore, we explore several practical heuristics toward closing the complexity gap between idealized and bit-exact verification. In particular, we propose three techniques for making SMT-based verification of quantized neural networks more scalable. 
Our experiments demonstrate that our proposed methods allow a speedup of up to three orders of magnitude over existing approaches."}],"issue":"5A","type":"conference","alternative_title":["Technical Tracks"],"conference":{"end_date":"2021-02-09","location":"Virtual","start_date":"2021-02-02","name":"AAAI: Association for the Advancement of Artificial Intelligence"},"language":[{"iso":"eng"}],"external_id":{"arxiv":["2012.08185"]},"oa":1,"main_file_link":[{"open_access":"1","url":"https://ojs.aaai.org/index.php/AAAI/article/view/16496"}],"quality_controlled":"1","project":[{"_id":"2564DBCA-B435-11E9-9278-68D0E5697425","grant_number":"665385","call_identifier":"H2020","name":"International IST Doctoral Program"},{"call_identifier":"FWF","name":"The Wittgenstein Prize","grant_number":"Z211","_id":"25F42A32-B435-11E9-9278-68D0E5697425"},{"call_identifier":"H2020","name":"Formal Methods for Stochastic Models: Algorithms and Applications","_id":"0599E47C-7A3F-11EA-A408-12923DDC885E","grant_number":"863818"}],"month":"05","publication_identifier":{"issn":["2159-5399"],"eissn":["2374-3468"],"isbn":["978-1-57735-866-4"]},"author":[{"full_name":"Henzinger, Thomas A","last_name":"Henzinger","first_name":"Thomas A","orcid":"0000-0002-2985-7724","id":"40876CD8-F248-11E8-B48F-1D18A9856A87"},{"full_name":"Lechner, Mathias","id":"3DC22916-F248-11E8-B48F-1D18A9856A87","first_name":"Mathias","last_name":"Lechner"},{"full_name":"Zikelic, Dorde","id":"294AA7A6-F248-11E8-B48F-1D18A9856A87","last_name":"Zikelic","first_name":"Dorde"}],"related_material":{"record":[{"status":"public","relation":"dissertation_contains","id":"11362"}]},"date_created":"2022-01-25T15:15:02Z","date_updated":"2023-06-23T07:01:11Z","volume":35,"acknowledgement":"This research was supported in part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein\r\nAward), ERC CoG 863818 (FoRM-SMArt), and the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie 
Grant Agreement No. 665385.\r\n","year":"2021","publication_status":"published","department":[{"_id":"GradSch"},{"_id":"ToHe"}],"publisher":"AAAI Press","file_date_updated":"2022-01-26T07:41:16Z","ec_funded":1}]