@inproceedings{10669,
  abstract = {We show that Neural ODEs, an emerging class of time-continuous neural networks, can be verified by solving a set of global-optimization problems. For this purpose, we introduce Stochastic Lagrangian Reachability (SLR), an abstraction-based technique for constructing a tight Reachtube (an over-approximation of the set of reachable states over a given time-horizon), and provide stochastic guarantees in the form of confidence intervals for the Reachtube bounds. SLR inherently avoids the infamous wrapping effect (accumulation of over-approximation errors) by performing local optimization steps to expand safe regions, instead of repeatedly forward-propagating them as is done by deterministic reachability methods. To enable fast local optimizations, we introduce a novel forward-mode adjoint sensitivity method to compute gradients without the need for backpropagation. Finally, we establish asymptotic and non-asymptotic convergence rates for SLR.},
  author = {Grunbacher, Sophie and Hasani, Ramin and Lechner, Mathias and Cyranka, Jacek and Smolka, Scott A. and Grosu, Radu},
  booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence},
  isbn = {978-1-57735-866-4},
  issn = {2374-3468},
  location = {Virtual},
  number = {13},
  pages = {11525--11535},
  publisher = {AAAI Press},
  title = {{On the verification of neural ODEs with stochastic guarantees}},
  volume = {35},
  year = {2021},
}

@inproceedings{10671,
  abstract = {We introduce a new class of time-continuous recurrent neural network models. Instead of declaring a learning system's dynamics by implicit nonlinearities, we construct networks of linear first-order dynamical systems modulated via nonlinear interlinked gates. The resulting models represent dynamical systems with varying (i.e., liquid) time-constants coupled to their hidden state, with outputs being computed by numerical differential equation solvers. These neural networks exhibit stable and bounded behavior, yield superior expressivity within the family of neural ordinary differential equations, and give rise to improved performance on time-series prediction tasks. To demonstrate these properties, we first take a theoretical approach to find bounds over their dynamics, and compute their expressive power by the trajectory length measure in a latent trajectory space. We then conduct a series of time-series prediction experiments to manifest the approximation capability of Liquid Time-Constant Networks (LTCs) compared to classical and modern RNNs.},
  author = {Hasani, Ramin and Lechner, Mathias and Amini, Alexander and Rus, Daniela and Grosu, Radu},
  booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence},
  isbn = {978-1-57735-866-4},
  issn = {2374-3468},
  location = {Virtual},
  number = {9},
  pages = {7657--7666},
  publisher = {AAAI Press},
  title = {{Liquid time-constant networks}},
  volume = {35},
  year = {2021},
}

@inproceedings{10665,
  abstract = {Formal verification of neural networks is an active topic of research, and recent advances have significantly increased the size of the networks that verification tools can handle. However, most methods are designed for verification of an idealized model of the actual network, which works over real arithmetic and ignores rounding imprecisions. This idealization is in stark contrast to network quantization, a technique that trades numerical precision for computational efficiency and is therefore often applied in practice. Neglecting the rounding errors of such low-bit quantized neural networks has been shown to lead to wrong conclusions about the network's correctness. Thus, the desired approach for verifying quantized neural networks would be one that takes these rounding errors into account. In this paper, we show that verifying the bit-exact implementation of quantized neural networks with bit-vector specifications is PSPACE-hard, even though verifying idealized real-valued networks and satisfiability of bit-vector specifications alone are each in NP. Furthermore, we explore several practical heuristics toward closing the complexity gap between idealized and bit-exact verification. In particular, we propose three techniques for making SMT-based verification of quantized neural networks more scalable. Our experiments demonstrate that our proposed methods allow a speedup of up to three orders of magnitude over existing approaches.},
  author = {Henzinger, Thomas A and Lechner, Mathias and Zikelic, Dorde},
  booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence},
  isbn = {978-1-57735-866-4},
  issn = {2374-3468},
  location = {Virtual},
  number = {5A},
  pages = {3787--3795},
  publisher = {AAAI Press},
  title = {{Scalable verification of quantized neural networks}},
  volume = {35},
  year = {2021},
}