@inproceedings{8571, abstract = {We present the results of a friendly competition for formal verification of continuous and hybrid systems with nonlinear continuous dynamics. The friendly competition took place as part of the workshop Applied Verification for Continuous and Hybrid Systems (ARCH) in 2020. This year, six tools participated (in alphabetical order): Ariadne, CORA, DynIbex, Flow*, Isabelle/HOL, and JuliaReach. These tools were applied to solve reachability analysis tasks on six benchmark problems, two of which feature hybrid dynamics. We do not rank the tools based on the results; instead, we present the current status and highlight the potential advantages of the different tools.}, author = {Geretti, Luca and Alexandre Dit Sandretto, Julien and Althoff, Matthias and Benet, Luis and Chapoutot, Alexandre and Chen, Xin and Collins, Pieter and Forets, Marcelo and Freire, Daniel and Immler, Fabian and Kochdumper, Niklas and Sanders, David and Schilling, Christian}, booktitle = {EPiC Series in Computing}, pages = {49--75}, publisher = {EasyChair}, title = {{ARCH-COMP20 Category Report: Continuous and hybrid systems with nonlinear dynamics}}, doi = {10.29007/zkf6}, volume = {74}, year = {2020}, } @inproceedings{8600, abstract = {A vector addition system with states (VASS) consists of a finite set of states and a finite number of counters. A transition changes the current state to the next state, and every counter is either incremented, decremented, or left unchanged. A configuration comprises a state and a value for each counter; a computation is an infinite sequence of configurations with transitions between successive configurations. A probabilistic VASS consists of a VASS along with a probability distribution over the transitions for each state. Qualitative properties such as state and configuration reachability have been widely studied for VASS. In this work we consider multi-dimensional long-run average objectives for VASS and probabilistic VASS. For a counter, the cost of a configuration is the value of the counter; the long-run average value of a computation for that counter is the long-run average of the costs of the configurations in the computation. The multi-dimensional long-run average problem asks, given a VASS and a threshold value for each counter, whether there is a computation such that for each counter the long-run average value does not exceed the respective threshold. For probabilistic VASS, instead of the existence of a computation, we ask whether the expected long-run average value for each counter does not exceed the respective threshold. Our main results are as follows: we show that the multi-dimensional long-run average problem (a) is NP-complete for integer-valued VASS; (b) is undecidable for natural-valued VASS (i.e., nonnegative counters); and (c) can be solved in polynomial time for probabilistic integer-valued VASS, and for probabilistic natural-valued VASS when all computations are non-terminating.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, booktitle = {31st International Conference on Concurrency Theory}, isbn = {9783959771603}, issn = {18688969}, location = {Virtual}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Multi-dimensional long-run average problems for vector addition systems with states}}, doi = {10.4230/LIPIcs.CONCUR.2020.23}, volume = {171}, year = {2020}, }
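Note: for concreteness, the long-run average objective in the VASS entry above can be written out. The notation below is ours, not the paper's, and whether the paper takes the limit inferior or superior is a detail we do not fix here.

    \[
      \mathrm{LRA}_j(\gamma_1 \gamma_2 \gamma_3 \cdots) \;=\; \liminf_{n \to \infty} \frac{1}{n} \sum_{i=1}^{n} c_j(\gamma_i)
    \]

Here \(\gamma_1 \gamma_2 \cdots\) is a computation, \(c_j(\gamma_i)\) is the value of counter \(j\) in configuration \(\gamma_i\), and the multi-dimensional problem asks for a single computation with \(\mathrm{LRA}_j \le \lambda_j\) simultaneously for every counter \(j\), given thresholds \(\lambda_j\).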
@inproceedings{8599, abstract = {A graph game is a two-player zero-sum game in which the players move a token throughout a graph to produce an infinite path, which determines the winner or payoff of the game. In bidding games, both players have budgets, and in each turn, we hold an "auction" (bidding) to determine which player moves the token. In this survey, we consider several bidding mechanisms and study their effect on the properties of the game. Specifically, bidding games, and in particular bidding games of infinite duration, have an intriguing equivalence with random-turn games, in which the player who moves in each turn is chosen randomly. We show how minor changes in the bidding mechanism lead to unexpected differences in the equivalence with random-turn games.}, author = {Avni, Guy and Henzinger, Thomas A}, booktitle = {31st International Conference on Concurrency Theory}, isbn = {9783959771603}, issn = {18688969}, location = {Virtual}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{A survey of bidding games on graphs}}, doi = {10.4230/LIPIcs.CONCUR.2020.2}, volume = {171}, year = {2020}, } @inproceedings{9040, abstract = {Machine learning and formal methods have complementary benefits and drawbacks. In this work, we address the controller-design problem with a combination of techniques from both fields. The use of black-box neural networks in deep reinforcement learning (deep RL) poses a challenge for such a combination. Instead of reasoning formally about the output of deep RL, which we call the wizard, we extract from it a decision-tree-based model, which we refer to as the magic book. Using the extracted model as an intermediary, we are able to handle problems that are infeasible for either deep RL or formal methods by themselves. First, we suggest, for the first time, a synthesis procedure that is based on a magic book. We synthesize a stand-alone correct-by-design controller that enjoys the favorable performance of RL. Second, we incorporate a magic book in a bounded model checking (BMC) procedure. BMC allows us to find numerous traces of the plant under the control of the wizard, which a user can use to increase the trustworthiness of the wizard and to direct further training.}, author = {Alamdari, Par Alizadeh and Avni, Guy and Henzinger, Thomas A and Lukina, Anna}, booktitle = {Proceedings of the 20th Conference on Formal Methods in Computer-Aided Design}, isbn = {9783854480426}, issn = {2708-7824}, location = {Online Conference}, pages = {138--147}, publisher = {TU Wien Academic Press}, title = {{Formal methods with a touch of magic}}, doi = {10.34727/2020/isbn.978-3-85448-042-6_21}, year = {2020}, }
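Note: as a rough illustration of the extraction step in the entry above, and not the authors' actual procedure, a trained policy can be distilled into a decision tree by imitation: sample state-action pairs from the network and fit a tree to them. The environment interface, policy, and hyperparameters below are placeholders.

    import numpy as np
    from sklearn.tree import DecisionTreeClassifier

    def distill_policy(policy, env, episodes=100, max_depth=8):
        # Roll out the trained network (the "wizard") and record its decisions.
        states, actions = [], []
        for _ in range(episodes):
            state, done = env.reset(), False
            while not done:
                action = policy(state)
                states.append(state)
                actions.append(action)
                state, done = env.step(action)  # placeholder interface
        # Fit a small decision tree (the "magic book") that imitates the policy;
        # the tree is then amenable to synthesis and bounded model checking.
        tree = DecisionTreeClassifier(max_depth=max_depth)
        tree.fit(np.array(states), np.array(actions))
        return tree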
@inproceedings{9632, abstract = {Second-order information, in the form of Hessian-vector or inverse-Hessian-vector products, is a fundamental tool for solving optimization problems. Recently, there has been significant interest in utilizing this information in the context of deep neural networks; however, relatively little is known about the quality of existing approximations in this context. Our work examines this question, identifies issues with existing approaches, and proposes a method called WoodFisher to compute a faithful and efficient estimate of the inverse Hessian. Our main application is neural network compression, where we build on the classic Optimal Brain Damage/Surgeon framework. We demonstrate that WoodFisher significantly outperforms popular state-of-the-art methods for one-shot pruning. Further, even when iterative, gradual pruning is allowed, our method results in a gain in test accuracy over state-of-the-art approaches on standard image classification datasets such as ImageNet ILSVRC. We examine how our method can be extended to take into account first-order information, and illustrate its ability to automatically set layer-wise pruning thresholds and perform compression in the limited-data regime. The code is available at https://github.com/IST-DASLab/WoodFisher.}, author = {Singh, Sidak Pal and Alistarh, Dan-Adrian}, booktitle = {Advances in Neural Information Processing Systems}, isbn = {9781713829546}, issn = {10495258}, location = {Vancouver, Canada}, pages = {18098--18109}, publisher = {Curran Associates}, title = {{WoodFisher: Efficient second-order approximation for neural network compression}}, volume = {33}, year = {2020}, }
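Note: the estimator at the heart of the WoodFisher entry above can be sketched compactly. If the Hessian is approximated by the damped empirical Fisher, a sum of gradient outer products, its inverse can be maintained exactly with rank-one Sherman-Morrison/Woodbury updates. This is our own minimal NumPy reading of the abstract; the damping value and gradient matrix are placeholders.

    import numpy as np

    def inverse_empirical_fisher(grads, damping=1e-4):
        # Inverse of (damping * I + (1/N) * sum_n g_n g_n^T), built up with one
        # rank-one Sherman-Morrison/Woodbury update per sample gradient g_n.
        n_samples, dim = grads.shape
        inv_f = np.eye(dim) / damping
        for g in grads:
            v = inv_f.dot(g)
            inv_f -= np.outer(v, v) / (n_samples + g.dot(v))
        return inv_f

    # Classic Optimal Brain Surgeon statistic on top of this estimate:
    # prune the weight q that minimizes  w[q]**2 / (2 * inv_f[q, q]).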
@inproceedings{9103, abstract = {We introduce LRT-NG, a set of techniques and an associated toolset that computes a reachtube (an over-approximation of the set of reachable states over a given time horizon) of a nonlinear dynamical system. LRT-NG significantly advances the state-of-the-art Lagrangian Reachability technique and its associated tool LRT. From a theoretical perspective, LRT-NG is superior to LRT in three ways. First, it uses, for the first time, an analytically computed metric for the propagated ball, which is proven to minimize the ball’s volume. We emphasize that the metric computation is the centerpiece of all bloating-based techniques. Second, it computes the next reachset as the intersection of two balls: one based on the Cartesian metric and the other on the new metric. While the two metrics were previously considered opposing approaches, their joint use considerably tightens the reachtubes. Third, it avoids the "wrapping effect" associated with the validated integration of the center of the reachset, by optimally absorbing the interval approximation in the radius of the next ball. From a tool-development perspective, LRT-NG is superior to LRT in two ways. First, it is a standalone tool that no longer relies on CAPD; this required the implementation of the Lohner method and a Runge-Kutta time-propagation method. Second, it has an improved interface, allowing the input model and initial conditions to be provided as external input files. Our experiments on a comprehensive set of benchmarks, including two Neural ODEs, demonstrate LRT-NG’s superior performance compared to LRT, CAPD, and Flow*.}, author = {Gruenbacher, Sophie and Cyranka, Jacek and Lechner, Mathias and Islam, Md Ariful and Smolka, Scott A. and Grosu, Radu}, booktitle = {Proceedings of the 59th IEEE Conference on Decision and Control}, isbn = {9781728174471}, issn = {07431546}, location = {Jeju Island, Korea (South)}, pages = {1556--1563}, publisher = {IEEE}, title = {{Lagrangian reachtubes: The next generation}}, doi = {10.1109/CDC42340.2020.9304042}, volume = {2020}, year = {2020}, } @inproceedings{10672, abstract = {The family of feedback alignment (FA) algorithms aims to provide a more biologically motivated alternative to backpropagation (BP), by substituting the computations that are unrealistic to implement in physical brains. While FA algorithms have been shown to work well in practice, there is a lack of rigorous theory proving their learning capabilities. Here we introduce the first feedback alignment algorithm with provable learning guarantees. In contrast to existing work, we do not require any assumption about the size or depth of the network, except that it has a single output neuron, as in binary classification tasks. We show that our FA algorithm can deliver its theoretical promises in practice, surpassing the learning performance of existing FA methods and matching backpropagation in binary classification tasks. Finally, we demonstrate the limits of our FA variant when the number of output neurons grows beyond a certain point.}, author = {Lechner, Mathias}, booktitle = {8th International Conference on Learning Representations}, location = {Virtual; Addis Ababa, Ethiopia}, publisher = {ICLR}, title = {{Learning representations for binary-classification without backpropagation}}, year = {2020}, }
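Note: to make the mechanism behind feedback alignment concrete, here is a generic, textbook-style sketch rather than the specific provable variant of the entry above. The backward pass routes the error through a fixed random vector instead of the transpose of the forward weights. All dimensions, data, and the learning rate are placeholders.

    import numpy as np

    rng = np.random.default_rng(0)
    d_in, d_hid = 10, 32
    W1 = 0.1 * rng.normal(size=(d_hid, d_in))  # forward weights, hidden layer
    w2 = 0.1 * rng.normal(size=d_hid)          # forward weights, single output neuron
    b = rng.normal(size=d_hid)                 # fixed random feedback vector (never trained)

    # Toy binary-classification data (placeholder for a real dataset).
    X = rng.normal(size=(200, d_in))
    y = (X[:, 0] > 0).astype(float)

    lr = 0.05
    for _ in range(20):
        for x, t in zip(X, y):
            h = np.tanh(np.dot(W1, x))                # forward pass
            p = 1.0 / (1.0 + np.exp(-np.dot(w2, h)))  # sigmoid output
            err = p - t
            # Feedback alignment: the error reaches the hidden layer through
            # the fixed vector b, not through w2 as backpropagation would use.
            dh = err * b * (1.0 - h ** 2)
            w2 -= lr * err * h
            W1 -= lr * np.outer(dh, x)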
@inproceedings{7808, abstract = {Quantization converts neural networks into low-bit fixed-point computations which can be carried out by efficient integer-only hardware, and is standard practice for the deployment of neural networks on real-time embedded devices. However, like their real-numbered counterparts, quantized networks are not immune to malicious misclassification caused by adversarial attacks. We investigate how quantization affects a network’s robustness to adversarial attacks, which is a formal verification question. We show that neither robustness nor non-robustness is monotonic in the number of bits used for the representation, and that neither is preserved by quantization from a real-numbered network. For this reason, we introduce a verification method for quantized neural networks which, using SMT solving over bit-vectors, accounts for their exact, bit-precise semantics. We built a tool and analyzed the effect of quantization on a classifier for the MNIST dataset. We demonstrate that, compared to our method, existing methods for the analysis of real-numbered networks often derive false conclusions about their quantizations, both when determining robustness and when detecting attacks, and that existing methods for quantized networks often miss attacks. Furthermore, we applied our method beyond robustness, showing how the number of bits in quantization amplifies the gender bias of a predictor for students’ grades.}, author = {Giacobbe, Mirco and Henzinger, Thomas A and Lechner, Mathias}, booktitle = {International Conference on Tools and Algorithms for the Construction and Analysis of Systems}, isbn = {9783030452360}, issn = {16113349}, location = {Dublin, Ireland}, pages = {79--97}, publisher = {Springer Nature}, title = {{How many bits does it take to quantize your neural network?}}, doi = {10.1007/978-3-030-45237-7_5}, volume = {12079}, year = {2020}, }
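Note: a toy illustration of the bit-precise SMT encoding mentioned in the entry above, and nothing like the authors' full tool: a single quantized neuron is modeled over Z3 bit-vectors, and the solver is asked whether a bounded integer perturbation of the input can flip the neuron's decision. Weights, bit-widths, and the perturbation bound are invented for the example.

    from z3 import BitVec, BitVecVal, Solver, sat

    # A neuron over 16-bit integers: sign(w1*x1 + w2*x2 + bias), with all
    # arithmetic wrapping modulo 2**16, exactly as integer hardware would.
    w1, w2, bias = BitVecVal(3, 16), BitVecVal(-5, 16), BitVecVal(7, 16)

    x1, x2 = BitVec('x1', 16), BitVec('x2', 16)
    d1, d2 = BitVec('d1', 16), BitVec('d2', 16)  # adversarial perturbation

    def logit(a, b):
        return w1 * a + w2 * b + bias

    s = Solver()
    s.add(x1 == 10, x2 == 4)                     # the nominal input
    s.add(-3 <= d1, d1 <= 3, -3 <= d2, d2 <= 3)  # perturbation budget
    # Robustness query: can the perturbed decision differ from the nominal one?
    s.add((logit(x1, x2) >= 0) != (logit(x1 + d1, x2 + d2) >= 0))
    print("attack found" if s.check() == sat else "robust within the bound")

Comparisons on bit-vectors are signed in z3py, which matches a signed fixed-point reading of the weights here.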
@article{6761, abstract = {In resource allocation games, selfish players share resources that are needed in order to fulfill their objectives. The cost of using a resource depends on the load on it. In the traditional setting, the players make their choices concurrently and in one shot; that is, a strategy for a player is a subset of the resources. We introduce and study dynamic resource allocation games. In this setting, the game proceeds in phases. In each phase, each player chooses one resource. A scheduler dictates the order in which the players proceed in a phase, possibly scheduling several players to proceed concurrently. The game ends when each player has collected a set of resources that fulfills his objective. The cost for each player then depends on this set as well as on the load on the resources in it; we consider both congestion and cost-sharing games. We argue that the dynamic setting is the suitable setting for many applications in practice. We study the stability of dynamic resource allocation games, where the appropriate notion of stability is that of subgame perfect equilibrium, the inefficiency incurred due to selfish behavior, and problems that are particular to the dynamic setting, such as constraints on the order in which resources can be chosen and the problem of finding a scheduler that achieves stability.}, author = {Avni, Guy and Henzinger, Thomas A and Kupferman, Orna}, issn = {03043975}, journal = {Theoretical Computer Science}, pages = {42--55}, publisher = {Elsevier}, title = {{Dynamic resource allocation games}}, doi = {10.1016/j.tcs.2019.06.031}, volume = {807}, year = {2020}, } @inproceedings{7505, abstract = {Neural networks have demonstrated unmatched performance in a range of classification tasks. Despite numerous efforts of the research community, novelty detection remains one of the significant limitations of neural networks. The ability to identify previously unseen inputs as novel is crucial for our understanding of the decisions made by neural networks. At runtime, inputs not falling into any of the categories learned during training cannot be classified correctly by the neural network. Existing approaches treat the neural network as a black box and try to detect novel inputs based on the confidence of the output predictions. However, neural networks are not trained to reduce their confidence for novel inputs, which limits the effectiveness of these approaches. We propose a framework to monitor a neural network by observing its hidden layers. We employ a common abstraction from program analysis, namely boxes, to identify novel behaviors in the monitored layers, i.e., inputs that cause behaviors outside the box. For each neuron, the boxes range over the values seen in training. The framework is efficient and flexible enough to achieve a desired trade-off between raising false warnings and detecting novel inputs. We illustrate the framework's performance and its robustness to variability in the unknown classes on popular image-classification benchmarks.}, author = {Henzinger, Thomas A and Lukina, Anna and Schilling, Christian}, booktitle = {24th European Conference on Artificial Intelligence}, location = {Santiago de Compostela, Spain}, pages = {2433--2440}, publisher = {IOS Press}, title = {{Outside the box: Abstraction-based monitoring of neural networks}}, doi = {10.3233/FAIA200375}, volume = {325}, year = {2020}, }
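Note: the box abstraction in the last entry is simple enough to sketch directly. This is a minimal reading of the abstract (per-neuron intervals over the values seen in training), with the choice of monitored layer and the tolerance margin left as placeholders.

    import numpy as np

    class BoxMonitor:
        # Per-neuron interval abstraction of a hidden layer's activations.

        def fit(self, train_activations):
            # train_activations: array of shape (num_samples, num_neurons)
            # taken from the monitored layer on the training set.
            self.low = train_activations.min(axis=0)
            self.high = train_activations.max(axis=0)
            return self

        def is_novel(self, activation, margin=0.0):
            # Flag an input whose activations leave the box; enlarging the
            # margin trades fewer false warnings against fewer detections.
            return bool(np.any(activation < self.low - margin) or
                        np.any(activation > self.high + margin))

At runtime, each input's activations at the monitored layer are passed to is_novel; inputs falling outside the box are reported as potentially belonging to a class not seen in training.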