@inproceedings{12467, abstract = {Safety and liveness are elementary concepts of computation, and the foundation of many verification paradigms. The safety-liveness classification of boolean properties characterizes whether a given property can be falsified by observing a finite prefix of an infinite computation trace (always for safety, never for liveness). In quantitative specification and verification, properties assign not truth values, but quantitative values to infinite traces (e.g., a cost, or the distance to a boolean property). We introduce quantitative safety and liveness, and we prove that our definitions induce conservative quantitative generalizations of both (1)~the safety-progress hierarchy of boolean properties and (2)~the safety-liveness decomposition of boolean properties. In particular, we show that every quantitative property can be written as the pointwise minimum of a quantitative safety property and a quantitative liveness property. Consequently, like boolean properties, quantitative properties can be min-decomposed into safety and liveness parts, or alternatively, max-decomposed into co-safety and co-liveness parts. Moreover, quantitative properties can be approximated naturally. We prove that every quantitative property that has both safe and co-safe approximations can be monitored arbitrarily precisely by a monitor that uses only a finite number of states.}, author = {Henzinger, Thomas A and Mazzocchi, Nicolas Adrien and Sarac, Naci E}, booktitle = {26th International Conference on Foundations of Software Science and Computation Structures}, isbn = {9783031308284}, issn = {1611-3349}, location = {Paris, France}, pages = {349--370}, publisher = {Springer Nature}, title = {{Quantitative safety and liveness}}, doi = {10.1007/978-3-031-30829-1_17}, volume = {13992}, year = {2023}, } @inproceedings{13292, abstract = {The operator precedence languages (OPLs) represent the largest known subclass of the context-free languages which enjoys all desirable closure and decidability properties. This includes the decidability of language inclusion, which is the ultimate verification problem. Operator precedence grammars, automata, and logics have been investigated and used, for example, to verify programs with arithmetic expressions and exceptions (both of which are deterministic pushdown but lie outside the scope of the visibly pushdown languages). In this paper, we complete the picture and give, for the first time, an algebraic characterization of the class of OPLs in the form of a syntactic congruence that has finitely many equivalence classes exactly for the operator precedence languages. This is a generalization of the celebrated Myhill-Nerode theorem for the regular languages to OPLs. As one of the consequences, we show that universality and language inclusion for nondeterministic operator precedence automata can be solved by an antichain algorithm. Antichain algorithms avoid determinization and complementation through an explicit subset construction, by leveraging a quasi-order on words, which allows the pruning of the search space for counterexample words without sacrificing completeness. Antichain algorithms can be implemented symbolically, and these implementations are today the best-performing algorithms in practice for the inclusion of finite automata. We give a generic construction of the quasi-order needed for antichain algorithms from a finite syntactic congruence.
This yields the first antichain algorithm for OPLs, an algorithm that solves the ExpTime-hard language inclusion problem for OPLs in exponential time.}, author = {Henzinger, Thomas A and Kebis, Pavol and Mazzocchi, Nicolas Adrien and Sarac, Naci E}, booktitle = {50th International Colloquium on Automata, Languages, and Programming}, isbn = {9783959772785}, issn = {1868-8969}, location = {Paderborn, Germany}, pages = {129:1--129:20}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Regular methods for operator precedence languages}}, doi = {10.4230/LIPIcs.ICALP.2023.129}, volume = {261}, year = {2023}, } @article{12704, abstract = {Adversarial training (i.e., training on adversarially perturbed input data) is a well-studied method for making neural networks robust to potential adversarial attacks during inference. However, the improved robustness does not come for free but rather is accompanied by a decrease in overall model accuracy and performance. Recent work has shown that, in practical robot learning applications, the effects of adversarial training do not pose a fair trade-off but inflict a net loss when measured in holistic robot performance. This work revisits the robustness-accuracy trade-off in robot learning by systematically analyzing whether recent advances in robust training methods and theory, in conjunction with adversarial robot learning, are capable of making adversarial training suitable for real-world robot applications. We evaluate three different robot learning tasks ranging from autonomous driving in a high-fidelity environment amenable to sim-to-real deployment to mobile robot navigation and gesture recognition. Our results demonstrate that, while these techniques make incremental improvements on the trade-off on a relative scale, the negative impact on the nominal accuracy caused by adversarial training still outweighs the improved robustness by an order of magnitude. We conclude that although progress is happening, further advances in robust learning methods are necessary before they can benefit robot learning tasks in practice.}, author = {Lechner, Mathias and Amini, Alexander and Rus, Daniela and Henzinger, Thomas A}, issn = {2377-3766}, journal = {IEEE Robotics and Automation Letters}, number = {3}, pages = {1595--1602}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{Revisiting the adversarial robustness-accuracy tradeoff in robot learning}}, doi = {10.1109/LRA.2023.3240930}, volume = {8}, year = {2023}, } @article{12876, abstract = {Motivation: The problem of model inference is of fundamental importance to systems biology. Logical models (e.g. Boolean networks; BNs) represent a computationally attractive approach capable of handling large biological networks. The models are typically inferred from experimental data. However, even with a substantial amount of experimental data supported by some prior knowledge, existing inference methods often focus on a small sample of admissible candidate models only. Results: We propose Boolean network sketches as a new formal instrument for the inference of Boolean networks. A sketch integrates (typically partial) knowledge about the network’s topology and the update logic (obtained through, e.g. a biological knowledge base or a literature search), as well as further assumptions about the properties of the network’s transitions (e.g. the form of its attractor landscape), and additional restrictions on the model dynamics given by the measured experimental data.
Our new BN inference algorithm starts with an ‘initial’ sketch, which is extended by adding restrictions representing experimental data to a ‘data-informed’ sketch, and subsequently computes all BNs consistent with the data-informed sketch. Our algorithm is based on a symbolic representation and coloured model-checking. Our approach is unique in its ability to cover a broad spectrum of knowledge and efficiently produce a compact representation of all inferred BNs. We evaluate the method on a non-trivial collection of real-world and simulated data.}, author = {Beneš, Nikola and Brim, Luboš and Huvar, Ondřej and Pastva, Samuel and Šafránek, David}, issn = {1367-4811}, journal = {Bioinformatics}, number = {4}, publisher = {Oxford Academic}, title = {{Boolean network sketches: A unifying framework for logical model inference}}, doi = {10.1093/bioinformatics/btad158}, volume = {39}, year = {2023}, } @inproceedings{14242, abstract = {We study the problem of training and certifying adversarially robust quantized neural networks (QNNs). Quantization is a technique for making neural networks more efficient by running them using low-bit integer arithmetic and is therefore commonly adopted in industry. Recent work has shown that floating-point neural networks that have been verified to be robust can become vulnerable to adversarial attacks after quantization, and certification of the quantized representation is necessary to guarantee robustness. In this work, we present quantization-aware interval bound propagation (QA-IBP), a novel method for training robust QNNs. Inspired by advances in robust learning of non-quantized networks, our training algorithm computes the gradient of an abstract representation of the actual network. Unlike existing approaches, our method can handle the discrete semantics of QNNs. Based on QA-IBP, we also develop a complete verification procedure for verifying the adversarial robustness of QNNs, which is guaranteed to terminate and produce a correct answer. Compared to existing approaches, the key advantage of our verification procedure is that it runs entirely on GPU or other accelerator devices. We demonstrate experimentally that our approach significantly outperforms existing methods and establish the new state-of-the-art for training and certifying the robustness of QNNs.}, author = {Lechner, Mathias and Zikelic, Dorde and Chatterjee, Krishnendu and Henzinger, Thomas A and Rus, Daniela}, booktitle = {Proceedings of the 37th AAAI Conference on Artificial Intelligence}, isbn = {9781577358800}, location = {Washington, DC, United States}, number = {12}, pages = {14964--14973}, publisher = {Association for the Advancement of Artificial Intelligence}, title = {{Quantization-aware interval bound propagation for training certifiably robust quantized neural networks}}, doi = {10.1609/aaai.v37i12.26747}, volume = {37}, year = {2023}, } @inproceedings{14243, abstract = {Two-player zero-sum "graph games" are central in logic, verification, and multi-agent systems. The game proceeds by placing a token on a vertex of a graph, and allowing the players to move it to produce an infinite path, which determines the winner or payoff of the game. Traditionally, the players alternate turns in moving the token. In "bidding games", however, the players have budgets and in each turn, an auction (bidding) determines which player moves the token. So far, bidding games have only been studied as full-information games.
In this work, we initiate the study of partial-information bidding games: we study bidding games in which a player's initial budget is drawn from a known probability distribution. We show that, while for some bidding mechanisms and objectives it is straightforward to adapt the results from the full-information setting to the partial-information setting, for others, the analysis is significantly more challenging, requires new techniques, and gives rise to interesting results. Specifically, we study games with "mean-payoff" objectives in combination with "poorman" bidding. We construct optimal strategies for a partially-informed player who plays against a fully-informed adversary. We show that, somewhat surprisingly, the "value" under pure strategies does not necessarily exist in such games.}, author = {Avni, Guy and Jecker, Ismael R and Zikelic, Dorde}, booktitle = {Proceedings of the 37th AAAI Conference on Artificial Intelligence}, isbn = {9781577358800}, location = {Washington, DC, United States}, number = {5}, pages = {5464--5471}, title = {{Bidding graph games with partially-observable budgets}}, doi = {10.1609/aaai.v37i5.25679}, volume = {37}, year = {2023}, } @inproceedings{13310, abstract = {Machine-learned systems are in widespread use for making decisions about humans, and it is important that they are fair, i.e., not biased against individuals based on sensitive attributes. We present runtime verification of algorithmic fairness for systems whose models are unknown, but are assumed to have a Markov chain structure. We introduce a specification language that can model many common algorithmic fairness properties, such as demographic parity, equal opportunity, and social burden. We build monitors that observe a long sequence of events as generated by a given system, and output, after each observation, a quantitative estimate of how fair or biased the system was on that run until that point in time. The estimate is proven to be correct modulo a variable error bound and a given confidence level, where the error bound gets tighter as the observed sequence gets longer. Our monitors are of two types, and use, respectively, frequentist and Bayesian statistical inference techniques. While the frequentist monitors compute estimates that are objectively correct with respect to the ground truth, the Bayesian monitors compute estimates that are correct subject to a given prior belief about the system’s model. Using a prototype implementation, we show how we can monitor if a bank is fair in giving loans to applicants from different social backgrounds, and if a college is fair in admitting students while maintaining a reasonable financial burden on the society. Although they exhibit different theoretical complexities in certain cases, in our experiments, both frequentist and Bayesian monitors took less than a millisecond to update their verdicts after each observation.}, author = {Henzinger, Thomas A and Karimi, Mahyar and Kueffner, Konstantin and Mallik, Kaushik}, booktitle = {Computer Aided Verification}, isbn = {9783031377020}, issn = {1611-3349}, location = {Paris, France}, pages = {358--382}, publisher = {Springer Nature}, title = {{Monitoring algorithmic fairness}}, doi = {10.1007/978-3-031-37703-7_17}, volume = {13965}, year = {2023}, } @inproceedings{13221, abstract = {The safety-liveness dichotomy is a fundamental concept in formal languages which plays a key role in verification.
Recently, this dichotomy has been lifted to quantitative properties, which are arbitrary functions from infinite words to partially-ordered domains. We look into harnessing the dichotomy for the specific classes of quantitative properties expressed by quantitative automata. These automata contain finitely many states and rational-valued transition weights, and their common value functions Inf, Sup, LimInf, LimSup, LimInfAvg, LimSupAvg, and DSum map infinite words into the totally ordered domain of real numbers. In this automata-theoretic setting, we establish a connection between quantitative safety and topological continuity and provide an alternative characterization of quantitative safety and liveness in terms of their boolean counterparts. For all common value functions, we show how the safety closure of a quantitative automaton can be constructed in PTime, and we provide PSpace-complete checks of whether a given quantitative automaton is safe or live, with the exception of LimInfAvg and LimSupAvg automata, for which the safety check is in ExpSpace. Moreover, for deterministic Sup, LimInf, and LimSup automata, we give PTime decompositions into safe and live automata. These decompositions enable the separation of techniques for safety and liveness verification for quantitative specifications.}, author = {Boker, Udi and Henzinger, Thomas A and Mazzocchi, Nicolas Adrien and Sarac, Naci E}, booktitle = {34th International Conference on Concurrency Theory}, isbn = {9783959772990}, issn = {1868-8969}, location = {Antwerp, Belgium}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Safety and liveness of quantitative automata}}, doi = {10.4230/LIPIcs.CONCUR.2023.17}, volume = {279}, year = {2023}, } @inproceedings{14405, abstract = {We introduce hypernode automata as a new specification formalism for hyperproperties of concurrent systems. They are finite automata with nodes labeled with hypernode logic formulas and transitions labeled with actions. A hypernode logic formula specifies relations between sequences of variable values in different system executions. Unlike HyperLTL, hypernode logic takes an asynchronous view on execution traces by constraining the values and the order of value changes of each variable without correlating the timing of the changes. Different execution traces are synchronized solely through the transitions of hypernode automata. Hypernode automata naturally combine asynchronicity at the node level with synchronicity at the transition level. We show that the model-checking problem for hypernode automata is decidable over action-labeled Kripke structures, whose actions induce transitions of the specification automata. For this reason, hypernode automata are a suitable formalism for specifying and verifying asynchronous hyperproperties, such as declassifying observational determinism in multi-threaded programs.}, author = {Bartocci, Ezio and Henzinger, Thomas A and Nickovic, Dejan and Oliveira da Costa, Ana}, booktitle = {34th International Conference on Concurrency Theory}, isbn = {9783959772990}, issn = {1868-8969}, location = {Antwerp, Belgium}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Hypernode automata}}, doi = {10.4230/LIPIcs.CONCUR.2023.21}, volume = {279}, year = {2023}, } @inproceedings{14454, abstract = {As AI and machine-learned software are used increasingly for making decisions that affect humans, it is imperative that they remain fair and unbiased in their decisions.
To complement design-time bias mitigation measures, runtime verification techniques have been introduced recently to monitor the algorithmic fairness of deployed systems. Previous monitoring techniques assume full observability of the states of the (unknown) monitored system. Moreover, they can monitor only fairness properties that are specified as arithmetic expressions over the probabilities of different events. In this work, we extend fairness monitoring to systems modeled as partially observed Markov chains (POMCs), and to specifications containing arithmetic expressions over the expected values of numerical functions on event sequences. The only assumptions we make are that the underlying POMC is aperiodic and starts in the stationary distribution, with a bound on its mixing time being known. These assumptions enable us to estimate a given property for the entire distribution of possible executions of the monitored POMC, by observing only a single execution. Our monitors observe a long run of the system and, after each new observation, output updated PAC-estimates of how fair or biased the system is. The monitors are computationally lightweight and, using a prototype implementation, we demonstrate their effectiveness on several real-world examples.}, author = {Henzinger, Thomas A and Kueffner, Konstantin and Mallik, Kaushik}, booktitle = {23rd International Conference on Runtime Verification}, isbn = {9783031442667}, issn = {1611-3349}, location = {Thessaloniki, Greece}, pages = {291--311}, publisher = {Springer Nature}, title = {{Monitoring algorithmic fairness under partial observations}}, doi = {10.1007/978-3-031-44267-4_15}, volume = {14245}, year = {2023}, } @inproceedings{14518, abstract = {We consider bidding games, a class of two-player zero-sum graph games. The game proceeds as follows. Both players have bounded budgets. A token is placed on a vertex of a graph, in each turn the players simultaneously submit bids, and the higher bidder moves the token, where we break bidding ties in favor of Player 1. Player 1 wins the game iff the token visits a designated target vertex. We consider, for the first time, poorman discrete-bidding in which the granularity of the bids is restricted and the higher bid is paid to the bank. Previous work either did not impose granularity restrictions or considered Richman bidding (bids are paid to the opponent). While the latter mechanisms are technically more accessible, the former is more appealing from a practical standpoint. Our study focuses on threshold budgets: the necessary and sufficient initial budget required for Player 1 to ensure winning against a given Player 2 budget. We first show existence of thresholds. In DAGs, we show that threshold budgets can be approximated with error bounds by thresholds under continuous-bidding and that they exhibit a periodic behavior. We identify closed-form solutions in special cases.
We implement and experiment with an algorithm to find threshold budgets.}, author = {Avni, Guy and Meggendorfer, Tobias and Sadhukhan, Suman and Tkadlec, Josef and Zikelic, Dorde}, booktitle = {Frontiers in Artificial Intelligence and Applications}, isbn = {9781643684369}, issn = {0922-6389}, location = {Krakow, Poland}, pages = {141--148}, publisher = {IOS Press}, title = {{Reachability poorman discrete-bidding games}}, doi = {10.3233/FAIA230264}, volume = {372}, year = {2023}, } @inproceedings{14559, abstract = {We consider the problem of learning control policies in discrete-time stochastic systems which guarantee that the system stabilizes within some specified stabilization region with probability 1. Our approach is based on the novel notion of stabilizing ranking supermartingales (sRSMs) that we introduce in this work. Our sRSMs overcome the limitation of methods proposed in previous works whose applicability is restricted to systems in which the stabilizing region cannot be left once entered under any control policy. We present a learning procedure that learns a control policy together with an sRSM that formally certifies probability 1 stability, both learned as neural networks. We show that this procedure can also be adapted to formally verifying that, under a given Lipschitz continuous control policy, the stochastic system stabilizes within some stabilizing region with probability 1. Our experimental evaluation shows that our learning procedure can successfully learn provably stabilizing policies in practice.}, author = {Ansaripour, Matin and Chatterjee, Krishnendu and Henzinger, Thomas A and Lechner, Mathias and Zikelic, Dorde}, booktitle = {21st International Symposium on Automated Technology for Verification and Analysis}, isbn = {9783031453281}, issn = {1611-3349}, location = {Singapore, Singapore}, pages = {357--379}, publisher = {Springer Nature}, title = {{Learning provably stabilizing neural controllers for discrete-time stochastic systems}}, doi = {10.1007/978-3-031-45329-8_17}, volume = {14215}, year = {2023}, } @inproceedings{13228, abstract = {A machine-learned system that is fair in static decision-making tasks may have biased societal impacts in the long-run. This may happen when the system interacts with humans and feedback patterns emerge, reinforcing old biases in the system and creating new biases. While existing works try to identify and mitigate long-run biases through smart system design, we introduce techniques for monitoring fairness in real time. Our goal is to build and deploy a monitor that will continuously observe a long sequence of events generated by the system in the wild, and will output, with each event, a verdict on how fair the system is at the current point in time. The advantages of monitoring are two-fold. Firstly, fairness is evaluated at run-time, which is important because unfair behaviors may not be eliminated a priori, at design-time, due to partial knowledge about the system and the environment, as well as uncertainties and dynamic changes in the system and the environment, such as the unpredictability of human behavior. Secondly, monitors are by design oblivious to how the monitored system is constructed, which makes them suitable to be used as trusted third-party fairness watchdogs. They function as computationally lightweight statistical estimators, and their correctness proofs rely on the rigorous analysis of the stochastic process that models the assumptions about the underlying dynamics of the system. 
We show, both in theory and experiments, how monitors can warn us (1) if a bank’s credit policy over time has created an unfair distribution of credit scores among the population, and (2) if a resource allocator’s allocation policy over time has made unfair allocations. Our experiments demonstrate that the monitors introduce very low overhead. We believe that runtime monitoring is an important and mathematically rigorous new addition to the fairness toolbox.}, author = {Henzinger, Thomas A and Karimi, Mahyar and Kueffner, Konstantin and Mallik, Kaushik}, booktitle = {FAccT '23: Proceedings of the 2023 ACM Conference on Fairness, Accountability, and Transparency}, isbn = {9781450372527}, location = {Chicago, IL, United States}, pages = {604--614}, publisher = {Association for Computing Machinery}, title = {{Runtime monitoring of dynamic fairness properties}}, doi = {10.1145/3593013.3594028}, year = {2023}, } @article{13263, abstract = {Motivation: Boolean networks are a simple but efficient mathematical formalism for modelling complex biological systems. However, having only two levels of activation is sometimes not enough to fully capture the dynamics of real-world biological systems. Hence the need for multi-valued networks (MVNs), a generalization of Boolean networks. Despite the importance of MVNs for modelling biological systems, only limited progress has been made on developing theories, analysis methods, and tools that can support them. In particular, the recent use of trap spaces in Boolean networks made a great impact on the field of systems biology, but there has been no similar concept defined and studied for MVNs to date. Results: In this work, we generalize the concept of trap spaces in Boolean networks to that in MVNs. We then develop the theory and the analysis methods for trap spaces in MVNs. In particular, we implement all proposed methods in a Python package called trapmvn. Besides showing the applicability of our approach via a realistic case study, we also evaluate the time efficiency of the method on a large collection of real-world models. The experimental results confirm the time efficiency, which we believe enables more accurate analysis on larger and more complex multi-valued models.}, author = {Trinh, Van Giang and Benhamou, Belaid and Henzinger, Thomas A and Pastva, Samuel}, issn = {1367-4811}, journal = {Bioinformatics}, number = {Supplement_1}, pages = {i513--i522}, publisher = {Oxford Academic}, title = {{Trap spaces of multi-valued networks: Definition, computation, and applications}}, doi = {10.1093/bioinformatics/btad262}, volume = {39}, year = {2023}, } @article{14400, abstract = {We consider the problem of computing the maximal probability of satisfying an ω-regular specification for stochastic, continuous-state, nonlinear systems evolving in discrete time. The problem reduces, after automata-theoretic constructions, to finding the maximal probability of satisfying a parity condition on a (possibly hybrid) state space. While characterizing the exact satisfaction probability is open, we show that a lower bound on this probability can be obtained by (I) computing an under-approximation of the qualitative winning region, i.e., states from which the parity condition can be enforced almost surely, and (II) computing the maximal probability of reaching this qualitative winning region.
The heart of our approach is a technique to symbolically compute the under-approximation of the qualitative winning region in step (I) via a finite-state abstraction of the original system as a 2½-player parity game. Our abstraction procedure uses only the support of the probabilistic evolution; it does not use precise numerical transition probabilities. We prove that the winning set in the abstract 2½-player game induces an under-approximation of the qualitative winning region in the original synthesis problem, along with a policy to solve it. By combining these contributions with (a) a symbolic fixpoint algorithm to solve 2½-player games and (b) existing techniques for reachability policy synthesis in stochastic nonlinear systems, we get an abstraction-based algorithm for finding a lower bound on the maximal satisfaction probability. We have implemented the abstraction-based algorithm in Mascot-SDS, where we combined the outlined abstraction step with our tool Genie (Majumdar et al., 2023) that solves 2½-player parity games (through a reduction to Rabin games) more efficiently than existing algorithms. We evaluated our implementation on the nonlinear model of a perturbed bistable switch from the literature. We show empirically that the lower bound on the winning region computed by our approach is precise, by comparing against an over-approximation of the qualitative winning region. Moreover, our implementation outperforms a recently proposed tool for solving this problem by a large margin.}, author = {Majumdar, Rupak and Mallik, Kaushik and Schmuck, Anne-Kathrin and Soudjani, Sadegh}, issn = {1751-570X}, journal = {Nonlinear Analysis: Hybrid Systems}, publisher = {Elsevier}, title = {{Symbolic control for stochastic systems via finite parity games}}, doi = {10.1016/j.nahs.2023.101430}, volume = {51}, year = {2023}, } @inproceedings{14718, abstract = {Binary decision diagrams (BDDs) are one of the fundamental data structures in formal methods and computer science in general. However, the performance of BDD-based algorithms greatly depends on memory latency due to the reliance on large hash tables and thus, by extension, on the speed of random memory access. This hinders the full utilisation of resources available on modern CPUs, since the absolute memory latency has not improved significantly for at least a decade. In this paper, we explore several implementation techniques that improve the performance of BDD manipulation either through enhanced memory locality or by partially eliminating random memory access. On a benchmark suite of 600+ BDDs derived from real-world applications, we demonstrate runtime that is comparable to or better than parallelising the same operations on eight CPU cores.}, author = {Pastva, Samuel and Henzinger, Thomas A}, booktitle = {Proceedings of the 23rd Conference on Formal Methods in Computer-Aided Design}, isbn = {9783854480600}, location = {Ames, IA, United States}, pages = {122--131}, publisher = {TU Vienna Academic Press}, title = {{Binary decision diagrams on modern hardware}}, doi = {10.34727/2023/isbn.978-3-85448-060-0_20}, year = {2023}, } @inproceedings{14830, abstract = {We study the problem of learning controllers for discrete-time non-linear stochastic dynamical systems with formal reach-avoid guarantees. This work presents the first method for providing formal reach-avoid guarantees, which combine and generalize stability and safety guarantees, with a tolerable probability threshold p in [0,1] over the infinite time horizon.
Our method leverages advances in the machine learning literature and represents formal certificates as neural networks. In particular, we learn a certificate in the form of a reach-avoid supermartingale (RASM), a novel notion that we introduce in this work. Our RASMs provide reachability and avoidance guarantees by imposing constraints on what can be viewed as a stochastic extension of level sets of Lyapunov functions for deterministic systems. Our approach solves several important problems -- it can be used to learn a control policy from scratch, to verify a reach-avoid specification for a fixed control policy, or to fine-tune a pre-trained policy if it does not satisfy the reach-avoid specification. We validate our approach on 3 stochastic non-linear reinforcement learning tasks.}, author = {Zikelic, Dorde and Lechner, Mathias and Henzinger, Thomas A and Chatterjee, Krishnendu}, booktitle = {Proceedings of the 37th AAAI Conference on Artificial Intelligence}, issn = {2374-3468}, location = {Washington, DC, United States}, number = {10}, pages = {11926--11935}, publisher = {Association for the Advancement of Artificial Intelligence}, title = {{Learning control policies for stochastic systems with reach-avoid guarantees}}, doi = {10.1609/aaai.v37i10.26407}, volume = {37}, year = {2023}, } @article{13234, abstract = {Neural-network classifiers achieve high accuracy when predicting the class of an input that they were trained to identify. Maintaining this accuracy in dynamic environments, where inputs frequently fall outside the fixed set of initially known classes, remains a challenge. We consider the problem of monitoring the classification decisions of neural networks in the presence of novel classes. For this purpose, we generalize our recently proposed abstraction-based monitor from binary output to real-valued quantitative output. This quantitative output enables new applications, two of which we investigate in the paper. As our first application, we introduce an algorithmic framework for active monitoring of a neural network, which allows us to learn new classes dynamically and yet maintain high monitoring performance. As our second application, we present an offline procedure to retrain the neural network to improve the monitor’s detection performance without deteriorating the network’s classification accuracy. Our experimental evaluation demonstrates both the benefits of our active monitoring framework in dynamic scenarios and the effectiveness of the retraining procedure.}, author = {Kueffner, Konstantin and Lukina, Anna and Schilling, Christian and Henzinger, Thomas A}, issn = {1433-2787}, journal = {International Journal on Software Tools for Technology Transfer}, pages = {575--592}, publisher = {Springer Nature}, title = {{Into the unknown: Active monitoring of neural networks (extended version)}}, doi = {10.1007/s10009-023-00711-4}, volume = {25}, year = {2023}, } @article{14920, abstract = {We consider fixpoint algorithms for two-player games on graphs with $\omega$-regular winning conditions, where the environment is constrained by a strong transition fairness assumption. Strong transition fairness is a widely occurring special case of strong fairness, which requires that any execution is strongly fair with respect to a specified set of live edges: whenever the source vertex of a live edge is visited infinitely often along a play, the edge itself is traversed infinitely often along the play as well.
We show that, surprisingly, strong transition fairness retains the algorithmic characteristics of the fixpoint algorithms for $\omega$-regular games -- the new algorithms have the same alternation depth as the classical algorithms but invoke a new type of predecessor operator. For Rabin games with $k$ pairs, the complexity of the new algorithm is $O(n^{k+2}k!)$ symbolic steps, which is independent of the number of live edges in the strong transition fairness assumption. Further, we show that GR(1) specifications with strong transition fairness assumptions can be solved with a 3-nested fixpoint algorithm, the same as the usual algorithm. In contrast, strong fairness necessarily requires increasing the alternation depth depending on the number of fairness assumptions. We get symbolic algorithms for (generalized) Rabin, parity and GR(1) objectives under strong transition fairness assumptions as well as a direct symbolic algorithm for qualitative winning in stochastic $\omega$-regular games that runs in $O(n^{k+2}k!)$ symbolic steps, improving the state of the art. Finally, we have implemented a BDD-based synthesis engine based on our algorithm. We show on a set of synthetic and real benchmarks that our algorithm is scalable, parallelizable, and outperforms previous algorithms by orders of magnitude.}, author = {Banerjee, Tamajit and Majumdar, Rupak and Mallik, Kaushik and Schmuck, Anne-Kathrin and Soudjani, Sadegh}, issn = {2751-4838}, journal = {TheoretiCS}, publisher = {EPI Sciences}, title = {{Fast symbolic algorithms for omega-regular games under strong transition fairness}}, doi = {10.46298/theoretics.23.4}, volume = {2}, year = {2023}, } @inproceedings{14411, abstract = {Partially specified Boolean networks (PSBNs) represent a promising framework for the qualitative modelling of biological systems in which the logic of interactions is not completely known. Phenotype control aims to stabilise the network in states exhibiting specific traits. In this paper, we define the phenotype control problem in the context of asynchronous PSBNs and propose a novel semi-symbolic algorithm for solving this problem with permanent variable perturbations.}, author = {Beneš, Nikola and Brim, Luboš and Pastva, Samuel and Šafránek, David and Šmijáková, Eva}, booktitle = {21st International Conference on Computational Methods in Systems Biology}, isbn = {9783031426964}, issn = {1611-3349}, location = {Luxembourg City, Luxembourg}, pages = {18--35}, publisher = {Springer Nature}, title = {{Phenotype control of partially specified Boolean networks}}, doi = {10.1007/978-3-031-42697-1_2}, volume = {14137}, year = {2023}, }