@unpublished{14600, abstract = {We study the problem of learning controllers for discrete-time non-linear stochastic dynamical systems with formal reach-avoid guarantees. This work presents the first method for providing formal reach-avoid guarantees, which combine and generalize stability and safety guarantees, with a tolerable probability threshold $p\in[0,1]$ over the infinite time horizon. Our method leverages advances in machine learning literature and it represents formal certificates as neural networks. In particular, we learn a certificate in the form of a reach-avoid supermartingale (RASM), a novel notion that we introduce in this work. Our RASMs provide reachability and avoidance guarantees by imposing constraints on what can be viewed as a stochastic extension of level sets of Lyapunov functions for deterministic systems. Our approach solves several important problems -- it can be used to learn a control policy from scratch, to verify a reach-avoid specification for a fixed control policy, or to fine-tune a pre-trained policy if it does not satisfy the reach-avoid specification. We validate our approach on $3$ stochastic non-linear reinforcement learning tasks.}, author = {Zikelic, Dorde and Lechner, Mathias and Henzinger, Thomas A and Chatterjee, Krishnendu}, booktitle = {arXiv}, title = {{Learning control policies for stochastic systems with reach-avoid guarantees}}, doi = {10.48550/ARXIV.2210.05308}, year = {2022}, } @inproceedings{10052, abstract = {A deterministic finite automaton (DFA) 𝒜 is composite if its language L(𝒜) can be decomposed into an intersection ⋂_{i = 1}^k L(𝒜_i) of languages of smaller DFAs. Otherwise, 𝒜 is prime. This notion of primality was introduced by Kupferman and Mosheiff in 2013, and while they proved that we can decide whether a DFA is composite, the precise complexity of this problem is still open, with a doubly-exponential gap between the upper and lower bounds. In this work, we focus on permutation DFAs, i.e., those for which the transition monoid is a group. We provide an NP algorithm to decide whether a permutation DFA is composite, and show that the difficulty of this problem comes from the number of non-accepting states of the instance: we give a fixed-parameter tractable algorithm with the number of rejecting states as the parameter. Moreover, we investigate the class of commutative permutation DFAs. Their structural properties allow us to decide compositionality in NL, and even in LOGSPACE if the alphabet size is fixed. Despite this low complexity, we show that complex behaviors still arise in this class: we provide a family of composite DFAs each requiring polynomially many factors with respect to its size. We also consider the variant of the problem that asks whether a DFA is k-factor composite, that is, decomposable into k smaller DFAs, for some given integer k ∈ ℕ. We show that, for commutative permutation DFAs, restricting the number of factors makes the decision computationally harder, and yields a problem with tight bounds: it is NP-complete. 
Finally, we show that in general, this problem is in PSPACE, and it is in LOGSPACE for DFAs with a singleton alphabet.}, author = {Jecker, Ismael R and Mazzocchi, Nicolas and Wolf, Petra}, booktitle = {32nd International Conference on Concurrency Theory}, isbn = {978-3-9597-7203-7}, issn = {1868-8969}, location = {Paris, France}, publisher = {Schloss Dagstuhl - Leibniz Zentrum für Informatik}, title = {{Decomposing permutation automata}}, doi = {10.4230/LIPIcs.CONCUR.2021.18}, volume = {203}, year = {2021}, } @inproceedings{10054, abstract = {Graphs and games on graphs are fundamental models for the analysis of reactive systems, in particular, for model-checking and the synthesis of reactive systems. The class of ω-regular languages provides a robust specification formalism for the desired properties of reactive systems. In the classical infinitary formulation of the liveness part of an ω-regular specification, a "good" event must happen eventually without any bound between the good events. A stronger notion of liveness is bounded liveness, which requires that good events happen within d transitions. Given a graph or a game graph with n vertices, m edges, and a bounded liveness objective, the previous best-known algorithmic bounds are as follows: (i) O(dm) for graphs, which in the worst case is O(n³); and (ii) O(n² d²) for games on graphs. Our main contributions improve these long-standing algorithmic bounds. For graphs we present: (i) a randomized algorithm with one-sided error with running time O(n^{2.5} log n) for the bounded liveness objectives; and (ii) a deterministic linear-time algorithm for the complement of bounded liveness objectives. For games on graphs, we present an O(n² d) time algorithm for the bounded liveness objectives.}, author = {Chatterjee, Krishnendu and Henzinger, Monika H and Kale, Sagar Sudhir and Svozil, Alexander}, booktitle = {48th International Colloquium on Automata, Languages, and Programming}, isbn = {978-3-95977-195-5}, issn = {1868-8969}, location = {Glasgow, Scotland}, publisher = {Schloss Dagstuhl - Leibniz Zentrum für Informatik}, title = {{Faster algorithms for bounded liveness in graphs and game graphs}}, doi = {10.4230/LIPIcs.ICALP.2021.124}, volume = {198}, year = {2021}, } @inproceedings{10075, abstract = {We study the expressiveness and succinctness of good-for-games pushdown automata (GFG-PDA) over finite words, that is, pushdown automata whose nondeterminism can be resolved based on the run constructed so far, but independently of the remainder of the input word. We prove that GFG-PDA recognise more languages than deterministic PDA (DPDA) but not all context-free languages (CFL). This class is orthogonal to unambiguous CFL. We further show that GFG-PDA can be exponentially more succinct than DPDA, while PDA can be double-exponentially more succinct than GFG-PDA. We also study GFGness in visibly pushdown automata (VPA), which enjoy better closure properties than PDA, and for which we show GFGness to be ExpTime-complete. GFG-VPA can be exponentially more succinct than deterministic VPA, while VPA can be exponentially more succinct than GFG-VPA. Both of these lower bounds are tight. Finally, we study the complexity of resolving nondeterminism in GFG-PDA. Every GFG-PDA has a positional resolver, a function that resolves nondeterminism and that is only dependent on the current configuration. Pushdown transducers are sufficient to implement the resolvers of GFG-VPA, but not those of GFG-PDA. 
GFG-PDA with finite-state resolvers are determinisable.}, author = {Guha, Shibashis and Jecker, Ismael R and Lehtinen, Karoliina and Zimmermann, Martin}, booktitle = {46th International Symposium on Mathematical Foundations of Computer Science}, isbn = {978-3-9597-7201-3}, issn = {1868-8969}, location = {Tallinn, Estonia}, publisher = {Schloss Dagstuhl - Leibniz Zentrum für Informatik}, title = {{A bit of nondeterminism makes pushdown automata expressive and succinct}}, doi = {10.4230/LIPIcs.MFCS.2021.53}, volume = {202}, year = {2021}, } @inproceedings{10630, abstract = {In the Intersection Non-emptiness problem, we are given a list of finite automata A_1, A_2,… , A_m over a common alphabet Σ as input, and the goal is to determine whether some string w ∈ Σ^* lies in the intersection of the languages accepted by the automata in the list. We analyze the complexity of the Intersection Non-emptiness problem under the promise that all input automata accept a language in some level of the dot-depth hierarchy, or some level of the Straubing-Thérien hierarchy. Automata accepting languages from the lowest levels of these hierarchies arise naturally in the context of model checking. We identify a dichotomy in the dot-depth hierarchy by showing that the problem is already NP-complete when all input automata accept languages of the levels B_0 or B_{1/2} and already PSPACE-hard when all automata accept a language from the level B_1. Conversely, we identify a tetrachotomy in the Straubing-Thérien hierarchy. More precisely, we show that the problem is in AC^0 when restricted to level L_0; complete for L or NL, depending on the input representation, when restricted to languages in the level L_{1/2}; NP-complete when the input is given as DFAs accepting a language in L_1 or L_{3/2}; and finally, PSPACE-complete when the input automata accept languages in level L_2 or higher. Moreover, we show that the proof technique used to show containment in NP for DFAs accepting languages in L_1 or L_{3/2} does not generalize to the context of NFAs. To prove this, we identify a family of languages that provide an exponential separation between the state complexity of general NFAs and that of partially ordered NFAs. To the best of our knowledge, this is the first superpolynomial separation between these two models of computation.}, author = {Arrighi, Emmanuel and Fernau, Henning and Hoffmann, Stefan and Holzer, Markus and Jecker, Ismael R and De Oliveira Oliveira, Mateus and Wolf, Petra}, booktitle = {41st IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science}, isbn = {978-3-9597-7215-0}, issn = {1868-8969}, location = {Virtual}, publisher = {Schloss Dagstuhl - Leibniz Zentrum für Informatik}, title = {{On the complexity of intersection non-emptiness for star-free language classes}}, doi = {10.4230/LIPIcs.FSTTCS.2021.34}, volume = {213}, year = {2021}, } @inproceedings{10629, abstract = {Product graphs arise naturally in formal verification and program analysis. For example, the analysis of two concurrent threads requires the product of two component control-flow graphs, and for language inclusion of deterministic automata the product of two automata is constructed. In many cases, the component graphs have constant treewidth, e.g., when the input contains control-flow graphs of programs. 
We consider the algorithmic analysis of products of two constant-treewidth graphs with respect to three classic specification languages, namely, (a) algebraic properties, (b) mean-payoff properties, and (c) initial credit for energy properties. Our main contributions are as follows. Consider a graph G that is the product of two constant-treewidth graphs of size n each. First, given an idempotent semiring, we present an algorithm that computes the semiring transitive closure of G in time Õ(n⁴). Since the output has size Θ(n⁴), our algorithm is optimal (up to polylog factors). Second, given a mean-payoff objective, we present an O(n³)-time algorithm for deciding whether the value of a starting state is non-negative, improving the previously known O(n⁴) bound. Third, given an initial credit for energy objective, we present an O(n⁵)-time algorithm for computing the minimum initial credit for all nodes of G, improving the previously known O(n⁸) bound. At the heart of our approach lies an algorithm for the efficient construction of strongly-balanced tree decompositions of constant-treewidth graphs. Given a constant-treewidth graph G' of n nodes and a positive integer λ, our algorithm constructs a binary tree decomposition of G' of width O(λ) with the property that the size of each subtree decreases geometrically with rate (1/2 + 2^{-λ}).}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas}, booktitle = {41st IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science}, isbn = {978-3-9597-7215-0}, issn = {1868-8969}, location = {Virtual}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Quantitative verification on product graphs of small treewidth}}, doi = {10.4230/LIPIcs.FSTTCS.2021.42}, volume = {213}, year = {2021}, } @inproceedings{10694, abstract = {In a two-player zero-sum graph game the players move a token throughout a graph to produce an infinite path, which determines the winner or payoff of the game. Traditionally, the players alternate turns in moving the token. In bidding games, however, the players have budgets, and in each turn, we hold an “auction” (bidding) to determine which player moves the token: both players simultaneously submit bids and the higher bidder moves the token. The bidding mechanisms differ in their payment schemes. Bidding games were largely studied with variants of first-price bidding in which only the higher bidder pays his bid. We focus on all-pay bidding, where both players pay their bids. Finite-duration all-pay bidding games were studied and shown to be technically more challenging than their first-price counterparts. We study for the first time, infinite-duration all-pay bidding games. Our most interesting results are for mean-payoff objectives: we portray a complete picture for games played on strongly-connected graphs. We study both pure (deterministic) and mixed (probabilistic) strategies and completely characterize the optimal and almost-sure (with probability 1) payoffs the players can respectively guarantee. We show that mean-payoff games under all-pay bidding exhibit the intriguing mathematical properties of their first-price counterparts; namely, an equivalence with random-turn games in which in each turn, the player who moves is selected according to a (biased) coin toss. 
The equivalences for all-pay bidding are more intricate and unexpected than for first-price bidding.}, author = {Avni, Guy and Jecker, Ismael R and Zikelic, Dorde}, booktitle = {Proceedings of the 2021 ACM-SIAM Symposium on Discrete Algorithms}, editor = {Marx, Dániel}, isbn = {978-1-61197-646-5}, location = {Virtual}, pages = {617--636}, publisher = {Society for Industrial and Applied Mathematics}, title = {{Infinite-duration all-pay bidding games}}, doi = {10.1137/1.9781611976465.38}, year = {2021}, } @inproceedings{10847, abstract = {We study the two-player zero-sum extension of the partially observable stochastic shortest-path problem where one agent has only partial information about the environment. We formulate this problem as a partially observable stochastic game (POSG): given a set of target states and negative rewards for each transition, the player with imperfect information maximizes the expected undiscounted total reward until a target state is reached. The second player with perfect information aims for the opposite. We base our formalism on POSGs with one-sided observability (OS-POSGs) and give the following contributions: (1) we introduce a novel heuristic search value iteration algorithm that iteratively solves depth-limited variants of the game, (2) we derive the bound on the depth guaranteeing an arbitrary precision, (3) we propose a novel upper-bound estimation that allows early terminations, and (4) we experimentally evaluate the algorithm on a pursuit-evasion game.}, author = {Tomášek, Petr and Horák, Karel and Aradhye, Aditya and Bošanský, Branislav and Chatterjee, Krishnendu}, booktitle = {30th International Joint Conference on Artificial Intelligence}, isbn = {9780999241196}, issn = {1045-0823}, location = {Virtual, Online}, pages = {4182--4189}, publisher = {International Joint Conferences on Artificial Intelligence}, title = {{Solving partially observable stochastic shortest-path games}}, doi = {10.24963/ijcai.2021/575}, year = {2021}, } @inproceedings{9296, abstract = {A matching is compatible to two or more labeled point sets of size n with labels {1,…,n} if its straight-line drawing on each of these point sets is crossing-free. We study the maximum number of edges in a matching compatible to two or more labeled point sets in general position in the plane. We show that for any two labeled convex sets of n points there exists a compatible matching with ⌊√(2n)⌋ edges. More generally, for any ℓ labeled point sets we construct compatible matchings of size Ω(n^{1/ℓ}). As a corresponding upper bound, we use probabilistic arguments to show that for any ℓ given sets of n points there exists a labeling of each set such that the largest compatible matching has O(n^{2/(ℓ+1)}) edges. 
Finally, we show that Θ(log n) copies of any set of n points are necessary and sufficient for the existence of a labeling such that any compatible matching consists only of a single edge.}, author = {Aichholzer, Oswin and Arroyo Guevara, Alan M and Masárová, Zuzana and Parada, Irene and Perz, Daniel and Pilz, Alexander and Tkadlec, Josef and Vogtenhuber, Birgit}, booktitle = {15th International Conference on Algorithms and Computation}, isbn = {9783030682101}, issn = {16113349}, location = {Yangon, Myanmar}, pages = {221--233}, publisher = {Springer Nature}, title = {{On compatible matchings}}, doi = {10.1007/978-3-030-68211-8_18}, volume = {12635}, year = {2021}, } @inbook{9403, abstract = {Optimal decision making requires individuals to know their available options and to anticipate correctly what consequences these options have. In many social interactions, however, we refrain from gathering all relevant information, even if this information would help us make better decisions and is costless to obtain. This chapter examines several examples of "deliberate ignorance." Two simple models are proposed to illustrate how ignorance can evolve among self-interested and payoff-maximizing individuals, and open problems are highlighted that lie ahead for future research to explore.}, author = {Schmid, Laura and Hilbe, Christian}, booktitle = {Deliberate Ignorance: Choosing Not To Know}, editor = {Hertwig, Ralph and Engel, Christoph}, isbn = {978-0-262-04559-9}, pages = {139--152}, publisher = {MIT Press}, title = {{The evolution of strategic ignorance in strategic interaction}}, volume = {29}, year = {2021}, } @inproceedings{12767, abstract = {Several problems in planning and reactive synthesis can be reduced to the analysis of two-player quantitative graph games. Optimization is one form of analysis. We argue that in many cases it may be better to replace the optimization problem with the satisficing problem, where instead of searching for optimal solutions, the goal is to search for solutions that adhere to a given threshold bound. This work defines and investigates the satisficing problem on a two-player graph game with the discounted-sum cost model. We show that while the satisficing problem can be solved using numerical methods just like the optimization problem, this approach does not render compelling benefits over optimization. When the discount factor is, however, an integer, we present another approach to satisficing, which is purely based on automata methods. We show that this approach is algorithmically more performant – both theoretically and empirically – and demonstrates the broader applicability of satisficing over optimization.}, author = {Bansal, Suguman and Chatterjee, Krishnendu and Vardi, Moshe Y.}, booktitle = {27th International Conference on Tools and Algorithms for the Construction and Analysis of Systems}, isbn = {9783030720155}, issn = {1611-3349}, location = {Luxembourg City, Luxembourg}, pages = {20--37}, publisher = {Springer Nature}, title = {{On satisficing in quantitative games}}, doi = {10.1007/978-3-030-72016-2}, volume = {12651}, year = {2021}, } @inproceedings{10667, abstract = {Bayesian neural networks (BNNs) place distributions over the weights of a neural network to model uncertainty in the data and the network's prediction. We consider the problem of verifying safety when running a Bayesian neural network policy in a feedback loop with infinite time horizon systems. 
Compared to the existing sampling-based approaches, which are inapplicable to the infinite time horizon setting, we train a separate deterministic neural network that serves as an infinite time horizon safety certificate. In particular, we show that the certificate network guarantees the safety of the system over a subset of the BNN weight posterior's support. Our method first computes a safe weight set and then alters the BNN's weight posterior to reject samples outside this set. Moreover, we show how to extend our approach to a safe-exploration reinforcement learning setting, in order to avoid unsafe trajectories during the training of the policy. We evaluate our approach on a series of reinforcement learning benchmarks, including non-Lyapunovian safety specifications.}, author = {Lechner, Mathias and Žikelić, Ðorđe and Chatterjee, Krishnendu and Henzinger, Thomas A}, booktitle = {35th Conference on Neural Information Processing Systems}, location = {Virtual}, title = {{Infinite time horizon safety of Bayesian neural networks}}, doi = {10.48550/arXiv.2111.03165}, year = {2021}, } @article{8793, abstract = {We study optimal election sequences for repeatedly selecting a (very) small group of leaders among a set of participants (players) with publicly known unique ids. In every time slot, every player has to select exactly one player that it considers to be the current leader, oblivious to the selection of the other players, but with the overarching goal of maximizing a given parameterized global (“social”) payoff function in the limit. We consider a quite generic model, where the local payoff achieved by a given player depends, weighted by some arbitrary but fixed real parameter, on the number of different leaders chosen in a round, the number of players that choose the given player as the leader, and whether the chosen leader has changed w.r.t. the previous round or not. The social payoff can be the maximum, average or minimum local payoff of the players. Possible applications include quite diverse examples such as rotating coordinator-based distributed algorithms and long-haul formation flying of social birds. Depending on the weights and the particular social payoff, optimal sequences can be very different, from simple round-robin where all players chose the same leader alternatingly every time slot to very exotic patterns, where a small group of leaders (at most 2) is elected in every time slot. Moreover, we study the question if and when a single player would not benefit w.r.t. its local payoff when deviating from the given optimal sequence, i.e., when our optimal sequences are Nash equilibria in the restricted strategy space of oblivious strategies. As this is the case for many parameterizations of our model, our results reveal that no punishment is needed to make it rational for the players to optimize the social payoff.}, author = {Zeiner, Martin and Schmid, Ulrich and Chatterjee, Krishnendu}, issn = {0166218X}, journal = {Discrete Applied Mathematics}, number = {1}, pages = {392--415}, publisher = {Elsevier}, title = {{Optimal strategies for selecting coordinators}}, doi = {10.1016/j.dam.2020.10.022}, volume = {289}, year = {2021}, } @article{9381, abstract = {A game of rock-paper-scissors is an interesting example of an interaction where none of the pure strategies strictly dominates all others, leading to a cyclic pattern. 
In this work, we consider an unstable version of rock-paper-scissors dynamics and allow individuals to make behavioural mistakes during the strategy execution. We show that such an assumption can break a cyclic relationship leading to a stable equilibrium emerging with only one strategy surviving. We consider two cases: completely random mistakes when individuals have no bias towards any strategy and a general form of mistakes. Then, we determine conditions for a strategy to dominate all other strategies. However, given that individuals who adopt a dominating strategy are still prone to behavioural mistakes in the observed behaviour, we may still observe extinct strategies. That is, behavioural mistakes in strategy execution stabilise evolutionary dynamics leading to an evolutionary stable and, potentially, mixed co-existence equilibrium.}, author = {Kleshnina, Maria and Streipert, Sabrina S. and Filar, Jerzy A. and Chatterjee, Krishnendu}, issn = {15537358}, journal = {PLoS Computational Biology}, number = {4}, publisher = {Public Library of Science}, title = {{Mistakes can stabilise the dynamics of rock-paper-scissors games}}, doi = {10.1371/journal.pcbi.1008523}, volume = {17}, year = {2021}, } @article{9640, abstract = {Selection and random drift determine the probability that novel mutations fixate in a population. Population structure is known to affect the dynamics of the evolutionary process. Amplifiers of selection are population structures that increase the fixation probability of beneficial mutants compared to well-mixed populations. Over the past 15 years, extensive research has produced remarkable structures called strong amplifiers which guarantee that every beneficial mutation fixates with high probability. But strong amplification has come at the cost of considerably delaying the fixation event, which can slow down the overall rate of evolution. However, the precise relationship between fixation probability and time has remained elusive. Here we characterize the slowdown effect of strong amplification. First, we prove that all strong amplifiers must delay the fixation event at least to some extent. Second, we construct strong amplifiers that delay the fixation event only marginally as compared to the well-mixed populations. Our results thus establish a tight relationship between fixation probability and time: Strong amplification always comes at a cost of a slowdown, but more than a marginal slowdown is not needed.}, author = {Tkadlec, Josef and Pavlogiannis, Andreas and Chatterjee, Krishnendu and Nowak, Martin A.}, issn = {20411723}, journal = {Nature Communications}, number = {1}, publisher = {Springer Nature}, title = {{Fast and strong amplifiers of natural selection}}, doi = {10.1038/s41467-021-24271-w}, volume = {12}, year = {2021}, } @inproceedings{9646, abstract = {We consider the fundamental problem of deriving quantitative bounds on the probability that a given assertion is violated in a probabilistic program. We provide automated algorithms that obtain both lower and upper bounds on the assertion violation probability. The main novelty of our approach is that we prove new and dedicated fixed-point theorems which serve as the theoretical basis of our algorithms and enable us to reason about assertion violation bounds in terms of pre and post fixed-point functions. 
To synthesize such fixed-points, we devise algorithms that utilize a wide range of mathematical tools, including repulsing ranking supermartingales, Hoeffding's lemma, Minkowski decompositions, Jensen's inequality, and convex optimization. On the theoretical side, we provide (i) the first automated algorithm for lower-bounds on assertion violation probabilities, (ii) the first complete algorithm for upper-bounds of exponential form in affine programs, and (iii) provably and significantly tighter upper-bounds than the previous approaches. On the practical side, we show our algorithms can handle a wide variety of programs from the literature and synthesize bounds that are remarkably tighter than previous results, in some cases by thousands of orders of magnitude.}, author = {Wang, Jinyi and Sun, Yican and Fu, Hongfei and Chatterjee, Krishnendu and Goharshady, Amir Kafshdar}, booktitle = {Proceedings of the 42nd ACM SIGPLAN International Conference on Programming Language Design and Implementation}, isbn = {9781450383912}, location = {Online}, pages = {1171--1186}, publisher = {Association for Computing Machinery}, title = {{Quantitative analysis of assertion violations in probabilistic programs}}, doi = {10.1145/3453483.3454102}, year = {2021}, } @inproceedings{9645, abstract = {We consider the fundamental problem of reachability analysis over imperative programs with real variables. Previous works that tackle reachability are either unable to handle programs consisting of general loops (e.g. symbolic execution), or lack completeness guarantees (e.g. abstract interpretation), or are not automated (e.g. incorrectness logic). In contrast, we propose a novel approach for reachability analysis that can handle general and complex loops, is complete, and can be entirely automated for a wide family of programs. Through the notion of Inductive Reachability Witnesses (IRWs), our approach extends ideas from both invariant generation and termination to reachability analysis. We first show that our IRW-based approach is sound and complete for reachability analysis of imperative programs. Then, we focus on linear and polynomial programs and develop automated methods for synthesizing linear and polynomial IRWs. In the linear case, we follow the well-known approaches using Farkas' Lemma. Our main contribution is in the polynomial case, where we present a push-button semi-complete algorithm. We achieve this using a novel combination of classical theorems in real algebraic geometry, such as Putinar's Positivstellensatz and Hilbert's Strong Nullstellensatz. Finally, our experimental results show we can prove complex reachability objectives over various benchmarks that were beyond the reach of previous methods.}, author = {Asadi, Ali and Chatterjee, Krishnendu and Fu, Hongfei and Goharshady, Amir Kafshdar and Mahdavi, Mohammad}, booktitle = {Proceedings of the 42nd ACM SIGPLAN International Conference on Programming Language Design and Implementation}, isbn = {9781450383912}, location = {Online}, pages = {772--787}, publisher = {Association for Computing Machinery}, title = {{Polynomial reachability witnesses via Stellensätze}}, doi = {10.1145/3453483.3454076}, year = {2021}, } @inproceedings{10002, abstract = {We present a faster symbolic algorithm for the following central problem in probabilistic verification: Compute the maximal end-component (MEC) decomposition of Markov decision processes (MDPs). This problem generalizes the SCC decomposition problem of graphs and closed recurrent sets of Markov chains. 
The model of symbolic algorithms is widely used in formal verification and model-checking, where access to the input model is restricted to only symbolic operations (e.g., basic set operations and computation of one-step neighborhood). For an input MDP with n vertices and m edges, the classical symbolic algorithm from the 1990s for the MEC decomposition requires O(n²) symbolic operations and O(1) symbolic space. The only other symbolic algorithm for the MEC decomposition requires O(n√m) symbolic operations and O(√m) symbolic space. A main open question is whether the worst-case O(n²) bound for symbolic operations can be beaten. We present a symbolic algorithm that requires Õ(n^{1.5}) symbolic operations and Õ(√n) symbolic space. Moreover, the parametrization of our algorithm provides a trade-off between symbolic operations and symbolic space: for all 0 < ϵ ≤ 1/2 the symbolic algorithm requires Õ(n^{2−ϵ}) symbolic operations and Õ(n^ϵ) symbolic space (Õ hides poly-logarithmic factors). Using our techniques we present faster algorithms for computing the almost-sure winning regions of ω-regular objectives for MDPs. We consider the canonical parity objectives for ω-regular objectives, and for parity objectives with d-priorities we present an algorithm that computes the almost-sure winning region with Õ(n^{2−ϵ}) symbolic operations and Õ(n^ϵ) symbolic space, for all 0 < ϵ ≤ 1/2.}, author = {Chatterjee, Krishnendu and Dvorak, Wolfgang and Henzinger, Monika H and Svozil, Alexander}, booktitle = {Proceedings of the 36th Annual ACM/IEEE Symposium on Logic in Computer Science}, isbn = {978-1-6654-4896-3}, issn = {1043-6871}, keywords = {Computer science, Computational modeling, Markov processes, Probabilistic logic, Formal verification, Game Theory}, location = {Rome, Italy}, pages = {1--13}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{Symbolic time and space tradeoffs for probabilistic verification}}, doi = {10.1109/LICS52264.2021.9470739}, year = {2021}, } @inproceedings{10004, abstract = {Markov chains are the de facto finite-state model for stochastic dynamical systems, and Markov decision processes (MDPs) extend Markov chains by incorporating non-deterministic behaviors. Given an MDP and rewards on states, a classical optimization criterion is the maximal expected total reward where the MDP stops after T steps, which can be computed by a simple dynamic programming algorithm. We consider a natural generalization of the problem where the stopping times can be chosen according to a probability distribution, such that the expected stopping time is T, to optimize the expected total reward. Quite surprisingly we establish inter-reducibility of the expected stopping-time problem for Markov chains with the Positivity problem (which is related to the well-known Skolem problem), for which establishing either decidability or undecidability would be a major breakthrough. 
Given the hardness of the exact problem, we consider the approximate version of the problem: we show that it can be solved in exponential time for Markov chains and in exponential space for MDPs.}, author = {Chatterjee, Krishnendu and Doyen, Laurent}, booktitle = {Proceedings of the 36th Annual ACM/IEEE Symposium on Logic in Computer Science}, isbn = {978-1-6654-4896-3}, issn = {1043-6871}, keywords = {Computer science, Heuristic algorithms, Memory management, Automata, Markov processes, Probability distribution, Complexity theory}, location = {Rome, Italy}, pages = {1--13}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{Stochastic processes with expected stopping time}}, doi = {10.1109/LICS52264.2021.9470595}, year = {2021}, } @inproceedings{10055, abstract = {Repeated idempotent elements are commonly used to characterise iterable behaviours in abstract models of computation. Therefore, given a monoid M, it is natural to ask how long a sequence of elements of M needs to be to ensure the presence of consecutive idempotent factors. This question is formalised through the notion of the Ramsey function R_M associated to M, obtained by mapping every k ∈ ℕ to the minimal integer R_M(k) such that every word u ∈ M^* of length R_M(k) contains k consecutive non-empty factors that correspond to the same idempotent element of M. In this work, we study the behaviour of the Ramsey function R_M by investigating the regular 𝒟-length of M, defined as the largest size L(M) of a submonoid of M isomorphic to the set of natural numbers {1,2, …, L(M)} equipped with the max operation. We show that the regular 𝒟-length of M determines the degree of R_M, by proving that k^L(M) ≤ R_M(k) ≤ (k|M|⁴)^L(M). To allow applications of this result, we provide the value of the regular 𝒟-length of diverse monoids. In particular, we prove that the full monoid of n × n Boolean matrices, which is used to express transition monoids of non-deterministic automata, has a regular 𝒟-length of (n²+n+2)/2.}, author = {Jecker, Ismael R}, booktitle = {38th International Symposium on Theoretical Aspects of Computer Science}, isbn = {978-3-9597-7180-1}, issn = {1868-8969}, location = {Saarbrücken, Germany}, publisher = {Schloss Dagstuhl - Leibniz Zentrum für Informatik}, title = {{A Ramsey theorem for finite monoids}}, doi = {10.4230/LIPIcs.STACS.2021.44}, volume = {187}, year = {2021}, }