@inproceedings{9987, abstract = {Stateless model checking (SMC) is one of the standard approaches to the verification of concurrent programs. As scheduling non-determinism creates exponentially large spaces of thread interleavings, SMC attempts to partition this space into equivalence classes and explore only a few representatives from each class. The efficiency of this approach depends on two factors: (a) the coarseness of the partitioning, and (b) the time to generate representatives in each class. For this reason, the search for coarse partitionings that are efficiently explorable is an active research challenge. In this work we present RVF-SMC, a new SMC algorithm that uses a novel reads-value-from (RVF) partitioning. Intuitively, two interleavings are deemed equivalent if they agree on the value obtained in each read event, and read events induce consistent causal orderings between them. The RVF partitioning is provably coarser than recent approaches based on Mazurkiewicz and “reads-from” partitionings. Our experimental evaluation reveals that RVF is quite often a very effective equivalence, as the underlying partitioning is exponentially coarser than other approaches. Moreover, RVF-SMC generates representatives very efficiently, as the reduction in the partitioning is often met with significant speed-ups in the model checking task.}, author = {Agarwal, Pratyush and Chatterjee, Krishnendu and Pathak, Shreya and Pavlogiannis, Andreas and Toman, Viktor}, booktitle = {33rd International Conference on Computer-Aided Verification}, isbn = {978-3-030-81684-1}, issn = {1611-3349}, location = {Virtual}, pages = {341--366}, publisher = {Springer Nature}, title = {{Stateless model checking under a reads-value-from equivalence}}, doi = {10.1007/978-3-030-81685-8_16}, volume = {12759}, year = {2021}, } @article{10191, abstract = {In this work we solve the algorithmic problem of consistency verification for the TSO and PSO memory models given a reads-from map, denoted VTSO-rf and VPSO-rf, respectively. For an execution of n events over k threads and d variables, we establish novel bounds that scale as n^{k+1} for TSO and as n^{k+1}⋅min(n^{k^2}, 2^{k⋅d}) for PSO. Moreover, based on our solution to these problems, we develop an SMC algorithm under TSO and PSO that uses the RF equivalence. The algorithm is exploration-optimal, in the sense that it is guaranteed to explore each class of the RF partitioning exactly once, and spends polynomial time per class when k is bounded. Finally, we implement all our algorithms in the SMC tool Nidhugg, and perform a large number of experiments over benchmarks from existing literature. Our experimental results show that our algorithms for VTSO-rf and VPSO-rf provide significant scalability improvements over standard alternatives. Moreover, when used for SMC, the RF partitioning is often much coarser than the standard Shasha-Snir partitioning for TSO/PSO, which yields a significant speedup in the model checking task.
}, author = {Bui, Truc Lam and Chatterjee, Krishnendu and Gautam, Tushar and Pavlogiannis, Andreas and Toman, Viktor}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, keywords = {safety, risk, reliability and quality, software}, number = {OOPSLA}, publisher = {Association for Computing Machinery}, title = {{The reads-from equivalence for the TSO and PSO memory models}}, doi = {10.1145/3485541}, volume = {5}, year = {2021}, } @phdthesis{10199, abstract = {The design and verification of concurrent systems remains an open challenge due to the non-determinism that arises from inter-process communication. In particular, concurrent programs are notoriously difficult both to write correctly and to analyze formally, as complex thread interaction has to be accounted for. The difficulties are further exacerbated when concurrent programs get executed on modern-day hardware, which contains various buffering and caching mechanisms for efficiency reasons. This causes further subtle non-determinism, which can often produce very unintuitive behavior of the concurrent programs. Model checking is at the forefront of tackling the verification problem, where the task is to decide, given as input a concurrent system and a desired property, whether the system satisfies the property. The inherent state-space explosion problem in model checking of concurrent systems causes naïve explicit methods not to scale, thus more inventive methods are required. One such method is stateless model checking (SMC), which explores, in a memory-efficient manner, the program executions rather than the states of the program. State-of-the-art SMC is typically coupled with partial order reduction (POR) techniques, which argue that certain executions provably produce identical system behavior, thus limiting the number of executions one needs to explore in order to cover all possible behaviors. Another method to tackle the state-space explosion is symbolic model checking, where the considered techniques operate on a succinct implicit representation of the input system rather than explicitly accessing the system. In this thesis we present new techniques for verification of concurrent systems. We present several novel POR methods for SMC of concurrent programs under various models of semantics, some of which account for write-buffering mechanisms. Additionally, we present novel algorithms for symbolic model checking of finite-state concurrent systems, where the desired property of the systems is to ensure a formally defined notion of fairness.}, author = {Toman, Viktor}, issn = {2663-337X}, keywords = {concurrency, verification, model checking}, pages = {166}, publisher = {Institute of Science and Technology Austria}, title = {{Improved verification techniques for concurrent systems}}, doi = {10.15479/at:ista:10199}, year = {2021}, } @article{9293, abstract = {We consider planning problems for graphs, Markov Decision Processes (MDPs), and games on graphs in an explicit state space. While graphs represent the most basic planning model, MDPs represent interaction with nature and games on graphs represent interaction with an adversarial environment. We consider two planning problems with k different target sets: (a) the coverage problem asks whether there is a plan for each individual target set; and (b) the sequential target reachability problem asks whether the targets can be reached in a given sequence.
For the coverage problem, we present a linear-time algorithm for graphs, and a quadratic conditional lower bound for MDPs and games on graphs. For the sequential target problem, we present a linear-time algorithm for graphs, a sub-quadratic algorithm for MDPs, and a quadratic conditional lower bound for games on graphs. Our results with conditional lower bounds, based on the boolean matrix multiplication (BMM) conjecture and the strong exponential time hypothesis (SETH), establish (i) model-separation results showing that for the coverage problem MDPs and games on graphs are harder than graphs, and for the sequential reachability problem games on graphs are harder than MDPs and graphs; and (ii) problem-separation results showing that for MDPs the coverage problem is harder than the sequential target problem.}, author = {Chatterjee, Krishnendu and Dvořák, Wolfgang and Henzinger, Monika H and Svozil, Alexander}, issn = {0004-3702}, journal = {Artificial Intelligence}, number = {8}, publisher = {Elsevier}, title = {{Algorithms and conditional lower bounds for planning problems}}, doi = {10.1016/j.artint.2021.103499}, volume = {297}, year = {2021}, } @article{9393, abstract = {We consider the core algorithmic problems related to verification of systems with respect to three classical quantitative properties, namely, the mean-payoff, the ratio, and the minimum initial credit for energy property. The algorithmic problem, given a graph and a quantitative property, asks to compute the optimal value (the infimum value over all traces) from every node of the graph. We consider graphs with bounded treewidth—a class that contains the control flow graphs of most programs. Let n denote the number of nodes of a graph, m the number of edges (for bounded treewidth m = O(n)) and W the largest absolute value of the weights. Our main theoretical results are as follows. First, for the minimum initial credit problem we show that (1) for general graphs the problem can be solved in O(n^2⋅m) time and the associated decision problem in O(n⋅m) time, improving the previously known O(n^3⋅m⋅log(n⋅W)) and O(n^2⋅m) bounds, respectively; and (2) for bounded treewidth graphs we present an algorithm that requires O(n⋅log n) time. Second, for bounded treewidth graphs we present an algorithm that approximates the mean-payoff value within a factor of 1+ε in time O(n⋅log(n/ε)) as compared to the classical exact algorithms on general graphs that require quadratic time. Third, for the ratio property we present an algorithm that for bounded treewidth graphs works in time O(n⋅log(|a⋅b|)) = O(n⋅log(n⋅W)), when the output is a/b, as compared to the previously best known algorithm on general graphs with running time O(n^2⋅log(n⋅W)). We have implemented some of our algorithms and show that they present a significant speedup on standard benchmarks.}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas}, issn = {1572-8102}, journal = {Formal Methods in System Design}, pages = {401--428}, publisher = {Springer}, title = {{Faster algorithms for quantitative verification in bounded treewidth graphs}}, doi = {10.1007/s10703-021-00373-5}, volume = {57}, year = {2021}, } @inproceedings{9644, abstract = {We present a new approach to proving non-termination of non-deterministic integer programs. Our technique is rather simple but efficient.
It relies on a purely syntactic reversal of the program's transition system followed by a constraint-based invariant synthesis with constraints coming from both the original and the reversed transition system. The latter task is performed by a simple call to an off-the-shelf SMT-solver, which allows us to leverage the latest advances in SMT-solving. Moreover, our method offers a combination of features not present (as a whole) in previous approaches: it handles programs with non-determinism, provides relative completeness guarantees and supports programs with polynomial arithmetic. The experiments performed with our prototype tool RevTerm show that our approach, despite its simplicity and stronger theoretical guarantees, is at least on par with the state-of-the-art tools, often achieving a non-trivial improvement under a proper configuration of its parameters.}, author = {Chatterjee, Krishnendu and Goharshady, Ehsan Kafshdar and Novotný, Petr and Zikelic, Dorde}, booktitle = {Proceedings of the 42nd ACM SIGPLAN International Conference on Programming Language Design and Implementation}, isbn = {9781450383912}, location = {Online}, pages = {1033--1048}, publisher = {Association for Computing Machinery}, title = {{Proving non-termination by program reversal}}, doi = {10.1145/3453483.3454093}, year = {2021}, } @inproceedings{10414, abstract = {We consider the almost-sure (a.s.) termination problem for probabilistic programs, which are a stochastic extension of classical imperative programs. Lexicographic ranking functions provide a sound and practical approach for termination of non-probabilistic programs, and their extension to probabilistic programs is achieved via lexicographic ranking supermartingales (LexRSMs). However, LexRSMs introduced in previous work have a limitation that impedes their automation: all of their components have to be non-negative in all reachable states. This might result in a LexRSM not existing even for simple terminating programs. Our contributions are twofold: First, we introduce a generalization of LexRSMs which allows for some components to be negative. This standard feature of non-probabilistic termination proofs was hitherto not known to be sound in the probabilistic setting, as the soundness proof requires a careful analysis of the underlying stochastic process. Second, we present polynomial-time algorithms using our generalized LexRSMs for proving a.s. termination in broad classes of linear-arithmetic programs.}, author = {Chatterjee, Krishnendu and Goharshady, Ehsan Kafshdar and Novotný, Petr and Zárevúcky, Jiří and Zikelic, Dorde}, booktitle = {24th International Symposium on Formal Methods}, isbn = {978-3-030-90869-0}, issn = {1611-3349}, location = {Virtual}, pages = {619--639}, publisher = {Springer Nature}, title = {{On lexicographic proof rules for probabilistic termination}}, doi = {10.1007/978-3-030-90870-6_33}, volume = {13047}, year = {2021}, } @phdthesis{8934, abstract = {In this thesis, we consider several of the most classical and fundamental problems in static analysis and formal verification, including invariant generation, reachability analysis, termination analysis of probabilistic programs, data-flow analysis, quantitative analysis of Markov chains and Markov decision processes, and the problem of data packing in cache management.
We use techniques from parameterized complexity theory, polyhedral geometry, and real algebraic geometry to significantly improve the state of the art, in terms of both scalability and completeness guarantees, for the mentioned problems. In some cases, our results are the first theoretical improvements for the respective problems in two or three decades.}, author = {Goharshady, Amir Kafshdar}, issn = {2663-337X}, pages = {278}, publisher = {Institute of Science and Technology Austria}, title = {{Parameterized and algebro-geometric advances in static program analysis}}, doi = {10.15479/AT:ISTA:8934}, year = {2021}, } @phdthesis{10293, abstract = {Indirect reciprocity in evolutionary game theory is a prominent mechanism for explaining the evolution of cooperation among unrelated individuals. In contrast to direct reciprocity, which is based on individuals meeting repeatedly, and conditionally cooperating by using their own experiences, indirect reciprocity is based on individuals’ reputations. If a player helps another, this increases the helper’s public standing, benefitting them in the future. This lets cooperation in the population emerge without individuals having to meet more than once. While the two modes of reciprocity are intertwined, they are difficult to compare. Thus, they are usually studied in isolation. Direct reciprocity can maintain cooperation with simple strategies, and is robust against noise even when players do not remember more than their partner’s last action. Meanwhile, indirect reciprocity requires its successful strategies, or social norms, to be more complex. Exhaustive search previously identified eight such norms, called the “leading eight”, which excel at maintaining cooperation. However, as the first result of this thesis, we show that the leading eight break down once we remove the fundamental assumption that information is synchronized and public, such that everyone agrees on reputations. Once we consider a more realistic scenario of imperfect information, where reputations are private, and individuals occasionally misinterpret or miss observations, the leading eight do not promote cooperation anymore. Instead, minor initial disagreements can proliferate, fragmenting populations into subgroups. As a next step, we consider ways to mitigate this issue. We first explore whether introducing “generosity” can stabilize cooperation when players use the leading eight strategies in noisy environments. This approach of modifying strategies to include probabilistic elements for coping with errors is known to work well in direct reciprocity. However, as we show here, it fails for the more complex norms of indirect reciprocity. Imperfect information still prevents cooperation from evolving. On the other hand, we succeeded in showing in this thesis that modifying the leading eight to use “quantitative assessment”, i.e. tracking reputation scores on a scale beyond good and bad, and making overall judgments of others based on a threshold, is highly successful, even when noise increases in the environment. Cooperation can flourish when reputations are more nuanced, and players have a broader understanding of what it means to be “good.” Finally, we present a single theoretical framework that unites the two modes of reciprocity despite their differences. Within this framework, we identify a novel, simple, and successful strategy for indirect reciprocity, which can cope with noisy environments and has an analogue in direct reciprocity.
We can also analyze decision making when different sources of information are available. Our results highlight that even the simplest rules of reciprocity can be sufficient for sustaining cooperation.}, author = {Schmid, Laura}, issn = {2663-337X}, pages = {171}, publisher = {Institute of Science and Technology Austria}, title = {{Evolution of cooperation via (in)direct reciprocity under imperfect information}}, doi = {10.15479/at:ista:10293}, year = {2021}, } @article{9997, abstract = {Indirect reciprocity is a mechanism for the evolution of cooperation based on social norms. This mechanism requires that individuals in a population observe and judge each other’s behaviors. Individuals with a good reputation are more likely to receive help from others. Previous work suggests that indirect reciprocity is only effective when all relevant information is reliable and publicly available. Otherwise, individuals may disagree on how to assess others, even if they all apply the same social norm. Such disagreements can lead to a breakdown of cooperation. Here we explore whether the predominantly studied ‘leading eight’ social norms of indirect reciprocity can be made more robust by equipping them with an element of generosity. To this end, we distinguish between two kinds of generosity. According to assessment generosity, individuals occasionally assign a good reputation to group members who would usually be regarded as bad. According to action generosity, individuals occasionally cooperate with group members with whom they would usually defect. Using individual-based simulations, we show that the two kinds of generosity have a very different effect on the resulting reputation dynamics. Assessment generosity tends to add to the overall noise and allows defectors to invade. In contrast, a limited amount of action generosity can be beneficial in a few cases. However, even when action generosity is beneficial, the respective simulations do not result in full cooperation. Our results suggest that while generosity can favor cooperation when individuals use the simplest strategies of reciprocity, it is disadvantageous when individuals use more complex social norms.}, author = {Schmid, Laura and Shati, Pouya and Hilbe, Christian and Chatterjee, Krishnendu}, issn = {2045-2322}, journal = {Scientific Reports}, keywords = {Multidisciplinary}, number = {1}, publisher = {Springer Nature}, title = {{The evolution of indirect reciprocity under action and assessment generosity}}, doi = {10.1038/s41598-021-96932-1}, volume = {11}, year = {2021}, } @article{9402, abstract = {Direct and indirect reciprocity are key mechanisms for the evolution of cooperation. Direct reciprocity means that individuals use their own experience to decide whether to cooperate with another person. Indirect reciprocity means that they also consider the experiences of others. Although these two mechanisms are intertwined, they are typically studied in isolation. Here, we introduce a mathematical framework that allows us to explore both kinds of reciprocity simultaneously. We show that the well-known ‘generous tit-for-tat’ strategy of direct reciprocity has a natural analogue in indirect reciprocity, which we call ‘generous scoring’. Using an equilibrium analysis, we characterize under which conditions either of the two strategies can maintain cooperation. With simulations, we additionally explore which kind of reciprocity evolves when members of a population engage in social learning to adapt to their environment.
Our results draw unexpected connections between direct and indirect reciprocity while highlighting important differences regarding their evolvability.}, author = {Schmid, Laura and Chatterjee, Krishnendu and Hilbe, Christian and Nowak, Martin A.}, issn = {2397-3374}, journal = {Nature Human Behaviour}, number = {10}, pages = {1292--1302}, publisher = {Springer Nature}, title = {{A unified framework of direct and indirect reciprocity}}, doi = {10.1038/s41562-021-01114-8}, volume = {5}, year = {2021}, } @inproceedings{7346, abstract = {The Price of Anarchy (PoA) is a well-established game-theoretic concept to shed light on coordination issues arising in open distributed systems. Leaving agents to selfishly optimize comes with the risk of ending up in sub-optimal states (in terms of performance and/or costs), compared to a centralized system design. However, the PoA relies on strong assumptions about agents' rationality (e.g., resources and information) and interactions, whereas in many distributed systems agents interact locally with bounded resources. They do so repeatedly over time (in contrast to "one-shot games"), and their strategies may evolve. Using a more realistic evolutionary game model, this paper introduces a realized evolutionary Price of Anarchy (ePoA). The ePoA allows an exploration of equilibrium selection in dynamic distributed systems with multiple equilibria, based on local interactions of simple memoryless agents. Considering a fundamental game related to virus propagation on networks, we present analytical bounds on the ePoA in basic network topologies and for different strategy update dynamics. In particular, deriving stationary distributions of the stochastic evolutionary process, we find that the Nash equilibria are not always the most abundant states, and that different processes can feature significant off-equilibrium behavior, leading to a significantly higher ePoA compared to the PoA studied traditionally in the literature.}, author = {Schmid, Laura and Chatterjee, Krishnendu and Schmid, Stefan}, booktitle = {Proceedings of the 23rd International Conference on Principles of Distributed Systems}, location = {Neuchâtel, Switzerland}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{The evolutionary price of anarchy: Locally bounded agents in a dynamic virus game}}, doi = {10.4230/LIPIcs.OPODIS.2019.21}, volume = {153}, year = {2020}, } @inproceedings{8600, abstract = {A vector addition system with states (VASS) consists of a finite set of states and counters. A transition changes the current state to the next state, and every counter is either incremented, or decremented, or left unchanged. A state and value for each counter is a configuration; and a computation is an infinite sequence of configurations with transitions between successive configurations. A probabilistic VASS consists of a VASS along with a probability distribution over the transitions for each state. Qualitative properties such as state and configuration reachability have been widely studied for VASS. In this work we consider multi-dimensional long-run average objectives for VASS and probabilistic VASS. For a counter, the cost of a configuration is the value of the counter; and the long-run average value of a computation for the counter is the long-run average of the costs of the configurations in the computation.
The multi-dimensional long-run average problem, given a VASS and a threshold value for each counter, asks whether there is a computation such that for each counter the long-run average value for the counter does not exceed the respective threshold. For probabilistic VASS, instead of the existence of a computation, we consider whether the expected long-run average value for each counter does not exceed the respective threshold. Our main results are as follows: we show that the multi-dimensional long-run average problem (a) is NP-complete for integer-valued VASS; (b) is undecidable for natural-valued VASS (i.e., nonnegative counters); and (c) can be solved in polynomial time for probabilistic integer-valued VASS, and probabilistic natural-valued VASS when all computations are non-terminating.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, booktitle = {31st International Conference on Concurrency Theory}, isbn = {9783959771603}, issn = {18688969}, location = {Virtual}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Multi-dimensional long-run average problems for vector addition systems with states}}, doi = {10.4230/LIPIcs.CONCUR.2020.23}, volume = {171}, year = {2020}, } @inproceedings{8533, abstract = {Game of Life is a simple and elegant model to study dynamical systems over networks. The model consists of a graph where every vertex has one of two types, namely, dead or alive. A configuration is a mapping of the vertices to the types. An update rule describes how the type of a vertex is updated given the types of its neighbors. In every round, all vertices are updated synchronously, which leads to a configuration update. While in general, Game of Life allows a broad range of update rules, we focus on two simple families of update rules, namely, underpopulation and overpopulation, that model several interesting dynamics studied in the literature. In both settings, a dead vertex requires at least a desired number of live neighbors to become alive. For underpopulation (resp., overpopulation), a live vertex requires at least (resp. at most) a desired number of live neighbors to remain alive. We study the basic computation problems, e.g., configuration reachability, for these two families of rules. For underpopulation rules, we show that these problems can be solved in polynomial time, whereas for overpopulation rules they are PSPACE-complete.}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Jecker, Ismael R and Svoboda, Jakub}, booktitle = {45th International Symposium on Mathematical Foundations of Computer Science}, isbn = {9783959771597}, issn = {18688969}, location = {Prague, Czech Republic}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Simplified game of life: Algorithms and complexity}}, doi = {10.4230/LIPIcs.MFCS.2020.22}, volume = {170}, year = {2020}, } @inproceedings{8534, abstract = {A regular language L of finite words is composite if there are regular languages L₁,L₂,…,L_t such that L = ⋂_{i = 1}^t L_i and the index (number of states in a minimal DFA) of every language L_i is strictly smaller than the index of L. Otherwise, L is prime. Primality of regular languages was introduced and studied in [O. Kupferman and J. Mosheiff, 2015], where the complexity of deciding the primality of the language of a given DFA was left open, with a doubly-exponential gap between the upper and lower bounds.
We study primality for unary regular languages, namely regular languages with a singleton alphabet. A unary language corresponds to a subset of ℕ, making the study of unary prime languages closer to that of primality in number theory. We show that the setting of languages is richer. In particular, while every composite number is the product of two smaller numbers, the number t of languages necessary to decompose a composite unary language induces a strict hierarchy. In addition, a primality witness for a unary language L, namely a word that is not in L but is in all products of languages that contain L and have an index smaller than L’s, may be of exponential length. Still, we are able to characterize compositionality by structural properties of a DFA for L, leading to a LogSpace algorithm for primality checking of unary DFAs.}, author = {Jecker, Ismael R and Kupferman, Orna and Mazzocchi, Nicolas}, booktitle = {45th International Symposium on Mathematical Foundations of Computer Science}, isbn = {9783959771597}, issn = {18688969}, location = {Prague, Czech Republic}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Unary prime languages}}, doi = {10.4230/LIPIcs.MFCS.2020.51}, volume = {170}, year = {2020}, } @inproceedings{7955, abstract = {Simple stochastic games are turn-based 2½-player games with a reachability objective. The basic question asks whether one player can ensure reaching a given target with at least a given probability. A natural extension is games with a conjunction of such conditions as objective. Despite a plethora of recent results on the analysis of systems with multiple objectives, the decidability of this basic problem remains open. In this paper, we present an algorithm approximating the Pareto frontier of the achievable values to a given precision. Moreover, it is an anytime algorithm, meaning it can be stopped at any time returning the current approximation and its error bound.}, author = {Ashok, Pranav and Chatterjee, Krishnendu and Kretinsky, Jan and Weininger, Maximilian and Winkler, Tobias}, booktitle = {Proceedings of the 35th Annual ACM/IEEE Symposium on Logic in Computer Science}, isbn = {9781450371049}, location = {Saarbrücken, Germany}, pages = {102--115}, publisher = {Association for Computing Machinery}, title = {{Approximating values of generalized-reachability stochastic games}}, doi = {10.1145/3373718.3394761}, year = {2020}, } @article{8767, abstract = {Resources are rarely distributed uniformly within a population. Heterogeneity in the concentration of a drug, the quality of breeding sites, or wealth can all affect evolutionary dynamics. In this study, we represent a collection of properties affecting the fitness at a given location using a color. A green node is rich in resources while a red node is poorer. More colors can represent a broader spectrum of resource qualities. For a population evolving according to the birth-death Moran model, the first question we address is which structures, identified by graph connectivity and graph coloring, are evolutionarily equivalent. We prove that all properly two-colored, undirected, regular graphs are evolutionarily equivalent (where “properly colored” means that no two neighbors have the same color). We then compare the effects of background heterogeneity on properly two-colored graphs to those with alternative schemes in which the colors are permuted.
Finally, we discuss dynamic coloring as a model for spatiotemporal resource fluctuations, and we illustrate that random dynamic colorings often diminish the effects of background heterogeneity relative to a proper two-coloring.}, author = {Kaveh, Kamran and McAvoy, Alex and Chatterjee, Krishnendu and Nowak, Martin A.}, issn = {1553-7358}, journal = {PLOS Computational Biology}, keywords = {Ecology, Modelling and Simulation, Computational Theory and Mathematics, Genetics, Ecology, Evolution, Behavior and Systematics, Molecular Biology, Cellular and Molecular Neuroscience}, number = {11}, publisher = {Public Library of Science}, title = {{The Moran process on 2-chromatic graphs}}, doi = {10.1371/journal.pcbi.1008402}, volume = {16}, year = {2020}, } @article{8789, abstract = {Cooperation is a ubiquitous and beneficial behavioural trait despite being prone to exploitation by free-riders. Hence, cooperative populations are prone to invasions by selfish individuals. However, a population consisting of only free-riders typically does not survive. Thus, cooperators and free-riders often coexist in some proportion. An evolutionary version of the Snowdrift Game has proved effective in analysing this phenomenon. However, what if the system has already reached its stable state but was perturbed due to a change in environmental conditions? Then, individuals may have to re-learn their effective strategies. To address this, we consider behavioural mistakes in strategic choice execution, which we refer to as incompetence. Parametrising the propensity to make such mistakes allows for a mathematical description of learning. We compare strategies based on their relative strategic advantage relying on both fitness and learning factors. When strategies are learned at distinct rates, allowing learning according to a prescribed order is optimal. Interestingly, the strategy with the lowest strategic advantage should be learnt first if we are to optimise fitness over the learning path. Then, the differences between strategies are balanced out in order to minimise the effect of behavioural uncertainty.}, author = {Kleshnina, Maria and Streipert, Sabrina and Filar, Jerzy and Chatterjee, Krishnendu}, issn = {22277390}, journal = {Mathematics}, number = {11}, publisher = {MDPI}, title = {{Prioritised learning in snowdrift-type games}}, doi = {10.3390/math8111945}, volume = {8}, year = {2020}, } @article{8788, abstract = {We consider a real-time setting where an environment releases sequences of firm-deadline tasks, and an online scheduler chooses on-the-fly the ones to execute on a single processor so as to maximize cumulated utility. The competitive ratio is a well-known performance measure for the scheduler: it gives the worst-case ratio, among all possible choices for the environment, of the cumulated utility of the online scheduler versus an offline scheduler that knows these choices in advance. Traditionally, competitive analysis is performed by hand, while automated techniques are rare and only handle static environments with independent tasks. We present a quantitative-verification framework for precedence-aware competitive analysis, where task releases may depend on preceding scheduling choices, i.e., the environment can respond to scheduling decisions dynamically.
We consider two general classes of precedences: 1) follower precedences force the release of a dependent task upon the completion of a set of precursor tasks, while 2) pairing precedences modify the characteristics of a dependent task provided that a set of precursor tasks has completed. Precedences make competitive analysis challenging, as the online and offline schedulers operate on diverging sequences. We make a formal presentation of our framework, and use a GPU-based implementation to analyze ten well-known schedulers on precedence-based application examples taken from the existing literature: 1) a handshake protocol (HP); 2) network packet-switching; 3) query scheduling (QS); and 4) a sporadic-interrupt setting. Our experimental results show that precedences and task parameters can drastically change which scheduler performs best. Our framework thus supports application designers in choosing the best scheduler among a given set automatically.}, author = {Pavlogiannis, Andreas and Schaumberger, Nico and Schmid, Ulrich and Chatterjee, Krishnendu}, issn = {19374151}, journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems}, number = {11}, pages = {3981--3992}, publisher = {IEEE}, title = {{Precedence-aware automated competitive analysis of real-time scheduling}}, doi = {10.1109/TCAD.2020.3012803}, volume = {39}, year = {2020}, } @article{9197, abstract = {In this paper we introduce and study all-pay bidding games, a class of two-player, zero-sum games on graphs. The game proceeds as follows. We place a token on some vertex in the graph and assign budgets to the two players. Each turn, each player submits a sealed legal bid (non-negative and below their remaining budget), which is deducted from their budget and the highest bidder moves the token onto an adjacent vertex. The game ends once a sink is reached, and Player 1 pays Player 2 the outcome that is associated with the sink. The players attempt to maximize their expected outcome. Our games model settings where effort (of no inherent value) needs to be invested in an ongoing and stateful manner. On the negative side, we show that even in simple games on DAGs, optimal strategies may require a distribution over bids with infinite support. A central quantity in bidding games is the ratio of the players' budgets. On the positive side, we show a simple FPTAS for DAGs that, for each budget ratio, outputs an approximation for the optimal strategy for that ratio. We also implement it, show that it performs well, suggesting interesting properties of these games. Then, given an outcome c, we show an algorithm for finding the necessary and sufficient initial ratio for guaranteeing outcome c with probability 1, and a strategy ensuring it.
Finally, while the general case has not previously been studied, solving the specific game in which Player 1 wins iff he wins the first two auctions has long been stated as an open question, which we solve.}, author = {Avni, Guy and Ibsen-Jensen, Rasmus and Tkadlec, Josef}, isbn = {9781577358350}, issn = {2374-3468}, journal = {Proceedings of the AAAI Conference on Artificial Intelligence}, location = {New York, NY, United States}, number = {02}, pages = {1798--1805}, publisher = {Association for the Advancement of Artificial Intelligence}, title = {{All-pay bidding games on graphs}}, doi = {10.1609/aaai.v34i02.5546}, volume = {34}, year = {2020}, } @article{7343, abstract = {Coinfections with multiple pathogens can result in complex within-host dynamics affecting virulence and transmission. While multiple infections are intensively studied in solitary hosts, it is so far unresolved how social host interactions interfere with pathogen competition, and if this depends on coinfection diversity. We studied how the collective disease defences of ants – their social immunity – influence pathogen competition in coinfections of same or different fungal pathogen species. Social immunity reduced virulence for all pathogen combinations, but interfered with spore production only in different-species coinfections. Here, it decreased overall pathogen sporulation success while increasing co-sporulation on individual cadavers and maintaining a higher pathogen diversity at the community level. Mathematical modelling revealed that host sanitary care alone can modulate competitive outcomes between pathogens, giving advantage to fast-germinating, thus less grooming-sensitive ones. Host social interactions can hence modulate infection dynamics in coinfected group members, thereby altering pathogen communities at the host level and population level.}, author = {Milutinovic, Barbara and Stock, Miriam and Grasse, Anna V and Naderlinger, Elisabeth and Hilbe, Christian and Cremer, Sylvia}, issn = {1461-0248}, journal = {Ecology Letters}, number = {3}, pages = {565--574}, publisher = {Wiley}, title = {{Social immunity modulates competition between coinfecting pathogens}}, doi = {10.1111/ele.13458}, volume = {23}, year = {2020}, } @misc{13060, abstract = {Coinfections with multiple pathogens can result in complex within-host dynamics affecting virulence and transmission. Whilst multiple infections are intensively studied in solitary hosts, it is so far unresolved how social host interactions interfere with pathogen competition, and if this depends on coinfection diversity. We studied how the collective disease defenses of ants – their social immunity – influence pathogen competition in coinfections of same or different fungal pathogen species. Social immunity reduced virulence for all pathogen combinations, but interfered with spore production only in different-species coinfections. Here, it decreased overall pathogen sporulation success, whilst simultaneously increasing co-sporulation on individual cadavers and maintaining a higher pathogen diversity at the community-level. Mathematical modeling revealed that host sanitary care alone can modulate competitive outcomes between pathogens, giving advantage to fast-germinating, thus less grooming-sensitive ones.
Host social interactions can hence modulate infection dynamics in coinfected group members, thereby altering pathogen communities at the host- and population-level.}, author = {Milutinovic, Barbara and Stock, Miriam and Grasse, Anna V and Naderlinger, Elisabeth and Hilbe, Christian and Cremer, Sylvia}, publisher = {Dryad}, title = {{Social immunity modulates competition between coinfecting pathogens}}, doi = {10.5061/DRYAD.CRJDFN318}, year = {2020}, } @inproceedings{8193, abstract = {Multiple-environment Markov decision processes (MEMDPs) are MDPs equipped with not one, but multiple probabilistic transition functions, which represent the various possible unknown environments. While previous research on MEMDPs focused on theoretical properties for long-run average payoff, we study them with discounted-sum payoff and focus on their practical advantages and applications. MEMDPs can be viewed as a special case of partially observable and mixed observability MDPs: the state of the system is perfectly observable, but not the environment. We show that the specific structure of MEMDPs allows for more efficient algorithmic analysis, in particular for faster belief updates. We demonstrate the applicability of MEMDPs in several domains. In particular, we formalize the sequential decision-making approach to contextual recommendation systems as MEMDPs and substantially improve over the previous MDP approach.}, author = {Chatterjee, Krishnendu and Chmelik, Martin and Karkhanis, Deep and Novotný, Petr and Royer, Amélie}, booktitle = {Proceedings of the 30th International Conference on Automated Planning and Scheduling}, issn = {23340843}, location = {Nancy, France}, pages = {48--56}, publisher = {Association for the Advancement of Artificial Intelligence}, title = {{Multiple-environment Markov decision processes: Efficient analysis and applications}}, volume = {30}, year = {2020}, } @inproceedings{8272, abstract = {We study turn-based stochastic zero-sum games with lexicographic preferences over reachability and safety objectives. Stochastic games are standard models in control, verification, and synthesis of stochastic reactive systems that exhibit both randomness as well as angelic and demonic non-determinism. Lexicographic order allows us to consider multiple objectives with a strict preference order over the satisfaction of the objectives. To the best of our knowledge, stochastic games with lexicographic objectives have not been studied before. We establish determinacy of such games and present strategy and computational complexity results. For strategy complexity, we show that lexicographically optimal strategies exist that are deterministic and memory is only required to remember the already satisfied and violated objectives. For a constant number of objectives, we show that the relevant decision problem is in NP∩coNP, matching the currently known bound for single objectives; and in general the decision problem is PSPACE-hard and can be solved in NEXPTIME∩coNEXPTIME. We present an algorithm that computes the lexicographically optimal strategies via a reduction to computation of optimal strategies in a sequence of single-objective games.
We have implemented our algorithm and report experimental results on various case studies.}, author = {Chatterjee, Krishnendu and Katoen, Joost P and Weininger, Maximilian and Winkler, Tobias}, booktitle = {International Conference on Computer Aided Verification}, isbn = {9783030532901}, issn = {16113349}, pages = {398--420}, publisher = {Springer Nature}, title = {{Stochastic games with lexicographic reachability-safety objectives}}, doi = {10.1007/978-3-030-53291-8_21}, volume = {12225}, year = {2020}, } @article{8671, abstract = {We study relations between evidence theory and S-approximation spaces. Both theories have their roots in the analysis of Dempster's multivalued mappings and lower and upper probabilities, and have close relations to rough sets. We show that an S-approximation space, satisfying a monotonicity condition, can induce a natural belief structure which is a fundamental block in evidence theory. We also demonstrate that one can induce a natural belief structure on one set, given a belief structure on another set, if the two sets are related by a partial monotone S-approximation space.}, author = {Shakiba, A. and Goharshady, Amir Kafshdar and Hooshmandasl, M.R. and Alambardar Meybodi, M.}, issn = {2008-9473}, journal = {Iranian Journal of Mathematical Sciences and Informatics}, number = {2}, pages = {117--128}, publisher = {Iranian Academic Center for Education, Culture and Research}, title = {{A note on belief structures and S-approximation spaces}}, doi = {10.29252/ijmsi.15.2.117}, volume = {15}, year = {2020}, } @article{7212, abstract = {The fixation probability of a single mutant invading a population of residents is among the most widely-studied quantities in evolutionary dynamics. Amplifiers of natural selection are population structures that increase the fixation probability of advantageous mutants, compared to well-mixed populations. Extensive studies have shown that many amplifiers exist for the Birth-death Moran process, some of them substantially increasing the fixation probability or even guaranteeing fixation in the limit of large population size. On the other hand, no amplifiers are known for the death-Birth Moran process, and computer-assisted exhaustive searches have failed to discover amplification. In this work we resolve this disparity, by showing that any amplification under death-Birth updating is necessarily bounded and transient. Our boundedness result states that even if a population structure does amplify selection, the resulting fixation probability is close to that of the well-mixed population. Our transience result states that for any population structure there exists a threshold r⋆ such that the population structure ceases to amplify selection if the mutant fitness advantage r is larger than r⋆. Finally, we also extend the above results to δ-death-Birth updating, which is a combination of Birth-death and death-Birth updating. On the positive side, we identify population structures that maintain amplification for a wide range of values r and δ.
These results demonstrate that amplification of natural selection depends on the specific mechanisms of the evolutionary process.}, author = {Tkadlec, Josef and Pavlogiannis, Andreas and Chatterjee, Krishnendu and Nowak, Martin A.}, issn = {15537358}, journal = {PLoS Computational Biology}, publisher = {Public Library of Science}, title = {{Limits on amplifiers of natural selection under death-Birth updating}}, doi = {10.1371/journal.pcbi.1007494}, volume = {16}, year = {2020}, } @phdthesis{7196, abstract = {In this thesis we study certain mathematical aspects of evolution. The two primary forces that drive an evolutionary process are mutation and selection. Mutation generates new variants in a population. Selection chooses among the variants depending on the reproductive rates of individuals. Evolutionary processes are intrinsically random – a new mutation that is initially present in the population at low frequency can go extinct, even if it confers a reproductive advantage. The overall rate of evolution is largely determined by two quantities: the probability that an invading advantageous mutation spreads through the population (called fixation probability) and the time until it does so (called fixation time). Both those quantities crucially depend not only on the strength of the invading mutation but also on the population structure. In this thesis, we aim to understand how the underlying population structure affects the overall rate of evolution. Specifically, we study population structures that increase the fixation probability of advantageous mutants (called amplifiers of selection). Broadly speaking, our results are of three different types: We present various strong amplifiers, we identify regimes under which only limited amplification is feasible, and we propose population structures that provide different tradeoffs between high fixation probability and short fixation time.}, author = {Tkadlec, Josef}, issn = {2663-337X}, pages = {144}, publisher = {Institute of Science and Technology Austria}, title = {{A role of graphs in evolutionary processes}}, doi = {10.15479/AT:ISTA:7196}, year = {2020}, } @misc{9814, abstract = {Data and Mathematica notebooks for plotting figures from Language acquisition with communication between learners}, author = {Ibsen-Jensen, Rasmus and Tkadlec, Josef and Chatterjee, Krishnendu and Nowak, Martin}, publisher = {Royal Society}, title = {{Data and Mathematica notebooks for plotting figures from Language acquisition with communication between learners}}, doi = {10.6084/m9.figshare.5973013.v1}, year = {2020}, } @inproceedings{8324, abstract = {The notion of program sensitivity (aka Lipschitz continuity) specifies that changes in the program input result in proportional changes to the program output. For probabilistic programs the notion is naturally extended to expected sensitivity. A previous approach develops a relational program logic framework for proving expected sensitivity of probabilistic while loops, where the number of iterations is fixed and bounded. In this work, we consider probabilistic while loops where the number of iterations is not fixed, but randomized and depends on the initial input values. We present a sound approach for proving expected sensitivity of such programs. Our sound approach is martingale-based and can be automated through existing martingale-synthesis algorithms.
Furthermore, our approach is compositional for sequential composition of while loops under a mild side condition. We demonstrate the effectiveness of our approach on several classical examples from Gambler's Ruin, stochastic hybrid systems and stochastic gradient descent. We also present experimental results showing that our automated approach can handle various probabilistic programs in the literature.}, author = {Wang, Peixin and Fu, Hongfei and Chatterjee, Krishnendu and Deng, Yuxin and Xu, Ming}, booktitle = {Proceedings of the ACM on Programming Languages}, issn = {2475-1421}, number = {POPL}, publisher = {ACM}, title = {{Proving expected sensitivity of probabilistic programs with randomized variable-dependent termination time}}, doi = {10.1145/3371093}, volume = {4}, year = {2020}, } @article{15055, abstract = {Markov decision processes (MDPs) are the de facto framework for sequential decision making in the presence of stochastic uncertainty. A classical optimization criterion for MDPs is to maximize the expected discounted-sum payoff, which ignores low-probability catastrophic events with highly negative impact on the system. On the other hand, risk-averse policies require the probability of undesirable events to be below a given threshold, but they do not account for optimization of the expected payoff. We consider MDPs with discounted-sum payoff with failure states which represent catastrophic outcomes. The objective of risk-constrained planning is to maximize the expected discounted-sum payoff among risk-averse policies that ensure the probability to encounter a failure state is below a desired threshold. Our main contribution is an efficient risk-constrained planning algorithm that combines UCT-like search with a predictor learned through interaction with the MDP (in the style of AlphaZero) and with a risk-constrained action selection via linear programming. We demonstrate the effectiveness of our approach with experiments on classical MDPs from the literature, including benchmarks with on the order of 10^6 states.}, author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Novotný, Petr and Vahala, Jiří}, issn = {2374-3468}, journal = {Proceedings of the 34th AAAI Conference on Artificial Intelligence}, keywords = {General Medicine}, location = {New York, NY, United States}, number = {06}, pages = {9794--9801}, publisher = {Association for the Advancement of Artificial Intelligence}, title = {{Reinforcement learning of risk-constrained policies in Markov decision processes}}, doi = {10.1609/aaai.v34i06.6531}, volume = {34}, year = {2020}, } @inproceedings{15082, abstract = {Two plane drawings of geometric graphs on the same set of points are called disjoint compatible if their union is plane and they do not have an edge in common. For a given set S of 2n points, two plane drawings of perfect matchings M1 and M2 (which do not need to be disjoint nor compatible) are disjoint tree-compatible if there exists a plane drawing of a spanning tree T on S which is disjoint compatible to both M1 and M2. We show that the graph of all disjoint tree-compatible perfect geometric matchings on 2n points in convex position is connected if and only if 2n ≥ 10.
Moreover, in that case the diameter of this graph is either 4 or 5, independent of n.}, author = {Aichholzer, Oswin and Obmann, Julia and Patak, Pavel and Perz, Daniel and Tkadlec, Josef}, booktitle = {36th European Workshop on Computational Geometry}, location = {Würzburg, Germany, Virtual}, title = {{Disjoint tree-compatible plane perfect matchings}}, year = {2020}, } @inproceedings{7810, abstract = {Interprocedural data-flow analyses form an expressive and useful paradigm of numerous static analysis applications, such as live variable analysis, alias analysis and null pointer analysis. The most widely-used framework for interprocedural data-flow analysis is IFDS, which encompasses distributive data-flow functions over a finite domain. On-demand data-flow analyses restrict the focus of the analysis on specific program locations and data facts. This setting provides a natural split between (i) an offline (or preprocessing) phase, where the program is partially analyzed and analysis summaries are created, and (ii) an online (or query) phase, where analysis queries arrive on demand and the summaries are used to speed up answering queries. In this work, we consider on-demand IFDS analyses where the queries concern program locations of the same procedure (aka same-context queries). We exploit the fact that flow graphs of programs have low treewidth to develop faster algorithms that are space and time optimal for many common data-flow analyses, in both the preprocessing and the query phase. We also use treewidth to develop query solutions that are embarrassingly parallelizable, i.e. the total work for answering each query is split to a number of threads such that each thread performs only a constant amount of work. Finally, we implement a static analyzer based on our algorithms, and perform a series of on-demand analysis experiments on standard benchmarks. Our experimental results show a drastic speed-up of the queries after only a lightweight preprocessing phase, which significantly outperforms existing techniques.}, author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas}, booktitle = {European Symposium on Programming}, isbn = {9783030449131}, issn = {16113349}, location = {Dublin, Ireland}, pages = {112--140}, publisher = {Springer Nature}, title = {{Optimal and perfectly parallel algorithms for on-demand data-flow analysis}}, doi = {10.1007/978-3-030-44914-8_5}, volume = {12075}, year = {2020}, } @inproceedings{8728, abstract = {Discrete-time Markov Chains (MCs) and Markov Decision Processes (MDPs) are two standard formalisms in system analysis. Their main associated quantitative objectives are hitting probabilities, discounted sum, and mean payoff. Although there are many techniques for computing these objectives in general MCs/MDPs, they have not been thoroughly studied in terms of parameterized algorithms, particularly when treewidth is used as the parameter. This is in sharp contrast to qualitative objectives for MCs, MDPs and graph games, for which treewidth-based algorithms yield significant complexity improvements. In this work, we show that treewidth can also be used to obtain faster algorithms for the quantitative problems. For an MC with n states and m transitions, we show that each of the classical quantitative objectives can be computed in O((n+m)⋅t^2) time, given a tree decomposition of the MC with width t.
Our results also imply a bound of O(κ⋅(n+m)⋅t^2) for each objective on MDPs, where κ is the number of strategy-iteration refinements required for the given input and objective. Finally, we make an experimental evaluation of our new algorithms on low-treewidth MCs and MDPs obtained from the DaCapo benchmark suite. Our experiments show that on low-treewidth MCs and MDPs, our algorithms outperform existing well-established methods by one or more orders of magnitude.}, author = {Asadi, Ali and Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Mohammadi, Kiarash and Pavlogiannis, Andreas}, booktitle = {Automated Technology for Verification and Analysis}, isbn = {9783030591519}, issn = {1611-3349}, location = {Hanoi, Vietnam}, pages = {253--270}, publisher = {Springer Nature}, title = {{Faster algorithms for quantitative analysis of MCs and MDPs with small treewidth}}, doi = {10.1007/978-3-030-59152-6_14}, volume = {12302}, year = {2020}, } @inproceedings{8089, abstract = {We consider the classical problem of invariant generation for programs with polynomial assignments and focus on synthesizing invariants that are a conjunction of strict polynomial inequalities. We present a sound and semi-complete method based on positivstellensaetze, i.e. theorems in semi-algebraic geometry that characterize positive polynomials over a semi-algebraic set. On the theoretical side, the worst-case complexity of our approach is subexponential, whereas the worst-case complexity of the previous complete method (Kapur, ACA 2004) is doubly-exponential. Even when restricted to linear invariants, the best previous complexity for complete invariant generation is exponential (Colón et al., CAV 2003). On the practical side, we reduce the invariant generation problem to quadratically-constrained linear programming (QCLP), which is a classical optimization problem with many industrial solvers. We demonstrate the applicability of our approach by providing experimental results on several academic benchmarks. To the best of our knowledge, the only previous invariant generation method that provides completeness guarantees for invariants consisting of polynomial inequalities is (Kapur, ACA 2004), which relies on quantifier elimination and cannot even handle toy programs such as our running example.}, author = {Chatterjee, Krishnendu and Fu, Hongfei and Goharshady, Amir Kafshdar and Goharshady, Ehsan Kafshdar}, booktitle = {Proceedings of the 41st ACM SIGPLAN Conference on Programming Language Design and Implementation}, isbn = {9781450376136}, location = {London, United Kingdom}, pages = {672--687}, publisher = {Association for Computing Machinery}, title = {{Polynomial invariant generation for non-deterministic recursive programs}}, doi = {10.1145/3385412.3385969}, year = {2020}, } @article{6918, abstract = {We consider the classic problem of Network Reliability. A network is given together with a source vertex, one or more target vertices, and probabilities assigned to each of the edges. Each edge of the network is operable with its associated probability and the problem is to determine the probability of having at least one source-to-target path that is entirely composed of operable edges. This problem is known to be NP-hard. We provide a novel scalable algorithm to solve the Network Reliability problem when the treewidth of the underlying network is small. We also show our algorithm’s applicability for real-world transit networks that have small treewidth, including the metro networks of major cities, such as London and Tokyo.
Our algorithm leverages tree decompositions to shrink the original graph into much smaller graphs, for which reliability can be efficiently and exactly computed using a brute force method. To the best of our knowledge, this is the first exact algorithm for Network Reliability that can scale to handle real-world instances of the problem.}, author = {Goharshady, Amir Kafshdar and Mohammadi, Fatemeh}, issn = {09518320}, journal = {Reliability Engineering and System Safety}, publisher = {Elsevier}, title = {{An efficient algorithm for computing network reliability in small treewidth}}, doi = {10.1016/j.ress.2019.106665}, volume = {193}, year = {2020}, } @inproceedings{6887, abstract = {The fundamental model-checking problem, given as input a model and a specification, asks for the algorithmic verification of whether the model satisfies the specification. Two classical models for reactive systems are graphs and Markov decision processes (MDPs). A basic specification formalism in the verification of reactive systems is the strong fairness (aka Streett) objective, where given different types of requests and corresponding grants, the requirement is that for each type, if the request event happens infinitely often, then the corresponding grant event must also happen infinitely often. All omega-regular objectives can be expressed as Streett objectives and hence they are canonical in verification. Consider graphs/MDPs with n vertices, m edges, and a Streett objective with k pairs, and let b denote the size of the description of the Streett objective for the sets of requests and grants. The current best-known algorithm for the problem requires time O(min(n^2, m sqrt{m log n}) + b log n). In this work we present randomized near-linear time algorithms, with expected running time O~(m + b), where the O~ notation hides poly-log factors. Our randomized algorithms are near-linear in the size of the input, and hence optimal up to poly-log factors. }, author = {Chatterjee, Krishnendu and Dvorák, Wolfgang and Henzinger, Monika H and Svozil, Alexander}, booktitle = {Leibniz International Proceedings in Informatics}, location = {Amsterdam, Netherlands}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Near-linear time algorithms for Streett objectives in graphs and MDPs}}, doi = {10.4230/LIPICS.CONCUR.2019.7}, volume = {140}, year = {2019}, } @inproceedings{6885, abstract = {A vector addition system with states (VASS) consists of a finite set of states and counters. A configuration is a state and a value for each counter; a transition changes the state and each counter is incremented, decremented, or left unchanged. While qualitative properties such as state and configuration reachability have been studied for VASS, we consider the long-run average cost of infinite computations of VASS. The cost of a configuration is, for each state, a linear combination of the counter values. In the special case of uniform cost functions, the linear combination is the same for all states. The (regular) long-run emptiness problem is, given a VASS, a cost function, and a threshold value, whether there is a (lasso-shaped) computation such that the long-run average value of the cost function does not exceed the threshold. For uniform cost functions, we show that the regular long-run emptiness problem is (a) decidable in polynomial time for integer-valued VASS, and (b) decidable but nonelementarily hard for natural-valued VASS (i.e., nonnegative counters).
For general cost functions, we show that the problem is (c) NP-complete for integer-valued VASS, and (d) undecidable for natural-valued VASS. Our most interesting result is for (c) integer-valued VASS with general cost functions, where we establish a connection between the regular long-run emptiness problem and quadratic Diophantine inequalities. The general (nonregular) long-run emptiness problem is as hard as the regular problem in all cases except (c), where it remains open. }, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, location = {Amsterdam, Netherlands}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Long-run average behavior of vector addition systems with states}}, doi = {10.4230/LIPICS.CONCUR.2019.27}, volume = {140}, year = {2019}, } @inproceedings{6889, abstract = {We study Markov decision processes and turn-based stochastic games with parity conditions. There are three qualitative winning criteria, namely, sure winning, which requires all paths to satisfy the condition, almost-sure winning, which requires the condition to be satisfied with probability 1, and limit-sure winning, which requires the condition to be satisfied with probability arbitrarily close to 1. We study the combination of two of these criteria for parity conditions, e.g., there are two parity conditions, one of which must be won surely, and the other almost-surely. The problem has been studied recently by Berthon et al. for MDPs with the combination of sure and almost-sure winning, under infinite-memory strategies, and the problem has been established to be in NP ∩ co-NP. Even in MDPs there is a difference between finite-memory and infinite-memory strategies. Our main results for the combination of sure and almost-sure winning are as follows: (a) we show that for MDPs with finite-memory strategies the problem is in NP ∩ co-NP; (b) we show that for turn-based stochastic games the problem is co-NP-complete, both for finite-memory and infinite-memory strategies; and (c) we present algorithmic results for the finite-memory case, both for MDPs and turn-based stochastic games, by reduction to non-stochastic parity games. In addition, we show that all the above complexity results also carry over to the combination of sure and limit-sure winning, and results for all other combinations can be derived from existing results in the literature. Thus, we present a complete picture for the study of combinations of two qualitative winning criteria for parity conditions in MDPs and turn-based stochastic games. }, author = {Chatterjee, Krishnendu and Piterman, Nir}, location = {Amsterdam, Netherlands}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Combinations of Qualitative Winning for Stochastic Parity Games}}, doi = {10.4230/LIPICS.CONCUR.2019.6}, volume = {140}, year = {2019}, } @inproceedings{6884, abstract = {In two-player games on graphs, the players move a token through a graph to produce a finite or infinite path, which determines the qualitative winner or quantitative payoff of the game. We study bidding games in which the players bid for the right to move the token. Several bidding rules were studied previously. In Richman bidding, in each round, the players simultaneously submit bids, and the higher bidder moves the token and pays the other player. Poorman bidding is similar except that the winner of the bidding pays the "bank" rather than the other player. Taxman bidding spans the spectrum between Richman and poorman bidding.
They are parameterized by a constant tau in [0,1]: portion tau of the winning bid is paid to the other player, and portion 1-tau to the bank. While finite-duration (reachability) taxman games have been studied before, we present, for the first time, results on infinite-duration taxman games. It was previously shown that both Richman and poorman infinite-duration games with qualitative objectives reduce to reachability games, and we show a similar result here. Our most interesting results concern quantitative taxman games, namely mean-payoff games, where poorman and Richman bidding differ significantly. A central quantity in these games is the ratio between the two players' initial budgets. While in poorman mean-payoff games, the optimal payoff of a player depends on the initial ratio, in Richman bidding, the payoff depends only on the structure of the game. In both games the optimal payoffs can be found using (different) probabilistic connections with random-turn games in which, in each turn, instead of bidding, a coin is tossed to determine which player moves. While the value with Richman bidding equals the value of a random-turn game with an unbiased coin, with poorman bidding, the bias in the coin is the initial ratio of the budgets. We give a complete classification of mean-payoff taxman games that is based on a probabilistic connection: the value of a taxman bidding game with parameter tau and initial ratio r equals the value of a random-turn game that uses a coin with bias F(tau, r) = (r+tau * (1-r))/(1+tau). Thus, we show that Richman bidding is the exception; namely, for every tau < 1, the value of the game depends on the initial ratio. Our proof technique simplifies and unifies the previous proof techniques for both Richman and poorman bidding. }, author = {Avni, Guy and Henzinger, Thomas A and Zikelic, Dorde}, location = {Aachen, Germany}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Bidding mechanisms in graph games}}, doi = {10.4230/LIPICS.MFCS.2019.11}, volume = {138}, year = {2019}, } @inproceedings{5948, abstract = {We study the termination problem for nondeterministic probabilistic programs. We consider the bounded termination problem that asks whether the supremum of the expected termination time over all schedulers is bounded. First, we show that ranking supermartingales (RSMs) are both sound and complete for proving bounded termination over nondeterministic probabilistic programs. For nondeterministic probabilistic programs, a previous result claimed that RSMs are not complete for bounded termination, whereas our result corrects the previous flaw and establishes completeness with a rigorous proof. Second, we present the first sound approach to establish lower bounds on expected termination time through RSMs.}, author = {Fu, Hongfei and Chatterjee, Krishnendu}, booktitle = {International Conference on Verification, Model Checking, and Abstract Interpretation}, location = {Cascais, Portugal}, pages = {468--490}, publisher = {Springer Nature}, title = {{Termination of nondeterministic probabilistic programs}}, doi = {10.1007/978-3-030-11245-5_22}, volume = {11388}, year = {2019}, } @inproceedings{6462, abstract = {A controller is a device that interacts with a plant. At each time point, it reads the plant's state and issues commands with the goal that the plant operates optimally. Constructing optimal controllers is a fundamental and challenging problem.
Machine learning techniques have recently been successfully applied to train controllers, yet they have limitations. Learned controllers are monolithic and hard to reason about. In particular, it is difficult to add features without retraining, to guarantee any level of performance, and to achieve acceptable performance when encountering untrained scenarios. These limitations can be addressed by deploying quantitative run-time shields that serve as a proxy for the controller. At each time point, the shield reads the command issued by the controller and may choose to alter it before passing it on to the plant. We show how optimal shields that interfere as little as possible while guaranteeing a desired level of controller performance can be generated systematically and automatically using reactive synthesis. First, we abstract the plant by building a stochastic model. Second, we consider the learned controller to be a black box. Third, we measure controller performance and shield interference by two quantitative run-time measures that are formally defined using weighted automata. Then, the problem of constructing a shield that guarantees maximal performance with minimal interference is the problem of finding an optimal strategy in a stochastic 2-player game “controller versus shield” played on the abstract state space of the plant with a quantitative objective obtained from combining the performance and interference measures. We illustrate the effectiveness of our approach by automatically constructing lightweight shields for learned traffic-light controllers in various road networks. The shields we generate avoid liveness bugs, improve controller performance in untrained and changing traffic situations, and add features to learned controllers, such as giving priority to emergency vehicles.}, author = {Avni, Guy and Bloem, Roderick and Chatterjee, Krishnendu and Henzinger, Thomas A and Konighofer, Bettina and Pranger, Stefan}, booktitle = {31st International Conference on Computer-Aided Verification}, isbn = {9783030255398}, issn = {0302-9743}, location = {New York, NY, United States}, pages = {630--649}, publisher = {Springer}, title = {{Run-time optimization for learned controllers through quantitative games}}, doi = {10.1007/978-3-030-25540-4_36}, volume = {11561}, year = {2019}, } @article{6836, abstract = {Direct reciprocity is a powerful mechanism for the evolution of cooperation on the basis of repeated interactions 1,2,3,4. It requires that interacting individuals are sufficiently equal, such that everyone faces similar consequences when they cooperate or defect. Yet inequality is ubiquitous among humans 5,6 and is generally considered to undermine cooperation and welfare 7,8,9,10. Most previous models of reciprocity do not include inequality 11,12,13,14,15. These models assume that individuals are the same in all relevant aspects. Here we introduce a general framework to study direct reciprocity among unequal individuals. Our model allows for multiple sources of inequality. Subjects can differ in their endowments, their productivities and in how much they benefit from public goods. We find that extreme inequality prevents cooperation. But if subjects differ in productivity, some endowment inequality can be necessary for cooperation to prevail. Our mathematical predictions are supported by a behavioural experiment in which we vary the endowments and productivities of the subjects.
We observe that overall welfare is maximized when the two sources of heterogeneity are aligned, such that more productive individuals receive higher endowments. By contrast, when endowments and productivities are misaligned, cooperation quickly breaks down. Our findings have implications for policy-makers concerned with equity, efficiency and the provisioning of public goods.}, author = {Hauser, Oliver P. and Hilbe, Christian and Chatterjee, Krishnendu and Nowak, Martin A.}, issn = {14764687}, journal = {Nature}, number = {7770}, pages = {524--527}, publisher = {Springer Nature}, title = {{Social dilemmas among unequals}}, doi = {10.1038/s41586-019-1488-5}, volume = {572}, year = {2019}, } @inproceedings{6942, abstract = {Graph games and Markov decision processes (MDPs) are standard models in reactive synthesis and verification of probabilistic systems with nondeterminism. The class of 𝜔-regular winning conditions (e.g., safety, reachability, liveness, and parity conditions) provides a robust and expressive specification formalism for properties that arise in analysis of reactive systems. The resolutions of nondeterminism in games and MDPs are represented as strategies, and we consider succinct representation of such strategies. The decision-tree data structure from machine learning retains the flavor of decisions of strategies and allows entropy-based minimization to obtain succinct trees. However, in contrast to traditional machine-learning problems where small errors are allowed, for winning strategies in graph games and MDPs no error is allowed, and the decision tree must represent the entire strategy. In this work we propose decision trees with linear classifiers for representation of strategies in graph games and MDPs. We have implemented strategy representation using this data structure and we present experimental results for problems on graph games and MDPs, which show that this new data structure presents a much more efficient strategy representation as compared to standard decision trees.}, author = {Ashok, Pranav and Brázdil, Tomáš and Chatterjee, Krishnendu and Křetínský, Jan and Lampert, Christoph and Toman, Viktor}, booktitle = {16th International Conference on Quantitative Evaluation of Systems}, isbn = {9783030302801}, issn = {0302-9743}, location = {Glasgow, United Kingdom}, pages = {109--128}, publisher = {Springer Nature}, title = {{Strategy representation by decision trees with linear classifiers}}, doi = {10.1007/978-3-030-30281-8_7}, volume = {11785}, year = {2019}, } @inproceedings{7183, abstract = {A probabilistic vector addition system with states (pVASS) is a finite-state Markov process augmented with non-negative integer counters that can be incremented or decremented during each state transition, blocking any behaviour that would cause a counter to decrease below zero. pVASS can be used as abstractions of probabilistic programs with many decidable properties. The use of pVASS as abstractions requires the presence of nondeterminism in the model. In this paper, we develop techniques for checking fast termination of pVASS with nondeterminism. That is, for every initial configuration of size n, we consider the worst expected number of transitions needed to reach a configuration with some counter negative (the expected termination time). We show that the problem of whether the asymptotic expected termination time is linear is decidable in polynomial time for a certain natural class of pVASS with nondeterminism.
Furthermore, we show the following dichotomy: if the asymptotic expected termination time is not linear, then it is at least quadratic, i.e., in Ω(n^2).}, author = {Brázdil, Tomás and Chatterjee, Krishnendu and Kucera, Antonín and Novotný, Petr and Velan, Dominik}, booktitle = {International Symposium on Automated Technology for Verification and Analysis}, isbn = {9783030317836}, issn = {16113349}, location = {Taipei, Taiwan}, pages = {462--478}, publisher = {Springer Nature}, title = {{Deciding fast termination for probabilistic VASS with nondeterminism}}, doi = {10.1007/978-3-030-31784-3_27}, volume = {11781}, year = {2019}, } @article{7210, abstract = {The rate of biological evolution depends on the fixation probability and on the fixation time of new mutants. Intensive research has focused on identifying population structures that augment the fixation probability of advantageous mutants. But these amplifiers of natural selection typically increase fixation time. Here we study population structures that achieve a tradeoff between fixation probability and time. First, we show that no amplifiers can have an asymptotically lower absorption time than the well-mixed population. Then we design population structures that substantially augment the fixation probability with just a minor increase in fixation time. Finally, we show that those structures enable a higher effective rate of evolution than the well-mixed population provided that the rate of generating advantageous mutants is relatively low. Our work sheds light on how population structure affects the rate of evolution. Moreover, our structures could be useful for lab-based, medical, or industrial applications of evolutionary optimization.}, author = {Tkadlec, Josef and Pavlogiannis, Andreas and Chatterjee, Krishnendu and Nowak, Martin A.}, issn = {2399-3642}, journal = {Communications Biology}, publisher = {Springer Nature}, title = {{Population structure determines the tradeoff between fixation probability and fixation time}}, doi = {10.1038/s42003-019-0373-y}, volume = {2}, year = {2019}, } @inproceedings{10190, abstract = {The verification of concurrent programs remains an open challenge, as thread interaction has to be accounted for, which leads to state-space explosion. Stateless model checking battles this problem by exploring traces rather than states of the program. As there are exponentially many traces, dynamic partial-order reduction (DPOR) techniques are used to partition the trace space into equivalence classes, and explore a few representatives from each class. The standard equivalence that underlies most DPOR techniques is the happens-before equivalence; however, recent works have spawned a vivid interest in coarser equivalences. The efficiency of such approaches is a product of two parameters: (i) the size of the partitioning induced by the equivalence, and (ii) the time spent by the exploration algorithm in each class of the partitioning. In this work, we present a new equivalence, called value-happens-before, and show that it has two appealing features. First, value-happens-before is always at least as coarse as the happens-before equivalence, and can be even exponentially coarser. Second, the value-happens-before partitioning is efficiently explorable when the number of threads is bounded. We present an algorithm called value-centric DPOR (VCDPOR), which explores the underlying partitioning using polynomial time per class.
Finally, we perform an experimental evaluation of VCDPOR on various benchmarks, and compare it against other state-of-the-art approaches. Our results show that value-happens-before typically induces a significant reduction in the size of the underlying partitioning, which leads to a considerable reduction in the running time for exploring the whole partitioning.}, author = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Toman, Viktor}, booktitle = {Proceedings of the 34th ACM International Conference on Object-Oriented Programming, Systems, Languages, and Applications}, issn = {2475-1421}, keywords = {safety, risk, reliability and quality, software}, location = {Athens, Greece}, publisher = {ACM}, title = {{Value-centric dynamic partial order reduction}}, doi = {10.1145/3360550}, volume = {3}, year = {2019}, } @inproceedings{7402, abstract = {Graph planning gives rise to fundamental algorithmic questions such as shortest path, traveling salesman problem, etc. A classical problem in discrete planning is to consider a weighted graph and construct a path that maximizes the sum of weights for a given time horizon T. However, in many scenarios, the time horizon is not fixed, but the stopping time is chosen according to some distribution such that the expected stopping time is T. If the stopping time distribution is not known, then to ensure robustness, the distribution is chosen by an adversary, to represent the worst-case scenario. A stationary plan always chooses the same outgoing edge at every vertex. For fixed horizon or fixed stopping-time distribution, stationary plans are not sufficient for optimality. Quite surprisingly, we show that when an adversary chooses the stopping-time distribution with expected stopping time T, then stationary plans are sufficient. While computing optimal stationary plans for fixed horizon is NP-complete, we show that computing optimal stationary plans under adversarial stopping-time distribution can be achieved in polynomial time. Consequently, our polynomial-time algorithm for adversarial stopping time also computes an optimal plan among all possible plans.}, author = {Chatterjee, Krishnendu and Doyen, Laurent}, booktitle = {34th Annual ACM/IEEE Symposium on Logic in Computer Science}, isbn = {9781728136080}, location = {Vancouver, BC, Canada}, pages = {1--13}, publisher = {IEEE}, title = {{Graph planning with expected finite horizon}}, doi = {10.1109/lics.2019.8785706}, year = {2019}, } @unpublished{7950, abstract = {The input to the token swapping problem is a graph with vertices v_1, v_2, ..., v_n, and n tokens with labels 1, 2, ..., n, one on each vertex. The goal is to get token i to vertex v_i for all i = 1, ..., n using a minimum number of swaps, where a swap exchanges the tokens on the endpoints of an edge. Token swapping on a tree, also known as “sorting with a transposition tree,” is not known to be in P, nor is it known to be NP-complete. We present some partial results: 1. An optimum swap sequence may need to perform a swap on a leaf vertex that has the correct token (a “happy leaf”), disproving a conjecture of Vaughan. 2. Any algorithm that fixes happy leaves—as all known approximation algorithms for the problem do—has approximation factor at least 4/3. Furthermore, the two best-known 2-approximation algorithms have approximation factor exactly 2. 3. A generalized problem—weighted coloured token swapping—is NP-complete on trees, but solvable in polynomial time on paths and stars. In this version, tokens and vertices have colours, and colours have weights.
The goal is to get every token to a vertex of the same colour, and the cost of a swap is the sum of the weights of the two tokens involved.}, author = {Biniaz, Ahmad and Jain, Kshitij and Lubiw, Anna and Masárová, Zuzana and Miltzow, Tillmann and Mondal, Debajyoti and Naredla, Anurag Murty and Tkadlec, Josef and Turcotte, Alexi}, booktitle = {arXiv}, title = {{Token swapping on trees}}, year = {2019}, } @inproceedings{6780, abstract = {In this work, we consider the almost-sure termination problem for probabilistic programs, which asks whether a given probabilistic program terminates with probability 1. Scalable approaches for program analysis often rely on modularity as their theoretical basis. In non-probabilistic programs, the classical variant rule (V-rule) of Floyd-Hoare logic provides the foundation for modular analysis. Extension of this rule to almost-sure termination of probabilistic programs is quite tricky, and a probabilistic variant was proposed in [16]. While the proposed probabilistic variant cautiously addresses the key issue of integrability, we show that the proposed modular rule is still not sound for almost-sure termination of probabilistic programs. Besides establishing unsoundness of the previous rule, our contributions are as follows: First, we present a sound modular rule for almost-sure termination of probabilistic programs. Our approach is based on a novel notion of descent supermartingales. Second, for algorithmic approaches, we consider descent supermartingales that are linear and show that they can be synthesized in polynomial time. Finally, we present experimental results on a variety of benchmarks and several natural examples that model various types of nested while loops in probabilistic programs and demonstrate that our approach is able to efficiently prove their almost-sure termination property.}, author = {Huang, Mingzhang and Fu, Hongfei and Chatterjee, Krishnendu and Goharshady, Amir Kafshdar}, booktitle = {Proceedings of the 34th ACM International Conference on Object-Oriented Programming, Systems, Languages, and Applications }, location = {Athens, Greece}, publisher = {ACM}, title = {{Modular verification for almost-sure termination of probabilistic programs}}, doi = {10.1145/3360555}, volume = {3}, year = {2019}, } @article{6380, abstract = {There is a huge gap between the speeds of modern caches and main memories, and therefore cache misses account for a considerable loss of efficiency in programs. The predominant technique to address this issue has been Data Packing: data elements that are frequently accessed within time proximity are packed into the same cache block, thereby minimizing accesses to the main memory. We consider the algorithmic problem of Data Packing on a two-level memory system. Given a reference sequence R of accesses to data elements, the task is to partition the elements into cache blocks such that the number of cache misses on R is minimized. The problem is notoriously difficult: it is NP-hard even when the cache has size 1, and is hard to approximate for any cache size larger than 4. Therefore, all existing techniques for Data Packing are based on heuristics and lack theoretical guarantees. In this work, we present the first positive theoretical results for Data Packing, along with new and stronger negative results.
We consider the problem through the lens of the underlying access hypergraphs, which are hypergraphs of affinities between the data elements, where the order of an access hypergraph corresponds to the size of the affinity group. We study the problem parameterized by the treewidth of access hypergraphs, which is a standard notion in graph theory to measure the closeness of a graph to a tree. Our main results are as follows: We show there is a number q* depending on the cache parameters such that (a) if the access hypergraph of order q* has constant treewidth, then there is a linear-time algorithm for Data Packing; (b) the Data Packing problem remains NP-hard even if the access hypergraph of order q*-1 has constant treewidth. Thus, we establish a fine-grained dichotomy depending on a single parameter, namely, the highest order among access hypergraphs that have constant treewidth; and establish the optimal value q* of this parameter. Finally, we present an experimental evaluation of a prototype implementation of our algorithm. Our results demonstrate that, in practice, access hypergraphs of many commonly-used algorithms have small treewidth. We compare our approach with several state-of-the-art heuristic-based algorithms and show that our algorithm leads to significantly fewer cache misses. }, author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Okati, Nastaran and Pavlogiannis, Andreas}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, number = {POPL}, publisher = {ACM}, title = {{Efficient parameterized algorithms for data packing}}, doi = {10.1145/3290366}, volume = {3}, year = {2019}, } @inproceedings{6056, abstract = {In today's programmable blockchains, smart contracts are limited to being deterministic and non-probabilistic. This lack of randomness is a consequential limitation, given that a wide variety of real-world financial contracts, such as casino games and lotteries, depend entirely on randomness. As a result, several ad-hoc random number generation approaches have been developed to be used in smart contracts. These include ideas such as using an oracle or relying on the block hash. However, these approaches are manipulatable, i.e. their output can be tampered with by parties who might not be neutral, such as the owner of the oracle or the miners.We propose a novel game-theoretic approach for generating provably unmanipulatable pseudorandom numbers on the blockchain. Our approach allows smart contracts to access a trustworthy source of randomness that does not rely on potentially compromised miners or oracles, hence enabling the creation of a new generation of smart contracts that are not limited to being non-probabilistic and can be drawn from the much more general class of probabilistic programs.}, author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Pourdamghani, Arash}, booktitle = {IEEE International Conference on Blockchain and Cryptocurrency}, location = {Seoul, Korea}, publisher = {IEEE}, title = {{Probabilistic smart contracts: Secure randomness on the blockchain}}, doi = {10.1109/BLOC.2019.8751326}, year = {2019}, } @inproceedings{6378, abstract = {In today's cryptocurrencies, Hashcash proof of work is the most commonly-adopted approach to mining. In Hashcash, when a miner decides to add a block to the chain, she has to solve the difficult computational puzzle of inverting a hash function.
While Hashcash has been successfully adopted in both Bitcoin and Ethereum, it has attracted significant and harsh criticism due to its massive waste of electricity, its carbon footprint and environmental effects, and the inherent lack of usefulness in inverting a hash function. Various other mining protocols have been suggested, including proof of stake, in which a miner's chance of adding the next block is proportional to her current balance. However, such protocols lead to a higher entry cost for new miners who might not yet have any stake in the cryptocurrency, and can in the worst case lead to an oligopoly, where the rich have complete control over mining. In this paper, we propose Hybrid Mining: a new mining protocol that combines solving real-world useful problems with Hashcash. Our protocol allows new miners to join the network by taking part in Hashcash mining without having to own an initial stake. It also allows nodes of the network to submit hard computational problems whose solutions are of interest in the real world, e.g.~protein folding problems. Then, miners can choose to compete in solving these problems, in lieu of Hashcash, for adding a new block. Hence, Hybrid Mining incentivizes miners to solve useful problems, such as hard computational problems arising in biology, in a distributed manner. It also gives researchers in other areas an easy-to-use tool to outsource their hard computations to the blockchain network, which has enormous computational power, by paying a reward to the miner who solves the problem for them. Moreover, our protocol provides strong security guarantees and is at least as resilient to double spending as Bitcoin.}, author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Pourdamghani, Arash}, booktitle = {Proceedings of the 34th ACM Symposium on Applied Computing}, isbn = {9781450359337}, location = {Limassol, Cyprus}, pages = {374--381}, publisher = {ACM}, title = {{Hybrid Mining: Exploiting blockchain’s computational power for distributed problem solving}}, doi = {10.1145/3297280.3297319}, volume = {Part F147772}, year = {2019}, } @inproceedings{6175, abstract = {We consider the problem of expected cost analysis over nondeterministic probabilistic programs, which aims at automated methods for analyzing the resource usage of such programs. Previous approaches for this problem could only handle nonnegative bounded costs. However, in many scenarios, such as queuing networks or analysis of cryptocurrency protocols, both positive and negative costs are necessary and the costs are unbounded as well. In this work, we present a sound and efficient approach to obtain polynomial bounds on the expected accumulated cost of nondeterministic probabilistic programs. Our approach can handle (a) general positive and negative costs with bounded updates in variables; and (b) nonnegative costs with general updates to variables. We show that several natural examples which could not be handled by previous approaches are captured in our framework. Moreover, our approach leads to an efficient polynomial-time algorithm, while no previous approach for cost analysis of probabilistic programs could guarantee polynomial runtime.
Finally, we show the effectiveness of our approach using experimental results on a variety of programs for which we efficiently synthesize tight resource-usage bounds.}, author = {Wang, Peixin and Fu, Hongfei and Goharshady, Amir Kafshdar and Chatterjee, Krishnendu and Qin, Xudong and Shi, Wenjun}, booktitle = {PLDI 2019: Proceedings of the 40th ACM SIGPLAN Conference on Programming Language Design and Implementation}, keywords = {Program Cost Analysis, Program Termination, Probabilistic Programs, Martingales}, location = {Phoenix, AZ, United States}, pages = {204--220}, publisher = {Association for Computing Machinery}, title = {{Cost analysis of nondeterministic probabilistic programs}}, doi = {10.1145/3314221.3314581}, year = {2019}, } @inproceedings{6490, abstract = {Smart contracts are programs that are stored and executed on the Blockchain and can receive, manage and transfer money (cryptocurrency units). Two important problems regarding smart contracts are formal analysis and compiler optimization. Formal analysis is extremely important, because smart contracts hold funds worth billions of dollars and their code is immutable after deployment. Hence, an undetected bug can cause significant financial losses. Compiler optimization is also crucial, because every action of a smart contract has to be executed by every node in the Blockchain network. Therefore, optimizations in compiling smart contracts can lead to significant savings in computation, time and energy. Two classical approaches in program analysis and compiler optimization are intraprocedural and interprocedural analysis. In intraprocedural analysis, each function is analyzed separately, while interprocedural analysis considers the entire program. In both cases, the analyses are usually reduced to graph problems over the control flow graph (CFG) of the program. These graph problems are often computationally expensive. Hence, there has been ample research on exploiting structural properties of CFGs for efficient algorithms. One such well-studied property is the treewidth, which is a measure of tree-likeness of graphs. It is known that intraprocedural CFGs of structured programs have treewidth at most 6, whereas the interprocedural treewidth cannot be bounded. This result has been used as a basis for many efficient intraprocedural analyses. In this paper, we explore the idea of exploiting the treewidth of smart contracts for formal analysis and compiler optimization. First, similar to classical programs, we show that the intraprocedural treewidth of structured Solidity and Vyper smart contracts is at most 9. Second, for global analysis, we prove that the interprocedural treewidth of structured smart contracts is bounded by 10 and, in sharp contrast with classical programs, treewidth-based algorithms can be easily applied for interprocedural analysis. Finally, we supplement our theoretical results with experiments using a tool we implemented for computing treewidth of smart contracts and show that the treewidth is much lower in practice. We use 36,764 real-world Ethereum smart contracts as benchmarks and find that they have an average treewidth of at most 3.35 for the intraprocedural case and 3.65 for the interprocedural case. 
}, author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Goharshady, Ehsan Kafshdar}, booktitle = {Proceedings of the 34th ACM Symposium on Applied Computing}, isbn = {9781450359337}, location = {Limassol, Cyprus}, pages = {400--408}, publisher = {ACM}, title = {{The treewidth of smart contracts}}, doi = {10.1145/3297280.3297322}, volume = {Part F147772}, year = {2019}, } @article{7158, abstract = {Interprocedural analysis is at the heart of numerous applications in programming languages, such as alias analysis, constant propagation, and so on. Recursive state machines (RSMs) are standard models for interprocedural analysis. We consider a general framework with RSMs where the transitions are labeled from a semiring and path properties are algebraic with semiring operations. RSMs with algebraic path properties can model interprocedural dataflow analysis problems, the shortest path problem, the most probable path problem, and so on. The traditional algorithms for interprocedural analysis focus on path properties where the starting point is fixed as the entry point of a specific method. In this work, we consider possible multiple queries as required in many applications such as alias analysis. The study of multiple queries allows us to bring in an important algorithmic distinction between the resource usage of the one-time preprocessing and that of each individual query. The second aspect we consider is that the control flow graphs for most programs have constant treewidth. Our main contributions are simple and implementable algorithms that support multiple queries for algebraic path properties for RSMs that have constant treewidth. Our theoretical results show that our algorithms have small additional one-time preprocessing but can answer subsequent queries significantly faster as compared to the current algorithmic solutions for interprocedural dataflow analysis. We have also implemented our algorithms and evaluated their performance for on-demand interprocedural dataflow analysis on various domains, such as live variable analysis and reaching definitions, on a standard benchmark set. Our experimental results align with our theoretical statements and show that after a lightweight preprocessing, on-demand queries are answered much faster than the standard existing algorithmic approaches. }, author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Goyal, Prateesh and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas}, issn = {0164-0925}, journal = {ACM Transactions on Programming Languages and Systems}, number = {4}, publisher = {ACM}, title = {{Faster algorithms for dynamic algebraic queries in basic RSMs with constant treewidth}}, doi = {10.1145/3363525}, volume = {41}, year = {2019}, } @article{7014, abstract = {We study the problem of developing efficient approaches for proving worst-case bounds of non-deterministic recursive programs. Ranking functions are sound and complete for proving termination and worst-case bounds of nonrecursive programs. First, we apply ranking functions to recursion, resulting in measure functions. We show that measure functions provide a sound and complete approach to prove worst-case bounds of non-deterministic recursive programs. Our second contribution is the synthesis of measure functions in nonpolynomial forms.
We show that non-polynomial measure functions with logarithm and exponentiation can be synthesized through abstraction of logarithmic or exponential terms, Farkas' Lemma, and Handelman's Theorem using linear programming. While previous methods obtain worst-case polynomial bounds, our approach can synthesize bounds of the form $\mathcal{O}(n\log n)$ as well as $\mathcal{O}(n^r)$ where $r$ is not an integer. We present experimental results to demonstrate that our approach can efficiently obtain worst-case bounds of classical recursive algorithms such as (i) Merge-Sort and the divide-and-conquer algorithm for the Closest-Pair problem, where we obtain an $\mathcal{O}(n \log n)$ worst-case bound, and (ii) Karatsuba's algorithm for polynomial multiplication and Strassen's algorithm for matrix multiplication, where we obtain an $\mathcal{O}(n^r)$ bound where $r$ is not an integer and is close to the best-known bounds for the respective algorithms.}, author = {Chatterjee, Krishnendu and Fu, Hongfei and Goharshady, Amir Kafshdar}, journal = {ACM Transactions on Programming Languages and Systems}, number = {4}, publisher = {ACM}, title = {{Non-polynomial worst-case analysis of recursive programs}}, doi = {10.1145/3339984}, volume = {41}, year = {2019}, } @inproceedings{10883, abstract = {Solving parity games, which are equivalent to modal μ-calculus model checking, is a central algorithmic problem in formal methods, with applications in reactive synthesis, program repair, verification of branching-time properties, etc. Besides the standard computation model with the explicit representation of games, another important theoretical model of computation is that of set-based symbolic algorithms. Set-based symbolic algorithms use basic set operations and one-step predecessor operations on the implicit description of games, rather than the explicit representation. The significance of symbolic algorithms is that they provide scalable algorithms for large finite-state systems, as well as for infinite-state systems with finite quotient. Consider parity games on graphs with n vertices and parity conditions with d priorities. While there is a rich literature of explicit algorithms for parity games, the main results for set-based symbolic algorithms are as follows: (a) the basic algorithm that requires O(n^d) symbolic operations and O(d) symbolic space; and (b) an improved algorithm that requires O(n^{d/3+1}) symbolic operations and O(n) symbolic space. In this work, our contributions are as follows: (1) We present a black-box set-based symbolic algorithm based on the explicit progress measure algorithm. Two important consequences of our algorithm are as follows: (a) a set-based symbolic algorithm for parity games that requires quasi-polynomially many symbolic operations and O(n) symbolic space; and (b) any future improvement in progress-measure-based explicit algorithms immediately implies an efficiency improvement in our set-based symbolic algorithm for parity games. (2) We present a set-based symbolic algorithm that requires quasi-polynomially many symbolic operations and O(d · log n) symbolic space.
Moreover, for the important special case of d ≤ log n, our algorithm requires only polynomially many symbolic operations and poly-logarithmic symbolic space.}, author = {Chatterjee, Krishnendu and Dvořák, Wolfgang and Henzinger, Monika H and Svozil, Alexander}, booktitle = {22nd International Conference on Logic for Programming, Artificial Intelligence and Reasoning}, issn = {2398-7340}, location = {Awassa, Ethiopia}, pages = {233--253}, publisher = {EasyChair}, title = {{Quasipolynomial set-based symbolic algorithms for parity games}}, doi = {10.29007/5z5k}, volume = {57}, year = {2018}, } @inproceedings{325, abstract = {Probabilistic programs extend classical imperative programs with real-valued random variables and random branching. The most basic liveness property for such programs is the termination property. The qualitative (aka almost-sure) termination problem asks whether a given program terminates with probability 1. While ranking functions provide a sound and complete method for non-probabilistic programs, their extension to probabilistic programs is achieved via ranking supermartingales (RSMs). Although deep theoretical results have been established about RSMs, their application to probabilistic programs with nondeterminism has been limited only to programs of restricted control-flow structure. For non-probabilistic programs, lexicographic ranking functions provide a compositional and practical approach for termination analysis of real-world programs. In this work we introduce lexicographic RSMs and show that they present a sound method for almost-sure termination of probabilistic programs with nondeterminism. We show that lexicographic RSMs provide a tool for compositional reasoning about almost-sure termination, and for probabilistic programs with linear arithmetic they can be synthesized efficiently (in polynomial time). We also show that with additional restrictions even asymptotic bounds on expected termination time can be obtained through lexicographic RSMs. Finally, we present experimental results on benchmarks adapted from previous work to demonstrate the effectiveness of our approach.}, author = {Agrawal, Sheshansh and Chatterjee, Krishnendu and Novotny, Petr}, location = {Los Angeles, CA, USA}, number = {POPL}, publisher = {ACM}, title = {{Lexicographic ranking supermartingales: an efficient approach to termination of probabilistic programs}}, doi = {10.1145/3158122}, volume = {2}, year = {2018}, } @inbook{59, abstract = {Graph-based games are an important tool in computer science. They have applications in synthesis, verification, refinement, and far beyond. We review graph-based games with objectives on infinite plays. We give definitions and algorithms to solve the games and to give a winning strategy. The objectives we consider are mostly Boolean, but we also look at quantitative graph-based games and their objectives. Synthesis aims to turn temporal logic specifications into correct reactive systems. We explain the reduction of synthesis to graph-based games (or equivalently tree automata) using synthesis of LTL specifications as an example. We treat the classical approach that uses determinization of parity automata and more modern approaches.}, author = {Bloem, Roderick and Chatterjee, Krishnendu and Jobstmann, Barbara}, booktitle = {Handbook of Model Checking}, editor = {Henzinger, Thomas A and Clarke, Edmund M.
and Veith, Helmut and Bloem, Roderick}, isbn = {978-3-319-10574-1}, pages = {921 -- 962}, publisher = {Springer}, title = {{Graph games and reactive synthesis}}, doi = {10.1007/978-3-319-10575-8_27}, year = {2018}, } @inbook{86, abstract = {Responsiveness—the requirement that every request to a system be eventually handled—is one of the fundamental liveness properties of a reactive system. Average response time is a quantitative measure for the responsiveness requirement commonly used in performance evaluation. We show how average response time can be computed on state-transition graphs, on Markov chains, and on game graphs. In all three cases, we give polynomial-time algorithms.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, booktitle = {Principles of Modeling}, editor = {Lohstroh, Marten and Derler, Patricia and Sirjani, Marjan}, pages = {143 -- 161}, publisher = {Springer}, title = {{Computing average response time}}, doi = {10.1007/978-3-319-95246-8_9}, volume = {10760}, year = {2018}, } @article{454, abstract = {Direct reciprocity is a mechanism for cooperation among humans. Many of our daily interactions are repeated. We interact repeatedly with our family, friends, colleagues, members of the local and even global community. In the theory of repeated games, it is a tacit assumption that the various games that a person plays simultaneously have no effect on each other. Here we introduce a general framework that allows us to analyze “crosstalk” between a player’s concurrent games. In the presence of crosstalk, the action a person experiences in one game can alter the person’s decision in another. We find that crosstalk impedes the maintenance of cooperation and requires stronger levels of forgiveness. The magnitude of the effect depends on the population structure. In more densely connected social groups, crosstalk has a stronger effect. A harsh retaliator, such as Tit-for-Tat, is unable to counteract crosstalk. The crosstalk framework provides a unified interpretation of direct and upstream reciprocity in the context of repeated games.}, author = {Reiter, Johannes and Hilbe, Christian and Rand, David and Chatterjee, Krishnendu and Nowak, Martin}, journal = {Nature Communications}, number = {1}, publisher = {Nature Publishing Group}, title = {{Crosstalk in concurrent repeated games impedes direct reciprocity and requires stronger levels of forgiveness}}, doi = {10.1038/s41467-017-02721-8}, volume = {9}, year = {2018}, } @inproceedings{143, abstract = {Vector Addition Systems with States (VASS) provide a well-known and fundamental model for the analysis of concurrent processes and parameterized systems, and are also used as abstract models of programs in resource bound analysis. In this paper we study the problem of obtaining asymptotic bounds on the termination time of a given VASS. In particular, we focus on the practically important case of obtaining polynomial bounds on termination time. Our main contributions are as follows: First, we present a polynomial-time algorithm for deciding whether a given VASS has a linear asymptotic complexity. We also show that if the complexity of a VASS is not linear, it is at least quadratic. Second, we classify VASS according to quantitative properties of their cycles. We show that certain singularities in these properties are the key reason for non-polynomial asymptotic complexity of VASS.
In the absence of singularities, we show that the asymptotic complexity is always polynomial and of the form Θ(n^k) for some integer k ≤ d, where d is the dimension of the VASS. We present a polynomial-time algorithm computing the optimal k. For general VASS, the same algorithm, which is based on a complete technique for the construction of ranking functions in VASS, produces a valid lower bound, i.e., a k such that the termination complexity is Ω(n^k). Our results are based on new insights into the geometry of VASS dynamics, which hold the potential for further applicability to VASS analysis.}, author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Kučera, Antonín and Novotny, Petr and Velan, Dominik and Zuleger, Florian}, isbn = {978-1-4503-5583-4}, location = {Oxford, United Kingdom}, pages = {185 -- 194}, publisher = {IEEE}, title = {{Efficient algorithms for asymptotic bounds on termination time in VASS}}, doi = {10.1145/3209108.3209191}, volume = {F138033}, year = {2018}, } @article{157, abstract = {Social dilemmas occur when incentives for individuals are misaligned with group interests 1-7. According to the 'tragedy of the commons', these misalignments can lead to overexploitation and collapse of public resources. The resulting behaviours can be analysed with the tools of game theory 8. The theory of direct reciprocity 9-15 suggests that repeated interactions can alleviate such dilemmas, but previous work has assumed that the public resource remains constant over time. Here we introduce the idea that the public resource is instead changeable and depends on the strategic choices of individuals. An intuitive scenario is that cooperation increases the public resource, whereas defection decreases it. Thus, cooperation allows the possibility of playing a more valuable game with higher payoffs, whereas defection leads to a less valuable game. We analyse this idea using the theory of stochastic games 16-19 and evolutionary game theory. We find that the dependence of the public resource on previous interactions can greatly enhance the propensity for cooperation. For these results, the interaction between reciprocity and payoff feedback is crucial: neither repeated interactions in a constant environment nor single interactions in a changing environment yield similar cooperation rates. Our framework shows which feedbacks between exploitation and environment - either naturally occurring or designed - help to overcome social dilemmas.}, author = {Hilbe, Christian and Šimsa, Štepán and Chatterjee, Krishnendu and Nowak, Martin}, journal = {Nature}, number = {7713}, pages = {246 -- 249}, publisher = {Nature Publishing Group}, title = {{Evolution of cooperation in stochastic games}}, doi = {10.1038/s41586-018-0277-x}, volume = {559}, year = {2018}, } @inproceedings{310, abstract = {A model of computation that is widely used in the formal analysis of reactive systems is that of symbolic algorithms. In this model, the access to the input graph is restricted to consist of symbolic operations, which are expensive in comparison to the standard RAM operations. We give lower bounds on the number of symbolic operations for basic graph problems such as the computation of the strongly connected components and of the approximate diameter, as well as for fundamental problems in model checking such as safety, liveness, and coliveness. Our lower bounds are linear in the number of vertices of the graph, even for constant-diameter graphs.
No lower bounds on the number of symbolic operations were previously known for any of these problems. The lower bounds show an interesting separation of these problems from the reachability problem, which can be solved with O(D) symbolic operations, where D is the diameter of the graph. Additionally, we present an approximation algorithm for the graph diameter which requires Õ(n/D) symbolic steps to achieve a (1+ϵ)-approximation for any constant ϵ > 0. This compares to O(n/D) symbolic steps for the (naive) exact algorithm and O(D) symbolic steps for a 2-approximation. Finally, we also give a refined analysis of the strongly connected components algorithm of [15], showing that it uses an optimal number of symbolic steps that is proportional to the sum of the diameters of the strongly connected components.}, author = {Chatterjee, Krishnendu and Dvorák, Wolfgang and Henzinger, Monika H and Loitzenbauer, Veronika}, location = {New Orleans, Louisiana, United States}, pages = {2341 -- 2356}, publisher = {ACM}, title = {{Lower bounds for symbolic computation on graphs: Strongly connected components, liveness, safety, and diameter}}, doi = {10.1137/1.9781611975031.151}, year = {2018}, } @inproceedings{5679, abstract = {We study the almost-sure termination problem for probabilistic programs. First, we show that supermartingales with lower bounds on conditional absolute difference provide a sound approach for the almost-sure termination problem. Moreover, using this approach we can obtain explicit optimal bounds on tail probabilities of non-termination within a given number of steps. Second, we present a new approach based on the Central Limit Theorem for the almost-sure termination problem, and show that this approach can establish almost-sure termination of programs which none of the existing approaches can handle. Finally, we discuss algorithmic approaches for the two above methods that lead to automated analysis techniques for almost-sure termination of probabilistic programs.}, author = {Huang, Mingzhang and Fu, Hongfei and Chatterjee, Krishnendu}, editor = {Ryu, Sukyoung}, isbn = {9783030027674}, issn = {03029743}, location = {Wellington, New Zealand}, pages = {181--201}, publisher = {Springer}, title = {{New approaches for almost-sure termination of probabilistic programs}}, doi = {10.1007/978-3-030-02768-1_11}, volume = {11275}, year = {2018}, } @article{419, abstract = {Reciprocity is a major factor in human social life and accounts for a large part of cooperation in our communities. Direct reciprocity arises when repeated interactions occur between the same individuals. The framework of iterated games formalizes this phenomenon. Despite being introduced more than five decades ago, the concept keeps offering beautiful surprises. Recent theoretical research driven by new mathematical tools has proposed a remarkable dichotomy among the crucial strategies: successful individuals either act as partners or as rivals. Rivals strive for unilateral advantages by applying selfish or extortionate strategies. Partners aim to share the payoff for mutual cooperation, but are ready to fight back when being exploited. Which of these behaviours evolves depends on the environment. Whereas small population sizes and a limited number of rounds favour rivalry, partner strategies are selected when populations are large and relationships stable. Only partners allow for evolution of cooperation, while the rivals' attempt to put themselves first leads to defection. Hilbe et al.
synthesize recent theoretical work on zero-determinant and ‘rival’ versus ‘partner’ strategies in social dilemmas. They describe the environments under which these contrasting selfish or cooperative strategies emerge in evolution.}, author = {Hilbe, Christian and Chatterjee, Krishnendu and Nowak, Martin}, journal = {Nature Human Behaviour}, pages = {469–477}, publisher = {Nature Publishing Group}, title = {{Partners and rivals in direct reciprocity}}, doi = {10.1038/s41562-018-0320-9}, volume = {2}, year = {2018}, } @inproceedings{79, abstract = {Markov Decision Processes (MDPs) are a popular class of models suitable for solving control decision problems in probabilistic reactive systems. We consider parametric MDPs (pMDPs) that include parameters in some of the transition probabilities to account for stochastic uncertainties of the environment such as noise or input disturbances. We study pMDPs with reachability objectives where the parameter values are unknown and impossible to measure directly during execution, but a probability distribution over the parameter values is known. We study, for the first time, the computation of parameter-independent strategies that are expectation optimal, i.e., optimize the expected reachability probability under the probability distribution over the parameters. We present an encoding of our problem to partially observable MDPs (POMDPs), i.e., a reduction of our problem to computing optimal strategies in POMDPs. We evaluate our method experimentally on several benchmarks: a motivating (repeated) learner model; a series of benchmarks of varying configurations of a robot moving on a grid; and a consensus protocol.}, author = {Arming, Sebastian and Bartocci, Ezio and Chatterjee, Krishnendu and Katoen, Joost P and Sokolova, Ana}, location = {Beijing, China}, pages = {53--70}, publisher = {Springer}, title = {{Parameter-independent strategies for pMDPs via POMDPs}}, doi = {10.1007/978-3-319-99154-2_4}, volume = {11024}, year = {2018}, } @inproceedings{297, abstract = {Graph games played by two players over finite-state graphs are central in many problems in computer science. In particular, graph games with ω-regular winning conditions, specified as parity objectives, which can express properties such as safety, liveness, and fairness, are the basic framework for verification and synthesis of reactive systems. The decisions for a player at various states of the graph game are represented as strategies. While the algorithmic problem for solving graph games with parity objectives has been widely studied, the most prominent data-structure for strategy representation in graph games has been binary decision diagrams (BDDs). However, due to the bit-level representation, BDDs do not retain the inherent flavor of the decisions of strategies, and are notoriously hard to minimize to obtain a succinct representation. In this work we propose decision trees for strategy representation in graph games. Decision trees retain the flavor of decisions of strategies and allow entropy-based minimization to obtain succinct trees. However, decision trees work in settings (e.g., probabilistic models) where errors are allowed, and overfitting of data is typically avoided. In contrast, for strategies in graph games no error is allowed, and the decision tree must represent the entire strategy. We develop new techniques to extend decision trees to overcome the above obstacles, while retaining the entropy-based techniques to obtain succinct trees.
We have implemented our techniques by extending existing decision tree solvers. We present experimental results for problems in reactive synthesis to show that decision trees provide a much more efficient data-structure for strategy representation as compared to BDDs.}, author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Kretinsky, Jan and Toman, Viktor}, location = {Thessaloniki, Greece}, pages = {385 -- 407}, publisher = {Springer}, title = {{Strategy representation by decision trees in reactive synthesis}}, doi = {10.1007/978-3-319-89960-2_21}, volume = {10805}, year = {2018}, } @inproceedings{141, abstract = {Given a model and a specification, the fundamental model-checking problem asks for algorithmic verification of whether the model satisfies the specification. We consider graphs and Markov decision processes (MDPs), which are fundamental models for reactive systems. One of the very basic specifications that arise in verification of reactive systems is the strong fairness (aka Streett) objective. Given different types of requests and corresponding grants, the objective requires that for each type, if the request event happens infinitely often, then the corresponding grant event must also happen infinitely often. All ω-regular objectives can be expressed as Streett objectives and hence they are canonical in verification. To handle the state-space explosion, symbolic algorithms are required that operate on a succinct implicit representation of the system rather than explicitly accessing the system. While explicit algorithms for graphs and MDPs with Streett objectives have been widely studied, there has been no improvement of the basic symbolic algorithms. The worst-case numbers of symbolic steps required for the basic symbolic algorithms are as follows: quadratic for graphs and cubic for MDPs. In this work we present the first sub-quadratic symbolic algorithm for graphs with Streett objectives, and our algorithm is sub-quadratic even for MDPs. Based on our algorithmic insights we present an implementation of the new symbolic approach and show that it improves the existing approach on several academic benchmark examples.}, author = {Chatterjee, Krishnendu and Henzinger, Monika H and Loitzenbauer, Veronika and Oraee, Simin and Toman, Viktor}, location = {Oxford, United Kingdom}, pages = {178--197}, publisher = {Springer}, title = {{Symbolic algorithms for graphs and Markov decision processes with fairness objectives}}, doi = {10.1007/978-3-319-96142-2_13}, volume = {10982}, year = {2018}, } @article{293, abstract = {People sometimes make their admirable deeds and accomplishments hard to spot, such as by giving anonymously or avoiding bragging. Such ‘buried’ signals are hard to reconcile with standard models of signalling or indirect reciprocity, which motivate costly pro-social behaviour by reputational gains. To explain these phenomena, we design a simple game theory model, which we call the signal-burying game. This game has the feature that senders can bury their signal by deliberately reducing the probability of the signal being observed. If the signal is observed, however, it is identified as having been buried. We show under which conditions buried signals can be maintained, using static equilibrium concepts and calculations of the evolutionary dynamics.
We apply our analysis to shed light on a number of otherwise puzzling social phenomena, including modesty, anonymous donations, subtlety in art and fashion, and overeagerness.}, author = {Hoffman, Moshe and Hilbe, Christian and Nowak, Martin}, journal = {Nature Human Behaviour}, pages = {397 -- 404}, publisher = {Nature Publishing Group}, title = {{The signal-burying game can explain why we obscure positive traits and good deeds}}, doi = {10.1038/s41562-018-0354-z}, volume = {2}, year = {2018}, } @inproceedings{5967, abstract = {The Big Match is a multi-stage two-player game. In each stage Player 1 hides one or two pebbles in his hand, and his opponent has to guess that number; Player 1 loses a point if Player 2 is correct, and otherwise he wins a point. As soon as Player 1 hides one pebble, the players cannot change their choices in any future stage. Blackwell and Ferguson (1968) give an ε-optimal strategy for Player 1 that hides, in each stage, one pebble with a probability that depends on the entire past history. Any strategy that depends just on the clock or on a finite memory is worthless. The long-standing natural open problem has been whether every strategy that depends just on the clock and a finite memory is worthless. We prove that there is such a strategy that is ε-optimal. In fact, we show that just two states of memory are sufficient. }, author = {Hansen, Kristoffer Arnsfelt and Ibsen-Jensen, Rasmus and Neyman, Abraham}, booktitle = {Proceedings of the 2018 ACM Conference on Economics and Computation - EC '18}, isbn = {9781450358293}, location = {Ithaca, NY, United States}, pages = {149--150}, publisher = {ACM Press}, title = {{The Big Match with a clock and a bit of memory}}, doi = {10.1145/3219166.3219198}, year = {2018}, } @article{5993, abstract = {In this article, we consider the termination problem of probabilistic programs with real-valued variables. The questions concerned are: qualitative ones that ask (i) whether the program terminates with probability 1 (almost-sure termination) and (ii) whether the expected termination time is finite (finite termination); and quantitative ones that ask (i) to approximate the expected termination time (expectation problem) and (ii) to compute a bound B such that the probability not to terminate after B steps decreases exponentially (concentration problem). To solve these questions, we utilize the notion of ranking supermartingales, which is a powerful approach for proving termination of probabilistic programs. In detail, we focus on algorithmic synthesis of linear ranking-supermartingales over affine probabilistic programs (Apps) with both angelic and demonic non-determinism. An important subclass of Apps is LRApp, which is defined as the class of all Apps over which a linear ranking-supermartingale exists. Our main contributions are as follows. Firstly, we show that the membership problem of LRApp (i) can be decided in polynomial time for Apps with at most demonic non-determinism, and (ii) is NP-hard and in PSPACE for Apps with angelic non-determinism. Moreover, the NP-hardness result holds already for Apps without probability and demonic non-determinism. Secondly, we show that the concentration problem over LRApp can be solved in the same complexity as for the membership problem of LRApp. Finally, we show that the expectation problem over LRApp can be solved in 2EXPTIME and is PSPACE-hard even for Apps without probability and non-determinism (i.e., deterministic programs).
Our experimental results demonstrate the effectiveness of our approach to answer the qualitative and quantitative questions over Apps with at most demonic non-determinism.}, author = {Chatterjee, Krishnendu and Fu, Hongfei and Novotný, Petr and Hasheminezhad, Rouzbeh}, issn = {0164-0925}, journal = {ACM Transactions on Programming Languages and Systems}, number = {2}, publisher = {Association for Computing Machinery (ACM)}, title = {{Algorithmic analysis of qualitative and quantitative termination problems for affine probabilistic programs}}, doi = {10.1145/3174800}, volume = {40}, year = {2018}, } @inproceedings{25, abstract = {Partially observable Markov decision processes (POMDPs) are the standard models for planning under uncertainty with both finite and infinite horizon. Besides the well-known discounted-sum objective, the indefinite-horizon objective (aka Goal-POMDPs) is another classical objective for POMDPs. In this case, given a set of target states and a positive cost for each transition, the optimization objective is to minimize the expected total cost until a target state is reached. In the literature, RTDP-Bel or heuristic search value iteration (HSVI) have been used for solving Goal-POMDPs. Neither of these algorithms has theoretical convergence guarantees, and HSVI may even fail to terminate its trials. We give the following contributions: (1) We discuss the challenges introduced in Goal-POMDPs and illustrate how they prevent the original HSVI from converging. (2) We present a novel algorithm inspired by HSVI, termed Goal-HSVI, and show that our algorithm has convergence guarantees. (3) We show that Goal-HSVI outperforms RTDP-Bel on a set of well-known examples.}, author = {Horák, Karel and Bošanský, Branislav and Chatterjee, Krishnendu}, booktitle = {Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence}, location = {Stockholm, Sweden}, pages = {4764 -- 4770}, publisher = {IJCAI}, title = {{Goal-HSVI: Heuristic search value iteration for goal-POMDPs}}, doi = {10.24963/ijcai.2018/662}, volume = {2018-July}, year = {2018}, } @inproceedings{24, abstract = {Partially-observable Markov decision processes (POMDPs) with discounted-sum payoff are a standard framework to model a wide range of problems related to decision making under uncertainty. Traditionally, the goal has been to obtain policies that optimize the expectation of the discounted-sum payoff. A key drawback of the expectation measure is that even low probability events with extreme payoff can significantly affect the expectation, and thus the obtained policies are not necessarily risk-averse. An alternate approach is to optimize the probability that the payoff is above a certain threshold, which allows obtaining risk-averse policies, but ignores optimization of the expectation. We consider the expectation optimization with probabilistic guarantee (EOPG) problem, where the goal is to optimize the expectation ensuring that the payoff is above a given threshold with at least a specified probability.
We present several results on the EOPG problem, including the first algorithm to solve it.}, author = {Chatterjee, Krishnendu and Elgyütt, Adrian and Novotny, Petr and Rouillé, Owen}, location = {Stockholm, Sweden}, pages = {4692 -- 4699}, publisher = {IJCAI}, title = {{Expectation optimization with probabilistic guarantees in POMDPs with discounted-sum objectives}}, doi = {10.24963/ijcai.2018/652}, volume = {2018}, year = {2018}, } @inproceedings{34, abstract = {Partially observable Markov decision processes (POMDPs) are widely used in probabilistic planning problems in which an agent interacts with an environment using noisy and imprecise sensors. We study a setting in which the sensors are only partially defined and the goal is to synthesize “weakest” additional sensors, such that in the resulting POMDP, there is a small-memory policy for the agent that almost-surely (with probability 1) satisfies a reachability objective. We show that the problem is NP-complete, and present a symbolic algorithm by encoding the problem into SAT instances. We illustrate trade-offs between the amount of memory of the policy and the number of additional sensors on a simple example. We have implemented our approach and consider three classical POMDP examples from the literature, and show that in all the examples the number of sensors can be significantly decreased (as compared to the existing solutions in the literature) without increasing the complexity of the policies.}, author = {Chatterjee, Krishnendu and Chmelík, Martin and Topcu, Ufuk}, location = {Delft, Netherlands}, pages = {47 -- 55}, publisher = {AAAI Press}, title = {{Sensor synthesis for POMDPs with reachability objectives}}, volume = {2018}, year = {2018}, } @inproceedings{35, abstract = {We consider planning problems for graphs, Markov decision processes (MDPs), and games on graphs. While graphs represent the most basic planning model, MDPs represent interaction with nature and games on graphs represent interaction with an adversarial environment. We consider two planning problems where there are k different target sets, and the problems are as follows: (a) the coverage problem asks whether there is a plan for each individual target set; and (b) the sequential target reachability problem asks whether the targets can be reached in sequence. For the coverage problem, we present a linear-time algorithm for graphs, and a quadratic conditional lower bound for MDPs and games on graphs. For the sequential target problem, we present a linear-time algorithm for graphs, a sub-quadratic algorithm for MDPs, and a quadratic conditional lower bound for games on graphs.
Our results with conditional lower bounds establish (i) model-separation results showing that for the coverage problem MDPs and games on graphs are harder than graphs and for the sequential reachability problem games on graphs are harder than MDPs and graphs; and (ii) objective-separation results showing that for MDPs the coverage problem is harder than the sequential target problem.}, author = {Chatterjee, Krishnendu and Dvorák, Wolfgang and Henzinger, Monika H and Svozil, Alexander}, booktitle = {28th International Conference on Automated Planning and Scheduling}, location = {Delft, Netherlands}, publisher = {AAAI Press}, title = {{Algorithms and conditional lower bounds for planning problems}}, year = {2018}, } @article{738, abstract = {This paper is devoted to automatic competitive analysis of real-time scheduling algorithms for firm-deadline tasksets, where only completed tasks contribute some utility to the system. Given such a taskset T, the competitive ratio of an on-line scheduling algorithm A for T is the worst-case utility ratio of A over the utility achieved by a clairvoyant algorithm. We leverage the theory of quantitative graph games to address the competitive analysis and competitive synthesis problems. For the competitive analysis case, given any taskset T and any finite-memory on-line scheduling algorithm A, we show that the competitive ratio of A in T can be computed in polynomial time in the size of the state space of A. Our approach is flexible as it also provides ways to model meaningful constraints on the released task sequences that determine the competitive ratio. We provide an experimental study of many well-known on-line scheduling algorithms, which demonstrates the feasibility of our competitive analysis approach that effectively replaces human ingenuity (required for finding worst-case scenarios) by computing power. For the competitive synthesis case, we are just given a taskset T, and the goal is to automatically synthesize an optimal on-line scheduling algorithm A, i.e., one that guarantees the largest competitive ratio possible for T. We show how the competitive synthesis problem can be reduced to a two-player graph game with partial information, and establish that the computational complexity of solving this game is NP-complete. The competitive synthesis problem is hence in NP in the size of the state space of the non-deterministic labeled transition system encoding the taskset. Overall, the proposed framework assists in the selection of suitable scheduling algorithms for a given taskset, which is in fact the most common situation in real-time systems design.
}, author = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Kößler, Alexander and Schmid, Ulrich}, journal = {Real-Time Systems}, number = {1}, pages = {166 -- 207}, publisher = {Springer}, title = {{Automated competitive analysis of real time scheduling with graph games}}, doi = {10.1007/s11241-017-9293-4}, volume = {54}, year = {2018}, } @article{198, abstract = {We consider a class of students learning a language from a teacher. The situation can be interpreted as a group of child learners receiving input from the linguistic environment. The teacher provides sample sentences. The students try to learn the grammar from the teacher. In addition to just listening to the teacher, the students can also communicate with each other. The students hold hypotheses about the grammar and change them if they receive counter evidence. The process stops when all students have converged to the correct grammar. We study how the time to convergence depends on the structure of the classroom by introducing and evaluating various complexity measures. We find that structured communication between students, although potentially introducing confusion, can greatly reduce some of the complexity measures. Our theory can also be interpreted as applying to the scientific process, where nature is the teacher and the scientists are the students.}, author = {Ibsen-Jensen, Rasmus and Tkadlec, Josef and Chatterjee, Krishnendu and Nowak, Martin}, issn = {1742-5662}, journal = {Journal of the Royal Society Interface}, number = {140}, publisher = {The Royal Society}, title = {{Language acquisition with communication between learners}}, doi = {10.1098/rsif.2018.0073}, volume = {15}, year = {2018}, } @article{5751, abstract = {Because of the intrinsic randomness of the evolutionary process, a mutant with a fitness advantage has some chance to be selected but no certainty. Any experiment that searches for advantageous mutants will lose many of them due to random drift. It is therefore of great interest to find population structures that improve the odds of advantageous mutants. Such structures are called amplifiers of natural selection: they increase the probability that advantageous mutants are selected. Arbitrarily strong amplifiers guarantee the selection of advantageous mutants, even for very small fitness advantage. Despite intensive research over the past decade, arbitrarily strong amplifiers have remained rare. Here we show how to construct a large variety of them. Our amplifiers are so simple that they could be useful in biotechnology, when optimizing biological molecules, or as a diagnostic tool, when searching for faster dividing cells or viruses. They could also occur in natural population structures.}, author = {Pavlogiannis, Andreas and Tkadlec, Josef and Chatterjee, Krishnendu and Nowak, Martin A.}, issn = {2399-3642}, journal = {Communications Biology}, number = {1}, publisher = {Springer Nature}, title = {{Construction of arbitrarily strong amplifiers of natural selection using evolutionary graph theory}}, doi = {10.1038/s42003-018-0078-7}, volume = {1}, year = {2018}, } @inproceedings{66, abstract = {Crypto-currencies are digital assets designed to work as a medium of exchange, e.g., Bitcoin, but they are susceptible to attacks (dishonest behavior of participants). 
A framework for the analysis of attacks in crypto-currencies requires (a) modeling of game-theoretic aspects to analyze incentives for deviation from honest behavior; (b) concurrent interactions between participants; and (c) analysis of long-term monetary gains. Traditional game-theoretic approaches for the analysis of security protocols consider either qualitative temporal properties such as safety and termination, or the very special class of one-shot (stateless) games. However, to analyze general attacks on protocols for crypto-currencies, both stateful analysis and quantitative objectives are necessary. In this work our main contributions are as follows: (a) we show how a class of concurrent mean-payoff games, namely ergodic games, can model various attacks that arise naturally in crypto-currencies; (b) we present the first practical implementation of algorithms for ergodic games that scales to model realistic problems for crypto-currencies; and (c) we present experimental results showing that our framework can handle games with thousands of states and millions of transitions.}, author = {Chatterjee, Krishnendu and Goharshady, Amir and Ibsen-Jensen, Rasmus and Velner, Yaron}, isbn = {978-3-95977-087-3}, location = {Beijing, China}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Ergodic mean-payoff games for the analysis of attacks in crypto-currencies}}, doi = {10.4230/LIPIcs.CONCUR.2018.11}, volume = {118}, year = {2018}, } @inproceedings{311, abstract = {Smart contracts are computer programs that are executed by a network of mutually distrusting agents, without the need of an external trusted authority. Smart contracts handle and transfer assets of considerable value (in the form of crypto-currency like Bitcoin). Hence, it is crucial that their implementation is bug-free. We identify the utility (or expected payoff) of interacting with such smart contracts as the basic and canonical quantitative property for such contracts. We present a framework for such quantitative analysis of smart contracts. Such a formal framework poses new and novel research challenges in programming languages, as it requires modeling of game-theoretic aspects to analyze incentives for deviation from honest behavior and modeling utilities which are not specified as standard temporal properties such as safety and termination. While game-theoretic incentives have been analyzed in the security community, their analysis has been restricted to the very special case of stateless games. However, to analyze smart contracts, stateful analysis is required as it must account for the different program states of the protocol. Our main contributions are as follows: we present (i) a simplified programming language for smart contracts; (ii) an automatic translation of the programs to state-based games; (iii) an abstraction-refinement approach to solve such games; and (iv) experimental results on real-world-inspired smart contracts.}, author = {Chatterjee, Krishnendu and Goharshady, Amir and Velner, Yaron}, location = {Thessaloniki, Greece}, pages = {739 -- 767}, publisher = {Springer}, title = {{Quantitative analysis of smart contracts}}, doi = {10.1007/978-3-319-89884-1_26}, volume = {10801}, year = {2018}, } @inproceedings{6340, abstract = {We present a secure approach for maintaining and reporting credit history records on the Blockchain. Our approach removes third parties such as credit reporting agencies from the lending process and replaces them with smart contracts.
This allows customers to interact directly with the lenders or banks while ensuring the integrity, unmalleability and privacy of their credit data. Additionally, each customer has full control over complete or selective disclosure of her credit records, eliminating the risk of privacy violations or data breaches. Moreover, our approach provides strong guarantees for the lenders as well. A lender can check both correctness and completeness of the credit data disclosed to her. This is the first approach that can perform all credit reporting tasks without a central authority or changing the financial mechanisms.}, author = {Goharshady, Amir Kafshdar and Behrouz, Ali and Chatterjee, Krishnendu}, booktitle = {Proceedings of the IEEE International Conference on Blockchain}, isbn = {978-1-5386-7975-3}, location = {Halifax, Canada}, pages = {1343--1348}, publisher = {IEEE}, title = {{Secure Credit Reporting on the Blockchain}}, doi = {10.1109/Cybermatics_2018.2018.00231}, year = {2018}, } @article{6009, abstract = {We study algorithmic questions wrt algebraic path properties in concurrent systems, where the transitions of the system are labeled from a complete, closed semiring. The algebraic path properties can model dataflow analysis problems, the shortest path problem, and many other natural problems that arise in program analysis. We consider that each component of the concurrent system is a graph with constant treewidth, a property satisfied by the control-flow graphs of most programs. We allow for multiple possible queries, which arise naturally in demand driven dataflow analysis. The study of multiple queries allows us to consider the tradeoff between the resource usage of the one-time preprocessing and that of each individual query. The traditional approach constructs the product graph of all components and applies the best-known graph algorithm on the product. In this approach, even the answer to a single query requires the transitive closure (i.e., the results of all possible queries), which provides no room for tradeoff between preprocessing and query time. Our main contributions are algorithms that significantly improve the worst-case running time of the traditional approach, and provide various tradeoffs depending on the number of queries. For example, in a concurrent system of two components, the traditional approach requires hexic time in the worst case for answering one query as well as computing the transitive closure, whereas we show that with one-time preprocessing in almost cubic time, each subsequent query can be answered in at most linear time, and even the transitive closure can be computed in almost quartic time. Furthermore, we establish conditional optimality results showing that the worst-case running time of our algorithms cannot be improved without achieving major breakthroughs in graph algorithms (i.e., improving the worst-case bound for the shortest path problem in general graphs). Preliminary experimental results show that our algorithms perform favorably on several benchmarks.
}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Goharshady, Amir Kafshdar and Pavlogiannis, Andreas}, issn = {0164-0925}, journal = {ACM Transactions on Programming Languages and Systems}, number = {3}, publisher = {Association for Computing Machinery (ACM)}, title = {{Algorithms for algebraic path properties in concurrent systems of constant treewidth components}}, doi = {10.1145/3210257}, volume = {40}, year = {2018}, } @inproceedings{5977, abstract = {We consider the stochastic shortest path (SSP) problem for succinct Markov decision processes (MDPs), where the MDP consists of a set of variables, and a set of nondeterministic rules that update the variables. First, we show that several examples from the AI literature can be modeled as succinct MDPs. Then we present computational approaches for upper and lower bounds for the SSP problem: (a) for computing upper bounds, our method is polynomial-time in the implicit description of the MDP; (b) for lower bounds, we present a polynomial-time (in the size of the implicit description) reduction to quadratic programming. Our approach is applicable even to infinite-state MDPs. Finally, we present experimental results to demonstrate the effectiveness of our approach on several classical examples from the AI literature.}, author = {Chatterjee, Krishnendu and Fu, Hongfei and Goharshady, Amir and Okati, Nastaran}, booktitle = {Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence}, isbn = {978-099924112-7}, issn = {10450823}, location = {Stockholm, Sweden}, pages = {4700--4707}, publisher = {IJCAI}, title = {{Computational approaches for stochastic shortest path on succinct MDPs}}, doi = {10.24963/ijcai.2018/653}, volume = {2018}, year = {2018}, } @article{2, abstract = {Indirect reciprocity explores how humans act when their reputation is at stake, and which social norms they use to assess the actions of others. A crucial question in indirect reciprocity is which social norms can maintain stable cooperation in a society. Past research has highlighted eight such norms, called “leading-eight” strategies. This past research, however, is based on the assumption that all relevant information about other population members is publicly available and that everyone agrees on who is good or bad. Instead, here we explore the reputation dynamics when information is private and noisy. We show that under these conditions, most leading-eight strategies fail to evolve. Those leading-eight strategies that do evolve are unable to sustain full cooperation. Indirect reciprocity is a mechanism for cooperation based on shared moral systems and individual reputations. It assumes that members of a community routinely observe and assess each other and that they use this information to decide who is good or bad, and who deserves cooperation. When information is transmitted publicly, such that all community members agree on each other’s reputation, previous research has highlighted eight crucial moral systems. These “leading-eight” strategies can maintain cooperation and resist invasion by defectors. However, in real populations individuals often hold their own private views of others. Once two individuals disagree about their opinion of some third party, they may also see its subsequent actions in a different light. Their opinions may further diverge over time. Herein, we explore indirect reciprocity when information transmission is private and noisy.
We find that in the presence of perception errors, most leading-eight strategies cease to be stable. Even if a leading-eight strategy evolves, cooperation rates may drop considerably when errors are common. Our research highlights the role of reliable information and synchronized reputations to maintain stable moral systems.}, author = {Hilbe, Christian and Schmid, Laura and Tkadlec, Josef and Chatterjee, Krishnendu and Nowak, Martin}, journal = {PNAS}, number = {48}, pages = {12241--12246}, publisher = {National Academy of Sciences}, title = {{Indirect reciprocity with private, noisy, and incomplete information}}, doi = {10.1073/pnas.1810565115}, volume = {115}, year = {2018}, } @article{10418, abstract = {We present a new proof rule for proving almost-sure termination of probabilistic programs, including those that contain demonic non-determinism. An important question for a probabilistic program is whether the probability mass of all its diverging runs is zero, that is, that it terminates "almost surely". Proving that can be hard, and this paper presents a new method for doing so. It applies directly to the program's source code, even if the program contains demonic choice. Like others, we use variant functions (a.k.a. "super-martingales") that are real-valued and decrease randomly on each loop iteration; but our key innovation is that the amount as well as the probability of the decrease are parametric. We prove the soundness of the new rule, indicate where its applicability goes beyond existing rules, and explain its connection to classical results on denumerable (non-demonic) Markov chains.}, author = {McIver, Annabelle and Morgan, Carroll and Kaminski, Benjamin Lucien and Katoen, Joost P}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, location = {Los Angeles, CA, United States}, number = {POPL}, publisher = {Association for Computing Machinery}, title = {{A new proof rule for almost-sure termination}}, doi = {10.1145/3158121}, volume = {2}, year = {2017}, } @article{464, abstract = {The computation of the winning set for parity objectives and for Streett objectives in graphs as well as in game graphs is a central problem in computer-aided verification, with applications to the verification of closed systems with strong fairness conditions, the verification of open systems, checking interface compatibility, well-formedness of specifications, and the synthesis of reactive systems. We show how to compute the winning set on n vertices for (1) parity-3 (aka one-pair Streett) objectives in game graphs in time O(n^{5/2}) and for (2) k-pair Streett objectives in graphs in time O(n^2 + nk log n). For both problems this gives faster algorithms for dense graphs and represents the first improvement in asymptotic running time in 15 years.}, author = {Chatterjee, Krishnendu and Henzinger, Monika H and Loitzenbauer, Veronika}, issn = {1860-5974}, journal = {Logical Methods in Computer Science}, number = {3}, publisher = {International Federation of Computational Logic}, title = {{Improved algorithms for parity and Streett objectives}}, doi = {10.23638/LMCS-13(3:26)2017}, volume = {13}, year = {2017}, } @article{466, abstract = {We consider Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) objectives.
There exist two different views: (i) the expectation semantics, where the goal is to optimize the expected mean-payoff objective, and (ii) the satisfaction semantics, where the goal is to maximize the probability of runs such that the mean-payoff value stays above a given vector. We consider optimization with respect to both objectives at once, thus unifying the existing semantics. Precisely, the goal is to optimize the expectation while ensuring the satisfaction constraint. Our problem captures the notion of optimization with respect to strategies that are risk-averse (i.e., ensure a certain probabilistic guarantee). Our main results are as follows: First, we present algorithms for the decision problems which are always polynomial in the size of the MDP. We also show that an approximation of the Pareto-curve can be computed in time polynomial in the size of the MDP and the approximation factor, but exponential in the number of dimensions. Second, we present a complete characterization of the strategy complexity (in terms of memory bounds and randomization) required to solve our problem. }, author = {Chatterjee, Krishnendu and Křetínská, Zuzana and Kretinsky, Jan}, issn = {18605974}, journal = {Logical Methods in Computer Science}, number = {2}, publisher = {International Federation of Computational Logic}, title = {{Unifying two views on multiple mean-payoff objectives in Markov decision processes}}, doi = {10.23638/LMCS-13(2:15)2017}, volume = {13}, year = {2017}, } @article{467, abstract = {Recently there has been a significant effort to handle quantitative properties in formal verification and synthesis. While weighted automata over finite and infinite words provide a natural and flexible framework to express quantitative properties, perhaps surprisingly, some basic system properties such as average response time cannot be expressed using weighted automata or in any other known decidable formalism. In this work, we introduce nested weighted automata as a natural extension of weighted automata, which makes it possible to express important quantitative properties such as average response time. In nested weighted automata, a master automaton spins off and collects results from weighted slave automata, each of which computes a quantity along a finite portion of an infinite word. Nested weighted automata can be viewed as the quantitative analogue of monitor automata, which are used in runtime verification. We establish an almost-complete decidability picture for the basic decision problems about nested weighted automata and illustrate their applicability in several domains. In particular, nested weighted automata can be used to decide average response time properties.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, issn = {15293785}, journal = {ACM Transactions on Computational Logic (TOCL)}, number = {4}, publisher = {ACM}, title = {{Nested weighted automata}}, doi = {10.1145/3152769}, volume = {18}, year = {2017}, } @article{465, abstract = {The edit distance between two words w_1, w_2 is the minimal number of word operations (letter insertions, deletions, and substitutions) necessary to transform w_1 to w_2. The edit distance generalizes to languages L_1, L_2, where the edit distance from L_1 to L_2 is the minimal number k such that for every word from L_1 there exists a word in L_2 with edit distance at most k. We study the edit distance computation problem between pushdown automata and their subclasses.
The problem of computing edit distance to a pushdown automaton is undecidable, and in practice, the interesting question is to compute the edit distance from a pushdown automaton (the implementation, a standard model for programs with recursion) to a regular language (the specification). In this work, we present a complete picture of decidability and complexity for the following problems: (1) deciding whether, for a given threshold k, the edit distance from a pushdown automaton to a finite automaton is at most k, and (2) deciding whether the edit distance from a pushdown automaton to a finite automaton is finite. }, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Ibsen-Jensen, Rasmus and Otop, Jan}, issn = {18605974}, journal = {Logical Methods in Computer Science}, number = {3}, publisher = {International Federation of Computational Logic}, title = {{Edit distance for pushdown automata}}, doi = {10.23638/LMCS-13(3:23)2017}, volume = {13}, year = {2017}, } @article{512, abstract = {The fixation probability is the probability that a new mutant introduced in a homogeneous population eventually takes over the entire population. The fixation probability is a fundamental quantity of natural selection, and is known to depend on the population structure. Amplifiers of natural selection are population structures which increase the fixation probability of advantageous mutants, as compared to the baseline case of well-mixed populations. In this work we focus on symmetric population structures represented as undirected graphs. In the regime of undirected graphs, the strongest amplifier known has been the Star graph, and the existence of undirected graphs with stronger amplification properties has remained open for over a decade. In this work we present the Comet and Comet-swarm families of undirected graphs. We show that for a range of fitness values of the mutants, the Comet and Comet-swarm graphs have fixation probability strictly larger than the fixation probability of the Star graph, for fixed population size and in the limit of large populations, respectively. }, author = {Pavlogiannis, Andreas and Tkadlec, Josef and Chatterjee, Krishnendu and Nowak, Martin}, issn = {20452322}, journal = {Scientific Reports}, number = {1}, publisher = {Nature Publishing Group}, title = {{Amplification on undirected population structures: Comets beat stars}}, doi = {10.1038/s41598-017-00107-w}, volume = {7}, year = {2017}, } @article{10416, abstract = {A fundamental algorithmic problem at the heart of static analysis is Dyck reachability. The input is a graph where the edges are labeled with different types of opening and closing parentheses, and the reachability information is computed via paths whose parentheses are properly matched. We present new results for Dyck reachability problems with applications to alias analysis and data-dependence analysis. Our main contributions, which include improved upper bounds as well as lower bounds that establish optimality guarantees, are as follows: First, we consider Dyck reachability on bidirected graphs, which is the standard way of performing field-sensitive points-to analysis.
Given a bidirected graph with n nodes and m edges, we present: (i) an algorithm with worst-case running time O(m + n · α(n)), where α(n) is the inverse Ackermann function, improving the previously known O(n^2) time bound; (ii) a matching lower bound that shows that our algorithm is optimal wrt worst-case complexity; and (iii) an optimal average-case upper bound of O(m) time, improving the previously known O(m · log n) bound. Second, we consider the problem of context-sensitive data-dependence analysis, where the task is to obtain analysis summaries of library code in the presence of callbacks. Our algorithm preprocesses libraries in almost linear time, after which the contribution of the library in the complexity of the client analysis is only linear, and only wrt the number of call sites. Third, we prove that combinatorial algorithms for Dyck reachability on general graphs with truly sub-cubic bounds cannot be obtained without obtaining sub-cubic combinatorial algorithms for Boolean Matrix Multiplication, which is a long-standing open problem. Thus we establish that the existing combinatorial algorithms for Dyck reachability are (conditionally) optimal for general graphs. We also show that the same hardness holds for graphs of constant treewidth. Finally, we provide a prototype implementation of our algorithms for both alias analysis and data-dependence analysis. Our experimental evaluation demonstrates that the new algorithms significantly outperform all existing methods on the two problems, over real-world benchmarks.}, author = {Chatterjee, Krishnendu and Choudhary, Bhavya and Pavlogiannis, Andreas}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, location = {Los Angeles, CA, United States}, number = {POPL}, publisher = {Association for Computing Machinery}, title = {{Optimal Dyck reachability for data-dependence and alias analysis}}, doi = {10.1145/3158118}, volume = {2}, year = {2017}, } @misc{5455, abstract = {A fundamental algorithmic problem at the heart of static analysis is Dyck reachability. The input is a graph where the edges are labeled with different types of opening and closing parentheses, and the reachability information is computed via paths whose parentheses are properly matched. We present new results for Dyck reachability problems with applications to alias analysis and data-dependence analysis. Our main contributions, which include improved upper bounds as well as lower bounds that establish optimality guarantees, are as follows: First, we consider Dyck reachability on bidirected graphs, which is the standard way of performing field-sensitive points-to analysis. Given a bidirected graph with n nodes and m edges, we present: (i) an algorithm with worst-case running time O(m + n·α(n)), where α(n) is the inverse Ackermann function, improving the previously known O(n^2) time bound; (ii) a matching lower bound that shows that our algorithm is optimal wrt worst-case complexity; and (iii) an optimal average-case upper bound of O(m) time, improving the previously known O(m·log n) bound. Second, we consider the problem of context-sensitive data-dependence analysis, where the task is to obtain analysis summaries of library code in the presence of callbacks.
Our algorithm preprocesses libraries in almost linear time, after which the contribution of the library in the complexity of the client analysis is only linear, and only wrt the number of call sites. Third, we prove that combinatorial algorithms for Dyck reachability on general graphs with truly sub-cubic bounds cannot be obtained without obtaining sub-cubic combinatorial algorithms for Boolean Matrix Multiplication, which is a long-standing open problem. Thus we establish that the existing combinatorial algorithms for Dyck reachability are (conditionally) optimal for general graphs. We also show that the same hardness holds for graphs of constant treewidth. Finally, we provide a prototype implementation of our algorithms for both alias analysis and data-dependence analysis. Our experimental evaluation demonstrates that the new algorithms significantly outperform all existing methods on the two problems, over real-world benchmarks.}, author = {Chatterjee, Krishnendu and Choudhary, Bhavya and Pavlogiannis, Andreas}, issn = {2664-1690}, pages = {37}, publisher = {IST Austria}, title = {{Optimal Dyck reachability for data-dependence and alias analysis}}, doi = {10.15479/AT:IST-2017-870-v1-1}, year = {2017}, } @article{10417, abstract = {We present a new dynamic partial-order reduction method for stateless model checking of concurrent programs. A common approach for exploring program behaviors relies on enumerating the traces of the program, without storing the visited states (aka stateless exploration). As the number of distinct traces grows exponentially, dynamic partial-order reduction (DPOR) techniques have been successfully used to partition the space of traces into equivalence classes (Mazurkiewicz partitioning), with the goal of exploring only few representative traces from each class. We introduce a new equivalence on traces under sequential consistency semantics, which we call the observation equivalence. Two traces are observationally equivalent if every read event observes the same write event in both traces. While the traditional Mazurkiewicz equivalence is control-centric, our new definition is data-centric. We show that our observation equivalence is coarser than the Mazurkiewicz equivalence, and in many cases even exponentially coarser. We devise a DPOR exploration of the trace space, called data-centric DPOR, based on the observation equivalence.}, author = {Chalupa, Marek and Chatterjee, Krishnendu and Pavlogiannis, Andreas and Sinha, Nishant and Vaidya, Kapil}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, location = {Los Angeles, CA, United States}, number = {POPL}, publisher = {Association for Computing Machinery}, title = {{Data-centric dynamic partial order reduction}}, doi = {10.1145/3158119}, volume = {2}, year = {2017}, } @misc{5456, abstract = {We present a new dynamic partial-order reduction method for stateless model checking of concurrent programs. A common approach for exploring program behaviors relies on enumerating the traces of the program, without storing the visited states (aka stateless exploration). As the number of distinct traces grows exponentially, dynamic partial-order reduction (DPOR) techniques have been successfully used to partition the space of traces into equivalence classes (Mazurkiewicz partitioning), with the goal of exploring only few representative traces from each class. We introduce a new equivalence on traces under sequential consistency semantics, which we call the observation equivalence.
Two traces are observationally equivalent if every read event observes the same write event in both traces. While the traditional Mazurkiewicz equivalence is control-centric, our new definition is data-centric. We show that our observation equivalence is coarser than the Mazurkiewicz equivalence, and in many cases even exponentially coarser. We devise a DPOR exploration of the trace space, called data-centric DPOR, based on the observation equivalence. 1. For acyclic architectures, our algorithm is guaranteed to explore exactly one representative trace from each observation class, while spending polynomial time per class. Hence, our algorithm is optimal wrt the observation equivalence, and in several cases explores exponentially fewer traces than any enumerative method based on the Mazurkiewicz equivalence. 2. For cyclic architectures, we consider an equivalence between traces which is finer than the observation equivalence, but coarser than the Mazurkiewicz equivalence, and in some cases is exponentially coarser. Our data-centric DPOR algorithm remains optimal under this trace equivalence. Finally, we perform a basic experimental comparison between the existing Mazurkiewicz-based DPOR and our data-centric DPOR on a set of academic benchmarks. Our results show a significant reduction in both running time and the number of explored equivalence classes.}, author = {Chalupa, Marek and Chatterjee, Krishnendu and Pavlogiannis, Andreas and Sinha, Nishant and Vaidya, Kapil}, issn = {2664-1690}, pages = {36}, publisher = {IST Austria}, title = {{Data-centric dynamic partial order reduction}}, doi = {10.15479/AT:IST-2017-872-v1-1}, year = {2017}, } @inproceedings{551, abstract = {Evolutionary graph theory studies the evolutionary dynamics in a population structure given as a connected graph. Each node of the graph represents an individual of the population, and edges determine how offspring are placed. We consider the classical birth-death Moran process where there are two types of individuals, namely, the residents with fitness 1 and mutants with fitness r. The fitness indicates the reproductive strength. The evolutionary dynamics happens as follows: in the initial step, in a population of all resident individuals a mutant is introduced, and then at each step, an individual is chosen proportional to the fitness of its type to reproduce, and the offspring replaces a neighbor uniformly at random. The process stops when all individuals are either residents or mutants. The probability that all individuals in the end are mutants is called the fixation probability, which is a key factor in the rate of evolution. We consider the problem of approximating the fixation probability. The class of algorithms that is extremely relevant for approximation of the fixation probabilities is the Monte-Carlo simulation of the process. Previous results present a polynomial-time Monte-Carlo algorithm for undirected graphs when r is given in unary. First, we present a simple modification: instead of simulating each step, we discard ineffective steps, where no node changes type (i.e., either residents replace residents, or mutants replace mutants). Using the above simple modification and our result that the number of effective steps is concentrated around the expected number of effective steps, we present faster polynomial-time Monte-Carlo algorithms for undirected graphs.
Our algorithms are always at least a factor O(n^2 / log n) faster as compared to the previous algorithms, where n is the number of nodes, and are polynomial even if r is given in binary. We also present lower bounds showing that the upper bound on the expected number of effective steps we present is asymptotically tight for undirected graphs. }, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Nowak, Martin}, booktitle = {Leibniz International Proceedings in Informatics}, isbn = {978-395977046-0}, location = {Aalborg, Denmark}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Faster Monte Carlo algorithms for fixation probability of the Moran process on undirected graphs}}, doi = {10.4230/LIPIcs.MFCS.2017.61}, volume = {83}, year = {2017}, } @inproceedings{552, abstract = {Graph games provide the foundation for modeling and synthesis of reactive processes. Such games are played over graphs where the vertices are controlled by two adversarial players. We consider graph games where the objective of the first player is the conjunction of a qualitative objective (specified as a parity condition) and a quantitative objective (specified as a mean-payoff condition). There are two variants of the problem, namely, the threshold problem where the quantitative goal is to ensure that the mean-payoff value is above a threshold, and the value problem where the quantitative goal is to ensure the optimal mean-payoff value; in both cases ensuring the qualitative parity objective. The previous best-known algorithms for game graphs with n vertices, m edges, parity objectives with d priorities, and maximal absolute reward value W for mean-payoff objectives, are as follows: O(n^{d+1} · m · W) for the threshold problem, and O(n^{d+2} · m · W) for the value problem. Our main contributions are faster algorithms, and the running times of our algorithms are as follows: O(n^{d-1} · m · W) for the threshold problem, and O(n^d · m · W · log(n · W)) for the value problem. For mean-payoff parity objectives with two priorities, our algorithms match the best-known bounds of the algorithms for mean-payoff games (without conjunction with parity objectives). Our results are relevant in synthesis of reactive systems with both functional requirement (given as a qualitative objective) and performance requirement (given as a quantitative objective).}, author = {Chatterjee, Krishnendu and Henzinger, Monika H and Svozil, Alexander}, booktitle = {Leibniz International Proceedings in Informatics}, isbn = {978-395977046-0}, location = {Aalborg, Denmark}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Faster algorithms for mean-payoff parity games}}, doi = {10.4230/LIPIcs.MFCS.2017.39}, volume = {83}, year = {2017}, } @inproceedings{553, abstract = {We consider two player, zero-sum, finite-state concurrent reachability games, played for an infinite number of rounds, where in every round, each player simultaneously and independently of the other players chooses an action, whereafter the successor state is determined by a probability distribution given by the current state and the chosen actions. Player 1 wins iff a designated goal state is eventually visited. We are interested in the complexity of stationary strategies measured by their patience, which is defined as the inverse of the smallest non-zero probability employed.
Our main results are as follows: We show that (i) the optimal bound on the patience of optimal and ε-optimal strategies, for both players, is doubly exponential; and (ii) even in games with a single non-absorbing state, exponential (in the number of actions) patience is necessary. }, author = {Chatterjee, Krishnendu and Hansen, Kristoffer and Ibsen-Jensen, Rasmus}, booktitle = {Leibniz International Proceedings in Informatics}, isbn = {978-395977046-0}, location = {Aalborg, Denmark}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Strategy complexity of concurrent safety games}}, doi = {10.4230/LIPIcs.MFCS.2017.55}, volume = {83}, year = {2017}, } @inbook{625, abstract = {In the analysis of reactive systems a quantitative objective assigns a real value to every trace of the system. The value decision problem for a quantitative objective requires a trace whose value is at least a given threshold, and the exact value decision problem requires a trace whose value is exactly the threshold. We compare the computational complexity of the value and exact value decision problems for classical quantitative objectives, such as sum, discounted sum, energy, and mean-payoff, for two standard models of reactive systems, namely, graphs and graph games.}, author = {Chatterjee, Krishnendu and Doyen, Laurent and Henzinger, Thomas A}, booktitle = {Models, Algorithms, Logics and Tools}, editor = {Aceto, Luca and Bacci, Giorgio and Ingólfsdóttir, Anna and Legay, Axel and Mardare, Radu}, isbn = {978-3-319-63120-2}, issn = {0302-9743}, pages = {367 -- 381}, publisher = {Springer}, title = {{The cost of exactness in quantitative reachability}}, doi = {10.1007/978-3-319-63121-9_18}, volume = {10460}, year = {2017}, } @inproceedings{628, abstract = {We consider the problem of developing automated techniques for solving recurrence relations to aid the expected-runtime analysis of programs. The motivation is that several classical textbook algorithms have quite efficient expected-runtime complexity, whereas the corresponding worst-case bounds are either inefficient (e.g., Quick-Sort), or completely ineffective (e.g., Coupon-Collector). Since the main focus of expected-runtime analysis is to obtain efficient bounds, we consider bounds that are either logarithmic, linear, or almost-linear (O(log n), O(n), O(n · log n), respectively, where n represents the input size). Our main contribution is an efficient (simple linear-time algorithm) sound approach for deriving such expected-runtime bounds for the analysis of recurrence relations induced by randomized algorithms. The experimental results show that our approach can efficiently derive asymptotically optimal expected-runtime bounds for recurrences of classical randomized algorithms, including Randomized-Search, Quick-Sort, Quick-Select, and Coupon-Collector, where the worst-case bounds are either inefficient (such as linear as compared to logarithmic expected-runtime complexity, or quadratic as compared to linear or almost-linear expected-runtime complexity), or ineffective.}, author = {Chatterjee, Krishnendu and Fu, Hongfei and Murhekar, Aniket}, editor = {Majumdar, Rupak and Kunčak, Viktor}, isbn = {978-331963386-2}, location = {Heidelberg, Germany}, pages = {118 -- 139}, publisher = {Springer}, title = {{Automated recurrence analysis for almost linear expected runtime bounds}}, doi = {10.1007/978-3-319-63387-9_6}, volume = {10426}, year = {2017}, }
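Editor's note: entries 310 and 141 above measure algorithms in the symbolic model, where the graph may be accessed only through image (Post) and preimage (Pre) operations. As a companion illustration, the following minimal Python sketch (all names are our own; nothing here is taken from the cited papers) shows why reachability needs only O(D) symbolic operations on a graph of diameter D: each loop iteration performs one Post operation, and the frontier becomes empty after at most D + 1 iterations. Plain Python sets stand in for the BDDs used by real symbolic model checkers.

def symbolic_reachability(edges, sources):
    """Forward reachability via repeated one-step image (Post) operations.

    edges: set of (u, v) pairs; sources: set of start vertices.
    Returns the reachable set and the number of Post operations used.
    """
    reached = set(sources)
    frontier = set(sources)
    post_ops = 0
    while frontier:
        # One symbolic operation: the image of the frontier under the edge relation.
        frontier = {v for (u, v) in edges if u in frontier} - reached
        post_ops += 1
        reached |= frontier
    return reached, post_ops

# A path graph of diameter 3: the fixed point is reached after D + 1 = 4 Post steps.
print(symbolic_reachability({(0, 1), (1, 2), (2, 3)}, {0}))  # ({0, 1, 2, 3}, 4)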
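Editor's note: entry 551 above speeds up Monte-Carlo estimation of fixation probabilities by discarding ineffective steps of the Moran process, i.e., steps in which no node changes type. The hypothetical toy simulation below only illustrates that distinction: it skips ineffective steps by naive rejection sampling, whereas the paper's algorithm samples the effective steps directly to obtain its speed-up; all identifiers are ours, not the paper's.

import random

def moran_fixation_run(neighbours, r, mutant_start):
    """One Monte-Carlo run of the birth-death Moran process on an undirected graph.

    neighbours: dict mapping each node to its list of neighbours; r: mutant fitness
    (residents have fitness 1); mutant_start: the initial mutant node.
    Returns True iff the mutants take over the whole population (fixation).
    """
    mutants = {mutant_start}
    n = len(neighbours)
    while 0 < len(mutants) < n:
        # Keep sampling reproduction events until one is effective,
        # i.e., parent and replaced neighbour have different types.
        while True:
            nodes = list(neighbours)
            weights = [r if v in mutants else 1.0 for v in nodes]
            parent = random.choices(nodes, weights=weights)[0]
            child = random.choice(neighbours[parent])
            if (parent in mutants) != (child in mutants):
                break  # effective step: some node is about to change type
        if parent in mutants:
            mutants.add(child)
        else:
            mutants.discard(child)
    return len(mutants) == n

# Estimate the fixation probability on a 4-cycle for fitness advantage r = 2.
cycle = {i: [(i - 1) % 4, (i + 1) % 4] for i in range(4)}
runs = [moran_fixation_run(cycle, r=2.0, mutant_start=0) for _ in range(2000)]
print(sum(runs) / len(runs))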