@inproceedings{311, abstract = {Smart contracts are computer programs that are executed by a network of mutually distrusting agents, without the need of an external trusted authority. Smart contracts handle and transfer assets of considerable value (in the form of crypto-currency like Bitcoin). Hence, it is crucial that their implementation is bug-free. We identify the utility (or expected payoff) of interacting with such smart contracts as the basic and canonical quantitative property for such contracts. We present a framework for such quantitative analysis of smart contracts. Such a formal framework poses new and novel research challenges in programming languages, as it requires modeling of game-theoretic aspects to analyze incentives for deviation from honest behavior and modeling utilities which are not specified as standard temporal properties such as safety and termination. While game-theoretic incentives have been analyzed in the security community, their analysis has been restricted to the very special case of stateless games. However, to analyze smart contracts, stateful analysis is required as it must account for the different program states of the protocol. Our main contributions are as follows: we present (i)~a simplified programming language for smart contracts; (ii)~an automatic translation of the programs to state-based games; (iii)~an abstraction-refinement approach to solve such games; and (iv)~experimental results on real-world-inspired smart contracts.}, author = {Chatterjee, Krishnendu and Goharshady, Amir and Velner, Yaron}, location = {Thessaloniki, Greece}, pages = {739--767}, publisher = {Springer}, title = {{Quantitative analysis of smart contracts}}, doi = {10.1007/978-3-319-89884-1_26}, volume = {10801}, year = {2018}, } @inproceedings{6340, abstract = {We present a secure approach for maintaining and reporting credit history records on the Blockchain. Our approach removes third parties such as credit reporting agencies from the lending process and replaces them with smart contracts. This allows customers to interact directly with the lenders or banks while ensuring the integrity, unmalleability and privacy of their credit data. Additionally, each customer has full control over complete or selective disclosure of her credit records, eliminating the risk of privacy violations or data breaches. Moreover, our approach provides strong guarantees for the lenders as well. A lender can check both correctness and completeness of the credit data disclosed to her. This is the first approach that can perform all credit reporting tasks without a central authority or changing the financial mechanisms.}, author = {Goharshady, Amir Kafshdar and Behrouz, Ali and Chatterjee, Krishnendu}, booktitle = {Proceedings of the IEEE International Conference on Blockchain}, isbn = {978-1-5386-7975-3}, location = {Halifax, Canada}, pages = {1343--1348}, publisher = {IEEE}, title = {{Secure Credit Reporting on the Blockchain}}, doi = {10.1109/Cybermatics_2018.2018.00231}, year = {2018}, } @article{6009, abstract = {We study algorithmic questions wrt algebraic path properties in concurrent systems, where the transitions of the system are labeled from a complete, closed semiring. The algebraic path properties can model dataflow analysis problems, the shortest path problem, and many other natural problems that arise in program analysis. We consider that each component of the concurrent system is a graph with constant treewidth, a property satisfied by the control-flow graphs of most programs.
We allow for multiple possible queries, which arise naturally in demand-driven dataflow analysis. The study of multiple queries allows us to consider the tradeoff between the resource usage of the one-time preprocessing and of each individual query. The traditional approach constructs the product graph of all components and applies the best-known graph algorithm on the product. In this approach, even the answer to a single query requires the transitive closure (i.e., the results of all possible queries), which provides no room for tradeoff between preprocessing and query time. Our main contributions are algorithms that significantly improve the worst-case running time of the traditional approach, and provide various tradeoffs depending on the number of queries. For example, in a concurrent system of two components, the traditional approach requires hexic time in the worst case for answering one query as well as computing the transitive closure, whereas we show that with one-time preprocessing in almost cubic time, each subsequent query can be answered in at most linear time, and even the transitive closure can be computed in almost quartic time. Furthermore, we establish conditional optimality results showing that the worst-case running time of our algorithms cannot be improved without achieving major breakthroughs in graph algorithms (i.e., improving the worst-case bound for the shortest path problem in general graphs). Preliminary experimental results show that our algorithms perform favorably on several benchmarks.}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Goharshady, Amir Kafshdar and Pavlogiannis, Andreas}, issn = {0164-0925}, journal = {ACM Transactions on Programming Languages and Systems}, number = {3}, publisher = {Association for Computing Machinery (ACM)}, title = {{Algorithms for algebraic path properties in concurrent systems of constant treewidth components}}, doi = {10.1145/3210257}, volume = {40}, year = {2018}, } @inproceedings{5977, abstract = {We consider the stochastic shortest path (SSP) problem for succinct Markov decision processes (MDPs), where the MDP consists of a set of variables, and a set of nondeterministic rules that update the variables. First, we show that several examples from the AI literature can be modeled as succinct MDPs. Then we present computational approaches for upper and lower bounds for the SSP problem: (a) for computing upper bounds, our method is polynomial-time in the implicit description of the MDP; (b) for lower bounds, we present a polynomial-time (in the size of the implicit description) reduction to quadratic programming. Our approach is applicable even to infinite-state MDPs. Finally, we present experimental results to demonstrate the effectiveness of our approach on several classical examples from the AI literature.}, author = {Chatterjee, Krishnendu and Fu, Hongfei and Goharshady, Amir and Okati, Nastaran}, booktitle = {Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence}, isbn = {978-099924112-7}, issn = {10450823}, location = {Stockholm, Sweden}, pages = {4700--4707}, publisher = {IJCAI}, title = {{Computational approaches for stochastic shortest path on succinct MDPs}}, doi = {10.24963/ijcai.2018/653}, volume = {2018}, year = {2018}, } @article{2, abstract = {Indirect reciprocity explores how humans act when their reputation is at stake, and which social norms they use to assess the actions of others.
A crucial question in indirect reciprocity is which social norms can maintain stable cooperation in a society. Past research has highlighted eight such norms, called “leading-eight” strategies. This past research, however, is based on the assumption that all relevant information about other population members is publicly available and that everyone agrees on who is good or bad. Instead, here we explore the reputation dynamics when information is private and noisy. We show that under these conditions, most leading-eight strategies fail to evolve. Those leading-eight strategies that do evolve are unable to sustain full cooperation. Indirect reciprocity is a mechanism for cooperation based on shared moral systems and individual reputations. It assumes that members of a community routinely observe and assess each other and that they use this information to decide who is good or bad, and who deserves cooperation. When information is transmitted publicly, such that all community members agree on each other’s reputation, previous research has highlighted eight crucial moral systems. These “leading-eight” strategies can maintain cooperation and resist invasion by defectors. However, in real populations individuals often hold their own private views of others. Once two individuals disagree about their opinion of some third party, they may also see its subsequent actions in a different light. Their opinions may further diverge over time. Herein, we explore indirect reciprocity when information transmission is private and noisy. We find that in the presence of perception errors, most leading-eight strategies cease to be stable. Even if a leading-eight strategy evolves, cooperation rates may drop considerably when errors are common. Our research highlights the role of reliable information and synchronized reputations to maintain stable moral systems.}, author = {Hilbe, Christian and Schmid, Laura and Tkadlec, Josef and Chatterjee, Krishnendu and Nowak, Martin}, journal = {Proceedings of the National Academy of Sciences (PNAS)}, number = {48}, pages = {12241--12246}, publisher = {National Academy of Sciences}, title = {{Indirect reciprocity with private, noisy, and incomplete information}}, doi = {10.1073/pnas.1810565115}, volume = {115}, year = {2018}, } @article{10418, abstract = {We present a new proof rule for proving almost-sure termination of probabilistic programs, including those that contain demonic non-determinism. An important question for a probabilistic program is whether the probability mass of all its diverging runs is zero, that is, that it terminates "almost surely". Proving that can be hard, and this paper presents a new method for doing so. It applies directly to the program's source code, even if the program contains demonic choice. Like others, we use variant functions (a.k.a. "super-martingales") that are real-valued and decrease randomly on each loop iteration; but our key innovation is that the amount as well as the probability of the decrease are parametric.
We prove the soundness of the new rule, indicate where its applicability goes beyond existing rules, and explain its connection to classical results on denumerable (non-demonic) Markov chains.}, author = {McIver, Annabelle and Morgan, Carroll and Kaminski, Benjamin Lucien and Katoen, Joost P}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, location = {Los Angeles, CA, United States}, number = {POPL}, publisher = {Association for Computing Machinery}, title = {{A new proof rule for almost-sure termination}}, doi = {10.1145/3158121}, volume = {2}, year = {2017}, } @article{464, abstract = {The computation of the winning set for parity objectives and for Streett objectives, in graphs as well as in game graphs, is a central problem in computer-aided verification, with application to the verification of closed systems with strong fairness conditions, the verification of open systems, checking interface compatibility, well-formedness of specifications, and the synthesis of reactive systems. We show how to compute the winning set on n vertices for (1) parity-3 (aka one-pair Streett) objectives in game graphs in time O(n^{5/2}) and for (2) k-pair Streett objectives in graphs in time O(n^2 + nk log n). For both problems this gives faster algorithms for dense graphs and represents the first improvement in asymptotic running time in 15 years.}, author = {Chatterjee, Krishnendu and Henzinger, Monika H and Loitzenbauer, Veronika}, issn = {1860-5974}, journal = {Logical Methods in Computer Science}, number = {3}, publisher = {International Federation of Computational Logic}, title = {{Improved algorithms for parity and Streett objectives}}, doi = {10.23638/LMCS-13(3:26)2017}, volume = {13}, year = {2017}, } @article{466, abstract = {We consider Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) objectives. There exist two different views: (i) the expectation semantics, where the goal is to optimize the expected mean-payoff objective, and (ii) the satisfaction semantics, where the goal is to maximize the probability of runs such that the mean-payoff value stays above a given vector. We consider optimization with respect to both objectives at once, thus unifying the existing semantics. Precisely, the goal is to optimize the expectation while ensuring the satisfaction constraint. Our problem captures the notion of optimization with respect to strategies that are risk-averse (i.e., ensure a certain probabilistic guarantee). Our main results are as follows: First, we present algorithms for the decision problems which are always polynomial in the size of the MDP. We also show that an approximation of the Pareto curve can be computed in time polynomial in the size of the MDP and the approximation factor, but exponential in the number of dimensions. Second, we present a complete characterization of the strategy complexity (in terms of memory bounds and randomization) required to solve our problem.}, author = {Chatterjee, Krishnendu and Křetínská, Zuzana and Křetínský, Jan}, issn = {18605974}, journal = {Logical Methods in Computer Science}, number = {2}, publisher = {International Federation of Computational Logic}, title = {{Unifying two views on multiple mean-payoff objectives in Markov decision processes}}, doi = {10.23638/LMCS-13(2:15)2017}, volume = {13}, year = {2017}, } @article{467, abstract = {Recently there has been a significant effort to handle quantitative properties in formal verification and synthesis.
While weighted automata over finite and infinite words provide a natural and flexible framework to express quantitative properties, perhaps surprisingly, some basic system properties such as average response time cannot be expressed using weighted automata or in any other known decidable formalism. In this work, we introduce nested weighted automata as a natural extension of weighted automata, which makes it possible to express important quantitative properties such as average response time. In nested weighted automata, a master automaton spins off and collects results from weighted slave automata, each of which computes a quantity along a finite portion of an infinite word. Nested weighted automata can be viewed as the quantitative analogue of monitor automata, which are used in runtime verification. We establish an almost-complete decidability picture for the basic decision problems about nested weighted automata and illustrate their applicability in several domains. In particular, nested weighted automata can be used to decide average response time properties.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, issn = {15293785}, journal = {ACM Transactions on Computational Logic (TOCL)}, number = {4}, publisher = {ACM}, title = {{Nested weighted automata}}, doi = {10.1145/3152769}, volume = {18}, year = {2017}, } @article{465, abstract = {The edit distance between two words w1, w2 is the minimal number of word operations (letter insertions, deletions, and substitutions) necessary to transform w1 to w2. The edit distance generalizes to languages L1, L2, where the edit distance from L1 to L2 is the minimal number k such that for every word from L1 there exists a word in L2 with edit distance at most k. We study the edit distance computation problem between pushdown automata and their subclasses. The problem of computing edit distance to a pushdown automaton is undecidable, and in practice, the interesting question is to compute the edit distance from a pushdown automaton (the implementation, a standard model for programs with recursion) to a regular language (the specification). In this work, we present a complete picture of decidability and complexity for the following problems: (1) deciding whether, for a given threshold k, the edit distance from a pushdown automaton to a finite automaton is at most k, and (2) deciding whether the edit distance from a pushdown automaton to a finite automaton is finite.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Ibsen-Jensen, Rasmus and Otop, Jan}, issn = {18605974}, journal = {Logical Methods in Computer Science}, number = {3}, publisher = {International Federation of Computational Logic}, title = {{Edit distance for pushdown automata}}, doi = {10.23638/LMCS-13(3:23)2017}, volume = {13}, year = {2017}, } @article{512, abstract = {The fixation probability is the probability that a new mutant introduced in a homogeneous population eventually takes over the entire population. The fixation probability is a fundamental quantity of natural selection, and known to depend on the population structure. Amplifiers of natural selection are population structures which increase the fixation probability of advantageous mutants, as compared to the baseline case of well-mixed populations. In this work we focus on symmetric population structures represented as undirected graphs.
In the regime of undirected graphs, the strongest amplifier known has been the Star graph, and the existence of undirected graphs with stronger amplification properties has remained open for over a decade. In this work we present the Comet and Comet-swarm families of undirected graphs. We show that for a range of fitness values of the mutants, the Comet and Comet-swarm graphs have fixation probability strictly larger than the fixation probability of the Star graph, for fixed population size and in the limit of large populations, respectively.}, author = {Pavlogiannis, Andreas and Tkadlec, Josef and Chatterjee, Krishnendu and Nowak, Martin}, issn = {20452322}, journal = {Scientific Reports}, number = {1}, publisher = {Nature Publishing Group}, title = {{Amplification on undirected population structures: Comets beat stars}}, doi = {10.1038/s41598-017-00107-w}, volume = {7}, year = {2017}, } @article{10416, abstract = {A fundamental algorithmic problem at the heart of static analysis is Dyck reachability. The input is a graph where the edges are labeled with different types of opening and closing parentheses, and the reachability information is computed via paths whose parentheses are properly matched. We present new results for Dyck reachability problems with applications to alias analysis and data-dependence analysis. Our main contributions, which include improved upper bounds as well as lower bounds that establish optimality guarantees, are as follows: First, we consider Dyck reachability on bidirected graphs, which is the standard way of performing field-sensitive points-to analysis. Given a bidirected graph with n nodes and m edges, we present: (i) an algorithm with worst-case running time O(m + n · α(n)), where α(n) is the inverse Ackermann function, improving the previously known O(n^2) time bound; (ii) a matching lower bound that shows that our algorithm is optimal wrt worst-case complexity; and (iii) an optimal average-case upper bound of O(m) time, improving the previously known O(m · log n) bound. Second, we consider the problem of context-sensitive data-dependence analysis, where the task is to obtain analysis summaries of library code in the presence of callbacks. Our algorithm preprocesses libraries in almost linear time, after which the contribution of the library in the complexity of the client analysis is only linear, and only wrt the number of call sites. Third, we prove that combinatorial algorithms for Dyck reachability on general graphs with truly sub-cubic bounds cannot be obtained without obtaining sub-cubic combinatorial algorithms for Boolean Matrix Multiplication, which is a long-standing open problem. Thus we establish that the existing combinatorial algorithms for Dyck reachability are (conditionally) optimal for general graphs. We also show that the same hardness holds for graphs of constant treewidth. Finally, we provide a prototype implementation of our algorithms for both alias analysis and data-dependence analysis.
Our experimental evaluation demonstrates that the new algorithms significantly outperform all existing methods on the two problems, over real-world benchmarks.}, author = {Chatterjee, Krishnendu and Choudhary, Bhavya and Pavlogiannis, Andreas}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, location = {Los Angeles, CA, United States}, number = {POPL}, publisher = {Association for Computing Machinery}, title = {{Optimal Dyck reachability for data-dependence and alias analysis}}, doi = {10.1145/3158118}, volume = {2}, year = {2017}, } @misc{5455, abstract = {A fundamental algorithmic problem at the heart of static analysis is Dyck reachability. The input is a graph where the edges are labeled with different types of opening and closing parentheses, and the reachability information is computed via paths whose parentheses are properly matched. We present new results for Dyck reachability problems with applications to alias analysis and data-dependence analysis. Our main contributions, which include improved upper bounds as well as lower bounds that establish optimality guarantees, are as follows: First, we consider Dyck reachability on bidirected graphs, which is the standard way of performing field-sensitive points-to analysis. Given a bidirected graph with n nodes and m edges, we present: (i) an algorithm with worst-case running time O(m + n · α(n)), where α(n) is the inverse Ackermann function, improving the previously known O(n^2) time bound; (ii) a matching lower bound that shows that our algorithm is optimal wrt worst-case complexity; and (iii) an optimal average-case upper bound of O(m) time, improving the previously known O(m · log n) bound. Second, we consider the problem of context-sensitive data-dependence analysis, where the task is to obtain analysis summaries of library code in the presence of callbacks. Our algorithm preprocesses libraries in almost linear time, after which the contribution of the library in the complexity of the client analysis is only linear, and only wrt the number of call sites. Third, we prove that combinatorial algorithms for Dyck reachability on general graphs with truly sub-cubic bounds cannot be obtained without obtaining sub-cubic combinatorial algorithms for Boolean Matrix Multiplication, which is a long-standing open problem. Thus we establish that the existing combinatorial algorithms for Dyck reachability are (conditionally) optimal for general graphs. We also show that the same hardness holds for graphs of constant treewidth. Finally, we provide a prototype implementation of our algorithms for both alias analysis and data-dependence analysis. Our experimental evaluation demonstrates that the new algorithms significantly outperform all existing methods on the two problems, over real-world benchmarks.}, author = {Chatterjee, Krishnendu and Choudhary, Bhavya and Pavlogiannis, Andreas}, issn = {2664-1690}, pages = {37}, publisher = {IST Austria}, title = {{Optimal Dyck reachability for data-dependence and alias analysis}}, doi = {10.15479/AT:IST-2017-870-v1-1}, year = {2017}, } @article{10417, abstract = {We present a new dynamic partial-order reduction method for stateless model checking of concurrent programs. A common approach for exploring program behaviors relies on enumerating the traces of the program, without storing the visited states (aka stateless exploration).
As the number of distinct traces grows exponentially, dynamic partial-order reduction (DPOR) techniques have been successfully used to partition the space of traces into equivalence classes (Mazurkiewicz partitioning), with the goal of exploring only a few representative traces from each class. We introduce a new equivalence on traces under sequential consistency semantics, which we call the observation equivalence. Two traces are observationally equivalent if every read event observes the same write event in both traces. While the traditional Mazurkiewicz equivalence is control-centric, our new definition is data-centric. We show that our observation equivalence is coarser than the Mazurkiewicz equivalence, and in many cases even exponentially coarser. We devise a DPOR exploration of the trace space, called data-centric DPOR, based on the observation equivalence.}, author = {Chalupa, Marek and Chatterjee, Krishnendu and Pavlogiannis, Andreas and Sinha, Nishant and Vaidya, Kapil}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, location = {Los Angeles, CA, United States}, number = {POPL}, publisher = {Association for Computing Machinery}, title = {{Data-centric dynamic partial order reduction}}, doi = {10.1145/3158119}, volume = {2}, year = {2017}, } @misc{5456, abstract = {We present a new dynamic partial-order reduction method for stateless model checking of concurrent programs. A common approach for exploring program behaviors relies on enumerating the traces of the program, without storing the visited states (aka stateless exploration). As the number of distinct traces grows exponentially, dynamic partial-order reduction (DPOR) techniques have been successfully used to partition the space of traces into equivalence classes (Mazurkiewicz partitioning), with the goal of exploring only a few representative traces from each class. We introduce a new equivalence on traces under sequential consistency semantics, which we call the observation equivalence. Two traces are observationally equivalent if every read event observes the same write event in both traces. While the traditional Mazurkiewicz equivalence is control-centric, our new definition is data-centric. We show that our observation equivalence is coarser than the Mazurkiewicz equivalence, and in many cases even exponentially coarser. We devise a DPOR exploration of the trace space, called data-centric DPOR, based on the observation equivalence. 1. For acyclic architectures, our algorithm is guaranteed to explore exactly one representative trace from each observation class, while spending polynomial time per class. Hence, our algorithm is optimal wrt the observation equivalence, and in several cases explores exponentially fewer traces than any enumerative method based on the Mazurkiewicz equivalence. 2. For cyclic architectures, we consider an equivalence between traces which is finer than the observation equivalence, but coarser than the Mazurkiewicz equivalence, and in some cases is exponentially coarser. Our data-centric DPOR algorithm remains optimal under this trace equivalence. Finally, we perform a basic experimental comparison between the existing Mazurkiewicz-based DPOR and our data-centric DPOR on a set of academic benchmarks.
Our results show a significant reduction in both running time and the number of explored equivalence classes.}, author = {Chalupa, Marek and Chatterjee, Krishnendu and Pavlogiannis, Andreas and Sinha, Nishant and Vaidya, Kapil}, issn = {2664-1690}, pages = {36}, publisher = {IST Austria}, title = {{Data-centric dynamic partial order reduction}}, doi = {10.15479/AT:IST-2017-872-v1-1}, year = {2017}, } @inproceedings{551, abstract = {Evolutionary graph theory studies the evolutionary dynamics in a population structure given as a connected graph. Each node of the graph represents an individual of the population, and edges determine how offspring are placed. We consider the classical birth-death Moran process where there are two types of individuals, namely, the residents with fitness 1 and mutants with fitness r. The fitness indicates the reproductive strength. The evolutionary dynamics happens as follows: in the initial step, in a population of all resident individuals a mutant is introduced, and then at each step, an individual is chosen proportional to the fitness of its type to reproduce, and the offspring replaces a neighbor uniformly at random. The process stops when all individuals are either residents or mutants. The probability that all individuals in the end are mutants is called the fixation probability, which is a key factor in the rate of evolution. We consider the problem of approximating the fixation probability. The class of algorithms that is extremely relevant for approximation of the fixation probabilities is the Monte-Carlo simulation of the process. Previous results present a polynomial-time Monte-Carlo algorithm for undirected graphs when r is given in unary. First, we present a simple modification: instead of simulating each step, we discard ineffective steps, where no node changes type (i.e., either residents replace residents, or mutants replace mutants). Using the above simple modification and our result that the number of effective steps is concentrated around the expected number of effective steps, we present faster polynomial-time Monte-Carlo algorithms for undirected graphs. Our algorithms are always at least a factor O(n^2 / log n) faster as compared to the previous algorithms, where n is the number of nodes, and run in polynomial time even if r is given in binary. We also present lower bounds showing that the upper bound on the expected number of effective steps we present is asymptotically tight for undirected graphs.}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Nowak, Martin}, booktitle = {Leibniz International Proceedings in Informatics}, isbn = {978-395977046-0}, location = {Aalborg, Denmark}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Faster Monte Carlo algorithms for fixation probability of the Moran process on undirected graphs}}, doi = {10.4230/LIPIcs.MFCS.2017.61}, volume = {83}, year = {2017}, } @inproceedings{552, abstract = {Graph games provide the foundation for modeling and synthesis of reactive processes. Such games are played over graphs where the vertices are controlled by two adversarial players. We consider graph games where the objective of the first player is the conjunction of a qualitative objective (specified as a parity condition) and a quantitative objective (specified as a mean-payoff condition).
There are two variants of the problem, namely, the threshold problem where the quantitative goal is to ensure that the mean-payoff value is above a threshold, and the value problem where the quantitative goal is to ensure the optimal mean-payoff value; in both cases ensuring the qualitative parity objective. The previous best-known algorithms for game graphs with n vertices, m edges, parity objectives with d priorities, and maximal absolute reward value W for mean-payoff objectives, are as follows: O(n^{d+1} · m · W) for the threshold problem, and O(n^{d+2} · m · W) for the value problem. Our main contributions are faster algorithms, and the running times of our algorithms are as follows: O(n^{d-1} · m · W) for the threshold problem, and O(n^d · m · W · log(n · W)) for the value problem. For mean-payoff parity objectives with two priorities, our algorithms match the best-known bounds of the algorithms for mean-payoff games (without conjunction with parity objectives). Our results are relevant in synthesis of reactive systems with both functional requirement (given as a qualitative objective) and performance requirement (given as a quantitative objective).}, author = {Chatterjee, Krishnendu and Henzinger, Monika H and Svozil, Alexander}, booktitle = {Leibniz International Proceedings in Informatics}, isbn = {978-395977046-0}, location = {Aalborg, Denmark}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Faster algorithms for mean-payoff parity games}}, doi = {10.4230/LIPIcs.MFCS.2017.39}, volume = {83}, year = {2017}, } @inproceedings{553, abstract = {We consider two-player, zero-sum, finite-state concurrent reachability games, played for an infinite number of rounds, where in every round, each player simultaneously and independently of the other players chooses an action, whereafter the successor state is determined by a probability distribution given by the current state and the chosen actions. Player 1 wins iff a designated goal state is eventually visited. We are interested in the complexity of stationary strategies measured by their patience, which is defined as the inverse of the smallest non-zero probability employed. Our main results are as follows: we show that (i) the optimal bound on the patience of optimal and ε-optimal strategies, for both players, is doubly exponential; and (ii) even in games with a single non-absorbing state, exponential (in the number of actions) patience is necessary.}, author = {Chatterjee, Krishnendu and Hansen, Kristofer and Ibsen-Jensen, Rasmus}, booktitle = {Leibniz International Proceedings in Informatics}, isbn = {978-395977046-0}, location = {Aalborg, Denmark}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Strategy complexity of concurrent safety games}}, doi = {10.4230/LIPIcs.MFCS.2017.55}, volume = {83}, year = {2017}, } @inbook{625, abstract = {In the analysis of reactive systems a quantitative objective assigns a real value to every trace of the system. The value decision problem for a quantitative objective requires a trace whose value is at least a given threshold, and the exact value decision problem requires a trace whose value is exactly the threshold.
We compare the computational complexity of the value and exact value decision problems for classical quantitative objectives, such as sum, discounted sum, energy, and mean-payoff, for two standard models of reactive systems, namely, graphs and graph games.}, author = {Chatterjee, Krishnendu and Doyen, Laurent and Henzinger, Thomas A}, booktitle = {Models, Algorithms, Logics and Tools}, editor = {Aceto, Luca and Bacci, Giorgio and Ingólfsdóttir, Anna and Legay, Axel and Mardare, Radu}, isbn = {978-3-319-63120-2}, issn = {0302-9743}, pages = {367--381}, publisher = {Springer}, title = {{The cost of exactness in quantitative reachability}}, doi = {10.1007/978-3-319-63121-9_18}, volume = {10460}, year = {2017}, } @inproceedings{628, abstract = {We consider the problem of developing automated techniques for solving recurrence relations to aid the expected-runtime analysis of programs. The motivation is that several classical textbook algorithms have quite efficient expected-runtime complexity, whereas the corresponding worst-case bounds are either inefficient (e.g., Quick-Sort), or completely ineffective (e.g., Coupon-Collector). Since the main focus of expected-runtime analysis is to obtain efficient bounds, we consider bounds that are either logarithmic, linear, or almost-linear (O(log n), O(n), O(n · log n), respectively, where n represents the input size). Our main contribution is an efficient and sound approach (a simple linear-time algorithm) for deriving such expected-runtime bounds for the analysis of recurrence relations induced by randomized algorithms. The experimental results show that our approach can efficiently derive asymptotically optimal expected-runtime bounds for recurrences of classical randomized algorithms, including Randomized-Search, Quick-Sort, Quick-Select, and Coupon-Collector, where the worst-case bounds are either inefficient (such as linear as compared to logarithmic expected-runtime complexity, or quadratic as compared to linear or almost-linear expected-runtime complexity), or ineffective.}, author = {Chatterjee, Krishnendu and Fu, Hongfei and Murhekar, Aniket}, editor = {Majumdar, Rupak and Kunčak, Viktor}, isbn = {978-331963386-2}, location = {Heidelberg, Germany}, pages = {118--139}, publisher = {Springer}, title = {{Automated recurrence analysis for almost linear expected runtime bounds}}, doi = {10.1007/978-3-319-63387-9_6}, volume = {10426}, year = {2017}, }
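
As a companion to the last entry above (Chatterjee, Fu, and Murhekar 2017), the following is a minimal, illustrative Python sketch of the kind of recurrence reasoning that paper targets; it is not the paper's algorithm. It unrolls a toy recurrence T(n) = T(floor(3n/4)) + n with T(1) = 1, whose shape resembles recurrences arising in the expected-runtime analysis of randomized algorithms such as Quick-Select, and numerically checks the candidate linear bound T(n) <= 4n. The specific recurrence, the constant 4, and the function names are illustrative assumptions only.

# Illustrative sketch only -- not the algorithm of the cited paper.
# Unrolls the toy recurrence T(n) = T(floor(3n/4)) + n with T(1) = 1 and
# checks the candidate linear bound T(n) <= 4n numerically.

def unroll(n: int) -> float:
    """Evaluate T(n) by direct unrolling of T(n) = T(floor(3n/4)) + n, T(1) = 1."""
    total = 0.0
    while n > 1:
        total += n
        n = (3 * n) // 4
    return total + 1.0  # base case T(1) = 1

def check_linear_bound(max_n: int = 10000, c: float = 4.0) -> bool:
    """Check T(n) <= c * n for all 1 <= n <= max_n."""
    return all(unroll(n) <= c * n for n in range(1, max_n + 1))

if __name__ == "__main__":
    # The geometric series n + (3/4)n + (3/4)^2 n + ... is bounded by 4n,
    # so the check is expected to succeed.
    print(check_linear_bound())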