@inproceedings{6462, abstract = {A controller is a device that interacts with a plant. At each time point, it reads the plant’s state and issues commands with the goal that the plant operates optimally. Constructing optimal controllers is a fundamental and challenging problem. Machine learning techniques have recently been successfully applied to train controllers, yet they have limitations. Learned controllers are monolithic and hard to reason about. In particular, it is difficult to add features without retraining, to guarantee any level of performance, and to achieve acceptable performance when encountering untrained scenarios. These limitations can be addressed by deploying quantitative run-time shields that serve as a proxy for the controller. At each time point, the shield reads the command issued by the controller and may choose to alter it before passing it on to the plant. We show how optimal shields that interfere as little as possible, while guaranteeing a desired level of controller performance, can be generated systematically and automatically using reactive synthesis. First, we abstract the plant by building a stochastic model. Second, we consider the learned controller to be a black box. Third, we measure controller performance and shield interference by two quantitative run-time measures that are formally defined using weighted automata. Then, the problem of constructing a shield that guarantees maximal performance with minimal interference is the problem of finding an optimal strategy in a stochastic 2-player game “controller versus shield” played on the abstract state space of the plant with a quantitative objective obtained from combining the performance and interference measures. We illustrate the effectiveness of our approach by automatically constructing lightweight shields for learned traffic-light controllers in various road networks. The shields we generate avoid liveness bugs, improve controller performance in untrained and changing traffic situations, and add features to learned controllers, such as giving priority to emergency vehicles.}, author = {Avni, Guy and Bloem, Roderick and Chatterjee, Krishnendu and Henzinger, Thomas A and Konighofer, Bettina and Pranger, Stefan}, booktitle = {31st International Conference on Computer-Aided Verification}, isbn = {9783030255398}, issn = {0302-9743}, location = {New York, NY, United States}, pages = {630--649}, publisher = {Springer}, title = {{Run-time optimization for learned controllers through quantitative games}}, doi = {10.1007/978-3-030-25540-4_36}, volume = {11561}, year = {2019}, } @article{6836, abstract = {Direct reciprocity is a powerful mechanism for the evolution of cooperation on the basis of repeated interactions [1-4]. It requires that interacting individuals are sufficiently equal, such that everyone faces similar consequences when they cooperate or defect. Yet inequality is ubiquitous among humans [5,6] and is generally considered to undermine cooperation and welfare [7-10]. Most previous models of reciprocity do not include inequality [11-15]. These models assume that individuals are the same in all relevant aspects. Here we introduce a general framework to study direct reciprocity among unequal individuals. Our model allows for multiple sources of inequality. Subjects can differ in their endowments, their productivities and in how much they benefit from public goods. We find that extreme inequality prevents cooperation.
But if subjects differ in productivity, some endowment inequality can be necessary for cooperation to prevail. Our mathematical predictions are supported by a behavioural experiment in which we vary the endowments and productivities of the subjects. We observe that overall welfare is maximized when the two sources of heterogeneity are aligned, such that more productive individuals receive higher endowments. By contrast, when endowments and productivities are misaligned, cooperation quickly breaks down. Our findings have implications for policy-makers concerned with equity, efficiency and the provisioning of public goods.}, author = {Hauser, Oliver P. and Hilbe, Christian and Chatterjee, Krishnendu and Nowak, Martin A.}, issn = {1476-4687}, journal = {Nature}, number = {7770}, pages = {524--527}, publisher = {Springer Nature}, title = {{Social dilemmas among unequals}}, doi = {10.1038/s41586-019-1488-5}, volume = {572}, year = {2019}, } @inproceedings{6942, abstract = {Graph games and Markov decision processes (MDPs) are standard models in reactive synthesis and verification of probabilistic systems with nondeterminism. The class of ω-regular winning conditions (e.g., safety, reachability, liveness, and parity conditions) provides a robust and expressive specification formalism for properties that arise in the analysis of reactive systems. The resolutions of nondeterminism in games and MDPs are represented as strategies, and we consider succinct representation of such strategies. The decision-tree data structure from machine learning retains the flavor of decisions of strategies and allows entropy-based minimization to obtain succinct trees. However, in contrast to traditional machine-learning problems where small errors are allowed, for winning strategies in graph games and MDPs no error is allowed, and the decision tree must represent the entire strategy. In this work we propose decision trees with linear classifiers for representation of strategies in graph games and MDPs. We have implemented strategy representation using this data structure and we present experimental results for problems on graph games and MDPs, which show that this new data structure yields a much more efficient strategy representation than standard decision trees.}, author = {Ashok, Pranav and Brázdil, Tomáš and Chatterjee, Krishnendu and Křetínský, Jan and Lampert, Christoph and Toman, Viktor}, booktitle = {16th International Conference on Quantitative Evaluation of Systems}, isbn = {9783030302801}, issn = {0302-9743}, location = {Glasgow, United Kingdom}, pages = {109--128}, publisher = {Springer Nature}, title = {{Strategy representation by decision trees with linear classifiers}}, doi = {10.1007/978-3-030-30281-8_7}, volume = {11785}, year = {2019}, } @inproceedings{7183, abstract = {A probabilistic vector addition system with states (pVASS) is a finite state Markov process augmented with non-negative integer counters that can be incremented or decremented during each state transition, blocking any behaviour that would cause a counter to decrease below zero. pVASS can be used as abstractions of probabilistic programs and have many decidable properties. The use of pVASS as abstractions requires the presence of nondeterminism in the model. In this paper, we develop techniques for checking fast termination of pVASS with nondeterminism.
That is, for every initial configuration of size n, we consider the worst expected number of transitions needed to reach a configuration with some counter negative (the expected termination time). We show that the problem of whether the asymptotic expected termination time is linear is decidable in polynomial time for a certain natural class of pVASS with nondeterminism. Furthermore, we show the following dichotomy: if the asymptotic expected termination time is not linear, then it is at least quadratic, i.e., in Ω(n^2).}, author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Kučera, Antonín and Novotný, Petr and Velan, Dominik}, booktitle = {International Symposium on Automated Technology for Verification and Analysis}, isbn = {9783030317836}, issn = {1611-3349}, location = {Taipei, Taiwan}, pages = {462--478}, publisher = {Springer Nature}, title = {{Deciding fast termination for probabilistic VASS with nondeterminism}}, doi = {10.1007/978-3-030-31784-3_27}, volume = {11781}, year = {2019}, } @article{7210, abstract = {The rate of biological evolution depends on the fixation probability and on the fixation time of new mutants. Intensive research has focused on identifying population structures that augment the fixation probability of advantageous mutants. But these amplifiers of natural selection typically increase fixation time. Here we study population structures that achieve a tradeoff between fixation probability and time. First, we show that no amplifiers can have an asymptotically lower absorption time than the well-mixed population. Then we design population structures that substantially augment the fixation probability with just a minor increase in fixation time. Finally, we show that those structures enable a higher effective rate of evolution than the well-mixed population, provided that the rate of generating advantageous mutants is relatively low. Our work sheds light on how population structure affects the rate of evolution. Moreover, our structures could be useful for lab-based, medical, or industrial applications of evolutionary optimization.}, author = {Tkadlec, Josef and Pavlogiannis, Andreas and Chatterjee, Krishnendu and Nowak, Martin A.}, issn = {2399-3642}, journal = {Communications Biology}, publisher = {Springer Nature}, title = {{Population structure determines the tradeoff between fixation probability and fixation time}}, doi = {10.1038/s42003-019-0373-y}, volume = {2}, year = {2019}, } @inproceedings{10190, abstract = {The verification of concurrent programs remains an open challenge, as thread interaction has to be accounted for, which leads to state-space explosion. Stateless model checking battles this problem by exploring traces rather than states of the program. As there are exponentially many traces, dynamic partial-order reduction (DPOR) techniques are used to partition the trace space into equivalence classes, and explore a few representatives from each class. The standard equivalence that underlies most DPOR techniques is the happens-before equivalence; however, recent works have spawned a vivid interest in coarser equivalences. The efficiency of such approaches is a product of two parameters: (i) the size of the partitioning induced by the equivalence, and (ii) the time spent by the exploration algorithm in each class of the partitioning. In this work, we present a new equivalence, called value-happens-before, and show that it has two appealing features.
First, value-happens-before is always at least as coarse as the happens-before equivalence, and can be even exponentially coarser. Second, the value-happens-before partitioning is efficiently explorable when the number of threads is bounded. We present an algorithm called value-centric DPOR (VCDPOR), which explores the underlying partitioning using polynomial time per class. Finally, we perform an experimental evaluation of VCDPOR on various benchmarks, and compare it against other state-of-the-art approaches. Our results show that value-happens-before typically induces a significant reduction in the size of the underlying partitioning, which leads to a considerable reduction in the running time for exploring the whole partitioning.}, author = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Toman, Viktor}, booktitle = {Proceedings of the 34th ACM International Conference on Object-Oriented Programming, Systems, Languages, and Applications}, issn = {2475-1421}, keywords = {safety, risk, reliability and quality, software}, location = {Athens, Greece}, publisher = {ACM}, title = {{Value-centric dynamic partial order reduction}}, doi = {10.1145/3360550}, volume = {3}, year = {2019}, } @inproceedings{7402, abstract = {Graph planning gives rise to fundamental algorithmic questions such as shortest path, traveling salesman problem, etc. A classical problem in discrete planning is to consider a weighted graph and construct a path that maximizes the sum of weights for a given time horizon T. However, in many scenarios, the time horizon is not fixed, but the stopping time is chosen according to some distribution such that the expected stopping time is T. If the stopping time distribution is not known, then to ensure robustness, the distribution is chosen by an adversary, to represent the worst-case scenario. A stationary plan always chooses the same outgoing edge at every vertex. For fixed horizon or fixed stopping-time distribution, stationary plans are not sufficient for optimality. Quite surprisingly, we show that when an adversary chooses the stopping-time distribution with expected stopping time T, stationary plans are sufficient. While computing optimal stationary plans for fixed horizon is NP-complete, we show that computing optimal stationary plans under adversarial stopping-time distribution can be achieved in polynomial time. Consequently, our polynomial-time algorithm for adversarial stopping time also computes an optimal plan among all possible plans.}, author = {Chatterjee, Krishnendu and Doyen, Laurent}, booktitle = {34th Annual ACM/IEEE Symposium on Logic in Computer Science}, isbn = {9781728136080}, location = {Vancouver, BC, Canada}, pages = {1--13}, publisher = {IEEE}, title = {{Graph planning with expected finite horizon}}, doi = {10.1109/lics.2019.8785706}, year = {2019}, } @unpublished{7950, abstract = {The input to the token swapping problem is a graph with vertices v_1, v_2, ..., v_n, and n tokens with labels 1, 2, ..., n, one on each vertex. The goal is to get token i to vertex v_i for all i = 1, ..., n using a minimum number of swaps, where a swap exchanges the tokens on the endpoints of an edge. Token swapping on a tree, also known as “sorting with a transposition tree,” is neither known to be in P nor known to be NP-complete. We present some partial results: 1. An optimum swap sequence may need to perform a swap on a leaf vertex that has the correct token (a “happy leaf”), disproving a conjecture of Vaughan. 2.
Any algorithm that fixes happy leaves—as all known approximation algorithms for the problem do—has approximation factor at least 4/3. Furthermore, the two best-known 2-approximation algorithms have approximation factor exactly 2. 3. A generalized problem—weighted coloured token swapping—is NP-complete on trees, but solvable in polynomial time on paths and stars. In this version, tokens and vertices have colours, and colours have weights. The goal is to get every token to a vertex of the same colour, and the cost of a swap is the sum of the weights of the two tokens involved.}, author = {Biniaz, Ahmad and Jain, Kshitij and Lubiw, Anna and Masárová, Zuzana and Miltzow, Tillmann and Mondal, Debajyoti and Naredla, Anurag Murty and Tkadlec, Josef and Turcotte, Alexi}, booktitle = {arXiv}, title = {{Token swapping on trees}}, year = {2019}, } @inproceedings{6780, abstract = {In this work, we consider the almost-sure termination problem for probabilistic programs, which asks whether a given probabilistic program terminates with probability 1. Scalable approaches for program analysis often rely on modularity as their theoretical basis. In non-probabilistic programs, the classical variant rule (V-rule) of Floyd-Hoare logic provides the foundation for modular analysis. Extension of this rule to almost-sure termination of probabilistic programs is quite tricky, and a probabilistic variant was proposed in [16]. While the proposed probabilistic variant cautiously addresses the key issue of integrability, we show that this modular rule is still not sound for almost-sure termination of probabilistic programs. Besides establishing unsoundness of the previous rule, our contributions are as follows: First, we present a sound modular rule for almost-sure termination of probabilistic programs. Our approach is based on a novel notion of descent supermartingales. Second, for algorithmic approaches, we consider descent supermartingales that are linear and show that they can be synthesized in polynomial time. Finally, we present experimental results on a variety of benchmarks and several natural examples that model various types of nested while loops in probabilistic programs, and demonstrate that our approach is able to efficiently prove their almost-sure termination property.}, author = {Huang, Mingzhang and Fu, Hongfei and Chatterjee, Krishnendu and Goharshady, Amir Kafshdar}, booktitle = {Proceedings of the 34th ACM International Conference on Object-Oriented Programming, Systems, Languages, and Applications}, location = {Athens, Greece}, publisher = {ACM}, title = {{Modular verification for almost-sure termination of probabilistic programs}}, doi = {10.1145/3360555}, volume = {3}, year = {2019}, } @article{6380, abstract = {There is a huge gap between the speeds of modern caches and main memories, and therefore cache misses account for a considerable loss of efficiency in programs. The predominant technique to address this issue has been Data Packing: data elements that are frequently accessed within time proximity are packed into the same cache block, thereby minimizing accesses to the main memory. We consider the algorithmic problem of Data Packing on a two-level memory system. Given a reference sequence R of accesses to data elements, the task is to partition the elements into cache blocks such that the number of cache misses on R is minimized. The problem is notoriously difficult: it is NP-hard even when the cache has size 1, and is hard to approximate for any cache size larger than 4.
Therefore, all existing techniques for Data Packing are based on heuristics and lack theoretical guarantees. In this work, we present the first positive theoretical results for Data Packing, along with new and stronger negative results. We consider the problem under the lens of the underlying access hypergraphs, which are hypergraphs of affinities between the data elements, where the order of an access hypergraph corresponds to the size of the affinity group. We study the problem parameterized by the treewidth of access hypergraphs, which is a standard notion in graph theory to measure the closeness of a graph to a tree. Our main results are as follows: We show there is a number q* depending on the cache parameters such that (a) if the access hypergraph of order q* has constant treewidth, then there is a linear-time algorithm for Data Packing; (b) the Data Packing problem remains NP-hard even if the access hypergraph of order q*-1 has constant treewidth. Thus, we establish a fine-grained dichotomy depending on a single parameter, namely, the highest order among access hypergraphs that have constant treewidth, and we establish the optimal value q* of this parameter. Finally, we present an experimental evaluation of a prototype implementation of our algorithm. Our results demonstrate that, in practice, access hypergraphs of many commonly-used algorithms have small treewidth. We compare our approach with several state-of-the-art heuristic-based algorithms and show that our algorithm leads to significantly fewer cache misses.}, author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Okati, Nastaran and Pavlogiannis, Andreas}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, number = {POPL}, publisher = {ACM}, title = {{Efficient parameterized algorithms for data packing}}, doi = {10.1145/3290366}, volume = {3}, year = {2019}, } @inproceedings{6056, abstract = {In today's programmable blockchains, smart contracts are limited to being deterministic and non-probabilistic. This lack of randomness is a consequential limitation, given that a wide variety of real-world financial contracts, such as casino games and lotteries, depend entirely on randomness. As a result, several ad-hoc random number generation approaches have been developed to be used in smart contracts. These include ideas such as using an oracle or relying on the block hash. However, these approaches are manipulatable, i.e., their output can be tampered with by parties who might not be neutral, such as the owner of the oracle or the miners. We propose a novel game-theoretic approach for generating provably unmanipulatable pseudorandom numbers on the blockchain. Our approach allows smart contracts to access a trustworthy source of randomness that does not rely on potentially compromised miners or oracles, hence enabling the creation of a new generation of smart contracts that are not limited to being non-probabilistic and can be drawn from the much more general class of probabilistic programs.}, author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Pourdamghani, Arash}, booktitle = {IEEE International Conference on Blockchain and Cryptocurrency}, location = {Seoul, Korea}, publisher = {IEEE}, title = {{Probabilistic smart contracts: Secure randomness on the blockchain}}, doi = {10.1109/BLOC.2019.8751326}, year = {2019}, } @inproceedings{6378, abstract = {In today's cryptocurrencies, Hashcash proof of work is the most commonly-adopted approach to mining.
In Hashcash, when a miner decides to add a block to the chain, she has to solve the difficult computational puzzle of inverting a hash function. While Hashcash has been successfully adopted in both Bitcoin and Ethereum, it has attracted significant and harsh criticism due to its massive waste of electricity, its carbon footprint and environmental effects, and the inherent lack of usefulness in inverting a hash function. Various other mining protocols have been suggested, including proof of stake, in which a miner's chance of adding the next block is proportional to her current balance. However, such protocols lead to a higher entry cost for new miners who might not yet have any stake in the cryptocurrency, and can in the worst case lead to an oligopoly, where the rich have complete control over mining. In this paper, we propose Hybrid Mining: a new mining protocol that combines solving real-world useful problems with Hashcash. Our protocol allows new miners to join the network by taking part in Hashcash mining without having to own an initial stake. It also allows nodes of the network to submit hard computational problems whose solutions are of interest in the real world, e.g., protein folding problems. Then, miners can choose to compete in solving these problems, in lieu of Hashcash, for adding a new block. Hence, Hybrid Mining incentivizes miners to solve useful problems, such as hard computational problems arising in biology, in a distributed manner. It also gives researchers in other areas an easy-to-use tool to outsource their hard computations to the blockchain network, which has enormous computational power, by paying a reward to the miner who solves the problem for them. Moreover, our protocol provides strong security guarantees and is at least as resilient to double spending as Bitcoin.}, author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Pourdamghani, Arash}, booktitle = {Proceedings of the 34th ACM Symposium on Applied Computing}, isbn = {9781450359337}, location = {Limassol, Cyprus}, pages = {374--381}, publisher = {ACM}, title = {{Hybrid Mining: Exploiting blockchain’s computational power for distributed problem solving}}, doi = {10.1145/3297280.3297319}, volume = {Part F147772}, year = {2019}, } @inproceedings{6175, abstract = {We consider the problem of expected cost analysis over nondeterministic probabilistic programs, which aims at automated methods for analyzing the resource usage of such programs. Previous approaches for this problem could only handle nonnegative bounded costs. However, in many scenarios, such as queuing networks or analysis of cryptocurrency protocols, both positive and negative costs are necessary and the costs are unbounded as well. In this work, we present a sound and efficient approach to obtain polynomial bounds on the expected accumulated cost of nondeterministic probabilistic programs. Our approach can handle (a) general positive and negative costs with bounded updates in variables; and (b) nonnegative costs with general updates to variables. We show that several natural examples which could not be handled by previous approaches are captured in our framework. Moreover, our approach leads to an efficient polynomial-time algorithm, while no previous approach for cost analysis of probabilistic programs could guarantee polynomial runtime.
Finally, we show the effectiveness of our approach using experimental results on a variety of programs for which we efficiently synthesize tight resource-usage bounds.}, author = {Wang, Peixin and Fu, Hongfei and Goharshady, Amir Kafshdar and Chatterjee, Krishnendu and Qin, Xudong and Shi, Wenjun}, booktitle = {PLDI 2019: Proceedings of the 40th ACM SIGPLAN Conference on Programming Language Design and Implementation}, keywords = {Program Cost Analysis, Program Termination, Probabilistic Programs, Martingales}, location = {Phoenix, AZ, United States}, pages = {204--220}, publisher = {Association for Computing Machinery}, title = {{Cost analysis of nondeterministic probabilistic programs}}, doi = {10.1145/3314221.3314581}, year = {2019}, } @inproceedings{6490, abstract = {Smart contracts are programs that are stored and executed on the Blockchain and can receive, manage and transfer money (cryptocurrency units). Two important problems regarding smart contracts are formal analysis and compiler optimization. Formal analysis is extremely important, because smart contracts hold funds worth billions of dollars and their code is immutable after deployment. Hence, an undetected bug can cause significant financial losses. Compiler optimization is also crucial, because every action of a smart contract has to be executed by every node in the Blockchain network. Therefore, optimizations in compiling smart contracts can lead to significant savings in computation, time and energy. Two classical approaches in program analysis and compiler optimization are intraprocedural and interprocedural analysis. In intraprocedural analysis, each function is analyzed separately, while interprocedural analysis considers the entire program. In both cases, the analyses are usually reduced to graph problems over the control flow graph (CFG) of the program. These graph problems are often computationally expensive. Hence, there has been ample research on exploiting structural properties of CFGs for efficient algorithms. One such well-studied property is the treewidth, which is a measure of tree-likeness of graphs. It is known that intraprocedural CFGs of structured programs have treewidth at most 6, whereas the interprocedural treewidth cannot be bounded. This result has been used as a basis for many efficient intraprocedural analyses. In this paper, we explore the idea of exploiting the treewidth of smart contracts for formal analysis and compiler optimization. First, similar to classical programs, we show that the intraprocedural treewidth of structured Solidity and Vyper smart contracts is at most 9. Second, for global analysis, we prove that the interprocedural treewidth of structured smart contracts is bounded by 10 and, in sharp contrast with classical programs, treewidth-based algorithms can be easily applied for interprocedural analysis. Finally, we supplement our theoretical results with experiments using a tool we implemented for computing treewidth of smart contracts and show that the treewidth is much lower in practice. We use 36,764 real-world Ethereum smart contracts as benchmarks and find that they have an average treewidth of at most 3.35 for the intraprocedural case and 3.65 for the interprocedural case. 
}, author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Goharshady, Ehsan Kafshdar}, booktitle = {Proceedings of the 34th ACM Symposium on Applied Computing}, isbn = {9781450359337}, location = {Limassol, Cyprus}, pages = {400--408}, publisher = {ACM}, title = {{The treewidth of smart contracts}}, doi = {10.1145/3297280.3297322}, volume = {Part F147772}, year = {2019}, } @article{7158, abstract = {Interprocedural analysis is at the heart of numerous applications in programming languages, such as alias analysis, constant propagation, and so on. Recursive state machines (RSMs) are standard models for interprocedural analysis. We consider a general framework with RSMs where the transitions are labeled from a semiring and path properties are algebraic with semiring operations. RSMs with algebraic path properties can model interprocedural dataflow analysis problems, the shortest path problem, the most probable path problem, and so on. The traditional algorithms for interprocedural analysis focus on path properties where the starting point is fixed as the entry point of a specific method. In this work, we consider possible multiple queries as required in many applications such as alias analysis. The study of multiple queries allows us to bring in an important algorithmic distinction between the resource usage of the one-time preprocessing and that of each individual query. The second aspect we consider is that the control flow graphs for most programs have constant treewidth. Our main contributions are simple and implementable algorithms that support multiple queries for algebraic path properties for RSMs that have constant treewidth. Our theoretical results show that our algorithms have small additional one-time preprocessing but can answer subsequent queries significantly faster than the current algorithmic solutions for interprocedural dataflow analysis. We have also implemented our algorithms and evaluated their performance for performing on-demand interprocedural dataflow analysis on various domains, such as live variable analysis and reaching definitions, on a standard benchmark set. Our experimental results align with our theoretical statements and show that after a lightweight preprocessing, on-demand queries are answered much faster than the standard existing algorithmic approaches.}, author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Goyal, Prateesh and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas}, issn = {0164-0925}, journal = {ACM Transactions on Programming Languages and Systems}, number = {4}, publisher = {ACM}, title = {{Faster algorithms for dynamic algebraic queries in basic RSMs with constant treewidth}}, doi = {10.1145/3363525}, volume = {41}, year = {2019}, } @article{7014, abstract = {We study the problem of developing efficient approaches for proving worst-case bounds of non-deterministic recursive programs. Ranking functions are sound and complete for proving termination and worst-case bounds of nonrecursive programs. First, we apply ranking functions to recursion, resulting in measure functions. We show that measure functions provide a sound and complete approach to prove worst-case bounds of non-deterministic recursive programs. Our second contribution is the synthesis of measure functions in nonpolynomial forms.
We show that non-polynomial measure functions with logarithm and exponentiation can be synthesized through abstraction of logarithmic or exponentiation terms, Farkas' Lemma, and Handelman's Theorem using linear programming. While previous methods obtain worst-case polynomial bounds, our approach can synthesize bounds of the form $\mathcal{O}(n\log n)$ as well as $\mathcal{O}(n^r)$ where $r$ is not an integer. We present experimental results to demonstrate that our approach can efficiently obtain worst-case bounds of classical recursive algorithms such as (i) Merge-Sort and the divide-and-conquer algorithm for the Closest-Pair problem, where we obtain an $\mathcal{O}(n \log n)$ worst-case bound, and (ii) Karatsuba's algorithm for polynomial multiplication and Strassen's algorithm for matrix multiplication, where we obtain an $\mathcal{O}(n^r)$ bound such that $r$ is not an integer and is close to the best-known bounds for the respective algorithms.}, author = {Chatterjee, Krishnendu and Fu, Hongfei and Goharshady, Amir Kafshdar}, journal = {ACM Transactions on Programming Languages and Systems}, number = {4}, publisher = {ACM}, title = {{Non-polynomial worst-case analysis of recursive programs}}, doi = {10.1145/3339984}, volume = {41}, year = {2019}, } @inproceedings{10883, abstract = {Solving parity games, which are equivalent to modal μ-calculus model checking, is a central algorithmic problem in formal methods, with applications in reactive synthesis, program repair, verification of branching-time properties, etc. Besides the standard computation model with the explicit representation of games, another important theoretical model of computation is that of set-based symbolic algorithms. Set-based symbolic algorithms use basic set operations and one-step predecessor operations on the implicit description of games, rather than the explicit representation. The significance of symbolic algorithms is that they provide scalable algorithms for large finite-state systems, as well as for infinite-state systems with finite quotient. Consider parity games on graphs with n vertices and parity conditions with d priorities. While there is a rich literature of explicit algorithms for parity games, the main results for set-based symbolic algorithms are as follows: (a) the basic algorithm that requires O(n^d) symbolic operations and O(d) symbolic space; and (b) an improved algorithm that requires O(n^{d/3+1}) symbolic operations and O(n) symbolic space. In this work, our contributions are as follows: (1) We present a black-box set-based symbolic algorithm based on the explicit progress measure algorithm. Two important consequences of our algorithm are as follows: (a) a set-based symbolic algorithm for parity games that requires quasi-polynomially many symbolic operations and O(n) symbolic space; and (b) any future improvement in progress-measure-based explicit algorithms immediately implies an efficiency improvement in our set-based symbolic algorithm for parity games. (2) We present a set-based symbolic algorithm that requires quasi-polynomially many symbolic operations and O(d · log n) symbolic space.
Moreover, for the important special case of d ≤ log n, our algorithm requires only polynomially many symbolic operations and poly-logarithmic symbolic space.}, author = {Chatterjee, Krishnendu and Dvořák, Wolfgang and Henzinger, Monika H and Svozil, Alexander}, booktitle = {22nd International Conference on Logic for Programming, Artificial Intelligence and Reasoning}, issn = {2398-7340}, location = {Awassa, Ethiopia}, pages = {233--253}, publisher = {EasyChair}, title = {{Quasipolynomial set-based symbolic algorithms for parity games}}, doi = {10.29007/5z5k}, volume = {57}, year = {2018}, } @inproceedings{325, abstract = {Probabilistic programs extend classical imperative programs with real-valued random variables and random branching. The most basic liveness property for such programs is the termination property. The qualitative (a.k.a. almost-sure) termination problem asks whether a given program terminates with probability 1. While ranking functions provide a sound and complete method for non-probabilistic programs, their extension to probabilistic programs is achieved via ranking supermartingales (RSMs). Although deep theoretical results have been established about RSMs, their application to probabilistic programs with nondeterminism has been limited only to programs of restricted control-flow structure. For non-probabilistic programs, lexicographic ranking functions provide a compositional and practical approach for termination analysis of real-world programs. In this work we introduce lexicographic RSMs and show that they present a sound method for almost-sure termination of probabilistic programs with nondeterminism. We show that lexicographic RSMs provide a tool for compositional reasoning about almost-sure termination, and for probabilistic programs with linear arithmetic they can be synthesized efficiently (in polynomial time). We also show that with additional restrictions even asymptotic bounds on expected termination time can be obtained through lexicographic RSMs. Finally, we present experimental results on benchmarks adapted from previous work to demonstrate the effectiveness of our approach.}, author = {Agrawal, Sheshansh and Chatterjee, Krishnendu and Novotný, Petr}, journal = {Proceedings of the ACM on Programming Languages}, location = {Los Angeles, CA, USA}, number = {POPL}, publisher = {ACM}, title = {{Lexicographic ranking supermartingales: an efficient approach to termination of probabilistic programs}}, doi = {10.1145/3158122}, volume = {2}, year = {2018}, } @inbook{59, abstract = {Graph-based games are an important tool in computer science. They have applications in synthesis, verification, refinement, and far beyond. We review graph-based games with objectives on infinite plays. We give definitions and algorithms to solve the games and to give a winning strategy. The objectives we consider are mostly Boolean, but we also look at quantitative graph-based games and their objectives. Synthesis aims to turn temporal logic specifications into correct reactive systems. We explain the reduction of synthesis to graph-based games (or equivalently tree automata) using synthesis of LTL specifications as an example. We treat the classical approach that uses determinization of parity automata and more modern approaches.}, author = {Bloem, Roderick and Chatterjee, Krishnendu and Jobstmann, Barbara}, booktitle = {Handbook of Model Checking}, editor = {Henzinger, Thomas A and Clarke, Edmund M.
and Veith, Helmut and Bloem, Roderick}, isbn = {978-3-319-10574-1}, pages = {921--962}, publisher = {Springer}, title = {{Graph games and reactive synthesis}}, doi = {10.1007/978-3-319-10575-8_27}, year = {2018}, } @inbook{86, abstract = {Responsiveness—the requirement that every request to a system be eventually handled—is one of the fundamental liveness properties of a reactive system. Average response time is a quantitative measure for the responsiveness requirement used commonly in performance evaluation. We show how average response time can be computed on state-transition graphs, on Markov chains, and on game graphs. In all three cases, we give polynomial-time algorithms.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, booktitle = {Principles of Modeling}, editor = {Lohstroh, Marten and Derler, Patricia and Sirjani, Marjan}, pages = {143--161}, publisher = {Springer}, title = {{Computing average response time}}, doi = {10.1007/978-3-319-95246-8_9}, volume = {10760}, year = {2018}, }
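
Entry 7210 above concerns the tradeoff between the fixation probability and the fixation time of a new mutant under the Moran process. As a minimal illustration of the quantities involved (a sketch written for this bibliography, not code from the paper; the population size N, fitness r, function names, and trial count are illustrative choices), the following Python snippet estimates the fixation probability of a single mutant in a well-mixed population by Monte Carlo simulation and compares it with the standard closed form (1 - 1/r) / (1 - 1/r^N).

import random

def moran_fixation(N: int, r: float, trials: int = 20000) -> float:
    """Estimate the probability that a single mutant with relative fitness r
    fixes in a well-mixed population of N individuals (residents have fitness 1)."""
    fixed = 0
    for _ in range(trials):
        mutants = 1
        while 0 < mutants < N:
            # Birth: pick the reproducing individual proportionally to fitness.
            birth_is_mutant = random.random() < (r * mutants) / (r * mutants + (N - mutants))
            # Death: pick the replaced individual uniformly at random.
            death_is_mutant = random.random() < mutants / N
            mutants += int(birth_is_mutant) - int(death_is_mutant)
        if mutants == N:
            fixed += 1
    return fixed / trials

def moran_fixation_exact(N: int, r: float) -> float:
    """Closed-form well-mixed fixation probability (1 - 1/r) / (1 - 1/r^N), for r != 1."""
    return (1 - 1 / r) / (1 - r ** (-N))

if __name__ == "__main__":
    N, r = 20, 1.1
    print("simulated fixation probability:  ", moran_fixation(N, r))
    print("closed-form fixation probability:", moran_fixation_exact(N, r))

Amplifiers of selection, such as the star graph, push the fixation probability above this well-mixed baseline at the cost of a longer absorption time, which is precisely the tradeoff that the paper quantifies.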