@article{2836,
  abstract = {We study the automatic synthesis of fair non-repudiation protocols, a class of fair exchange protocols, used for digital contract signing. First, we show how to specify the objectives of the participating agents and the trusted third party as path formulas in linear temporal logic and prove that the satisfaction of these objectives implies fairness, a property required of fair exchange protocols. We then show that weak (co-operative) co-synthesis and classical (strictly competitive) co-synthesis fail, whereas assume-guarantee synthesis (AGS) succeeds. We demonstrate the success of AGS as follows: (a) any solution of AGS is attack-free: no subset of participants can violate the objectives of the other participants; (b) the Asokan-Shoup-Waidner certified mail protocol, which has known vulnerabilities, is not a solution of AGS; (c) the Kremer-Markowitch non-repudiation protocol is a solution of AGS; and (d) AGS yields a new and symmetric fair non-repudiation protocol that is attack-free. To our knowledge this is the first application of synthesis to fair non-repudiation protocols, and our results show how synthesis can both automatically discover vulnerabilities in protocols and generate correct protocols. The solution to AGS can be computed efficiently as the secure equilibrium solution of three-player graph games.},
  author = {Chatterjee, Krishnendu and Raman, Vishwanath},
  journal = {Formal Aspects of Computing},
  number = {4},
  pages = {825 -- 859},
  publisher = {Springer},
  title = {{Assume-guarantee synthesis for digital contract signing}},
  doi = {10.1007/s00165-013-0283-6},
  volume = {26},
  year = {2013},
}

@article{2854,
  abstract = {We consider concurrent games played on graphs. At every round of a game, each player simultaneously and independently selects a move; the moves jointly determine the transition to a successor state. Two basic objectives are the safety objective to stay forever in a given set of states, and its dual, the reachability objective to reach a given set of states. First, we present a simple proof of the fact that in concurrent reachability games, for all ε>0, memoryless ε-optimal strategies exist. A memoryless strategy is independent of the history of plays, and an ε-optimal strategy achieves the objective with probability within ε of the value of the game. In contrast to previous proofs of this fact, our proof is more elementary and more combinatorial. Second, we present a strategy-improvement (a.k.a. policy-iteration) algorithm for concurrent games with reachability objectives. Finally, we present a strategy-improvement algorithm for turn-based stochastic games (where each player selects moves in turns) with safety objectives. Our algorithms yield sequences of player-1 strategies which ensure probabilities of winning that converge monotonically (from below) to the value of the game.},
  author = {Chatterjee, Krishnendu and De Alfaro, Luca and Henzinger, Thomas A},
  journal = {Journal of Computer and System Sciences},
  number = {5},
  pages = {640 -- 657},
  publisher = {Elsevier},
  title = {{Strategy improvement for concurrent reachability and turn based stochastic safety games}},
  doi = {10.1016/j.jcss.2012.12.001},
  volume = {79},
  year = {2013},
}

@inproceedings{2886,
  abstract = {We focus on the realizability problem of Message Sequence Graphs (MSG), i.e., the problem of whether a given MSG specification is correctly distributable among parallel components communicating via messages.
This fundamental problem of MSG is known to be undecidable. We introduce a well-motivated restricted class of MSG, so-called controllable-choice MSG, and show that all its models are realizable and, moreover, that it is decidable whether a given MSG model is a member of this class. In more detail, this class of MSG specifications admits a deadlock-free realization by overloading existing messages with additional bounded control data. We also show that the presented class is the largest known subclass of MSG that allows for deadlock-free realization.},
  author = {Chmelik, Martin and Řehák, Vojtěch},
  location = {Znojmo, Czech Republic},
  pages = {118 -- 130},
  publisher = {Springer},
  title = {{Controllable-choice message sequence graphs}},
  doi = {10.1007/978-3-642-36046-6_12},
  volume = {7721},
  year = {2013},
}

@article{3116,
  abstract = {Multithreaded programs coordinate their interaction through synchronization primitives like mutexes and semaphores, which are managed by an OS-provided resource manager. We propose algorithms for the automatic construction of code-aware resource managers for multithreaded embedded applications. Such managers use knowledge about the structure and resource usage (mutex and semaphore usage) of the threads to guarantee deadlock freedom and progress while managing resources in an efficient way. Our algorithms compute managers as winning strategies in certain infinite games, and produce a compact code description of these strategies. We have implemented the algorithms in the tool Cynthesis. Given a multithreaded program in C, the tool produces C code implementing a code-aware resource manager. We show in experiments that Cynthesis produces compact resource managers within a few minutes on a set of embedded benchmarks with up to 6 threads.},
  author = {Chatterjee, Krishnendu and De Alfaro, Luca and Faella, Marco and Majumdar, Ritankar and Raman, Vishwanath},
  journal = {Formal Methods in System Design},
  number = {2},
  pages = {142 -- 174},
  publisher = {Springer},
  title = {{Code aware resource management}},
  doi = {10.1007/s10703-012-0170-4},
  volume = {42},
  year = {2013},
}

@article{2831,
  abstract = {We consider Markov decision processes (MDPs) with Büchi (liveness) objectives. We consider the problem of computing the set of almost-sure winning states from where the objective can be ensured with probability 1. Our contributions are as follows: First, we present the first subquadratic symbolic algorithm to compute the almost-sure winning set for MDPs with Büchi objectives; our algorithm takes O(n · √m) symbolic steps, as compared to the previously known algorithm that takes O(n²) symbolic steps, where n is the number of states and m is the number of edges of the MDP. In practice MDPs have constant out-degree, and then our symbolic algorithm takes O(n · √n) symbolic steps, as compared to the previously known O(n²) symbolic steps algorithm. Second, we present a new algorithm, namely the win-lose algorithm, with the following two properties: (a) the algorithm iteratively computes subsets of the almost-sure winning set and its complement, as compared to all previous algorithms that discover the almost-sure winning set only upon termination; and (b) it requires O(n · √K) symbolic steps, where K is the maximal number of edges of strongly connected components (SCCs) of the MDP. The win-lose algorithm requires symbolic computation of SCCs.
Third, we improve the algorithm for symbolic SCC computation; the previously known algorithm takes a linear number of symbolic steps, and our new algorithm improves the constants associated with the linear number of steps. In the worst case the previously known algorithm takes 5×n symbolic steps, whereas our new algorithm takes 4×n symbolic steps.},
  author = {Chatterjee, Krishnendu and Henzinger, Monika H and Joglekar, Manas and Shah, Nisarg},
  journal = {Formal Methods in System Design},
  number = {3},
  pages = {301 -- 327},
  publisher = {Springer},
  title = {{Symbolic algorithms for qualitative analysis of Markov decision processes with Büchi objectives}},
  doi = {10.1007/s10703-012-0180-2},
  volume = {42},
  year = {2013},
}

@inproceedings{2279,
  abstract = {We consider two-player games played on weighted directed graphs with mean-payoff and total-payoff objectives, two classical quantitative objectives. While for single-dimensional games the complexity and memory bounds for both objectives coincide, we show that, in contrast to multi-dimensional mean-payoff games, which are known to be coNP-complete, multi-dimensional total-payoff games are undecidable. We introduce conservative approximations of these objectives, where the payoff is considered over a local finite window sliding along a play, instead of the whole play. For a single dimension, we show that (i) if the window size is polynomial, deciding the winner takes polynomial time, and (ii) the existence of a bounded window can be decided in NP ∩ coNP, and is at least as hard as solving mean-payoff games. For multiple dimensions, we show that (i) the problem with fixed window size is EXPTIME-complete, and (ii) there is no primitive-recursive algorithm to decide the existence of a bounded window.},
  author = {Chatterjee, Krishnendu and Doyen, Laurent and Randour, Mickael and Raskin, Jean},
  location = {Hanoi, Vietnam},
  pages = {118 -- 132},
  publisher = {Springer},
  title = {{Looking at mean-payoff and total-payoff through windows}},
  doi = {10.1007/978-3-319-02444-8_10},
  volume = {8172},
  year = {2013},
}

@misc{5399,
  abstract = {In this work we present a flexible tool for tumor progression, which simulates the evolutionary dynamics of cancer. The tool models tumor progression as a multi-type branching process in which the key parameters are the fitness landscape, the mutation rate, and the average time of cell division. The fitness of a cancer cell depends on the mutations it has accumulated. The input to our tool can be any fitness landscape, mutation rate, and cell division time, and the tool produces the growth dynamics and all relevant statistics.},
  author = {Reiter, Johannes and Bozic, Ivana and Chatterjee, Krishnendu and Nowak, Martin},
  issn = {2664-1690},
  pages = {17},
  publisher = {IST Austria},
  title = {{TTP: Tool for Tumor Progression}},
  doi = {10.15479/AT:IST-2013-104-v1-1},
  year = {2013},
}

@inproceedings{2295,
  abstract = {We consider partially observable Markov decision processes (POMDPs) with ω-regular conditions specified as parity objectives. The qualitative analysis problem, given a POMDP and a parity objective, asks whether there is a strategy to ensure that the objective is satisfied with probability 1 (resp. positive probability). While the qualitative analysis problems are known to be undecidable even for very special cases of parity objectives, we establish decidability (with optimal EXPTIME-complete complexity) of the qualitative analysis problems for POMDPs with all parity objectives under finite-memory strategies.
We also establish asymptotically optimal (exponential) memory bounds.},
  author = {Chatterjee, Krishnendu and Chmelik, Martin and Tracol, Mathieu},
  location = {Torino, Italy},
  pages = {165 -- 180},
  publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title = {{What is decidable about partially observable Markov decision processes with omega-regular objectives}},
  doi = {10.4230/LIPIcs.CSL.2013.165},
  volume = {23},
  year = {2013},
}

@misc{5403,
  abstract = {We consider concurrent games played by two players on a finite state graph, where in every round the players simultaneously choose a move, and the current state along with the joint moves determine the successor state. We study the most fundamental objective for concurrent games, namely, the mean-payoff or limit-average objective, where a reward is associated to every transition, the goal of player 1 is to maximize the long-run average of the rewards, and the objective of player 2 is strictly the opposite (i.e., the games are zero-sum). The path constraint for player 1 could be qualitative, i.e., the mean-payoff is the maximal reward, or arbitrarily close to it; or quantitative, i.e., a given threshold between the minimal and maximal reward. We consider the computation of the almost-sure (resp. positive) winning sets, where player 1 can ensure that the path constraint is satisfied with probability 1 (resp. positive probability). Almost-sure winning with a qualitative constraint corresponds exactly to the question of whether there exists a strategy to ensure that the payoff is the maximal reward of the game. Our main results for qualitative path constraints are as follows: (1) we establish qualitative determinacy results that show that for every state, either player 1 has a strategy to ensure almost-sure (resp. positive) winning against all player-2 strategies, or player 2 has a spoiling strategy to falsify almost-sure (resp. positive) winning against all player-1 strategies; (2) we present optimal strategy complexity results that precisely characterize the classes of strategies required for almost-sure and positive winning for both players; and (3) we present quadratic time algorithms to compute the almost-sure and the positive winning sets, matching the best known bound of the algorithms for much simpler problems (such as reachability objectives). For quantitative constraints we show that a polynomial time solution for the almost-sure or the positive winning set would imply a solution to a long-standing open problem (of solving the value problem of mean-payoff games) that is not known to be in polynomial time.},
  author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus},
  issn = {2664-1690},
  pages = {33},
  publisher = {IST Austria},
  title = {{Qualitative analysis of concurrent mean-payoff games}},
  doi = {10.15479/AT:IST-2013-126-v1-1},
  year = {2013},
}

@misc{5400,
  abstract = {We consider partially observable Markov decision processes (POMDPs) with ω-regular conditions specified as parity objectives. The class of ω-regular languages extends regular languages to infinite strings and provides a robust specification language to express all properties used in verification, and parity objectives are canonical forms to express ω-regular conditions. The qualitative analysis problem, given a POMDP and a parity objective, asks whether there is a strategy to ensure that the objective is satisfied with probability 1 (resp. positive probability).
While the qualitative analysis problems are known to be undecidable even for very special cases of parity objectives, we establish decidability (with optimal complexity) of the qualitative analysis problems for POMDPs with all parity objectives under finite-memory strategies. We establish asymptotically optimal (exponential) memory bounds and EXPTIME-completeness of the qualitative analysis problems under finite-memory strategies for POMDPs with parity objectives.},
  author = {Chatterjee, Krishnendu and Chmelik, Martin and Tracol, Mathieu},
  issn = {2664-1690},
  pages = {41},
  publisher = {IST Austria},
  title = {{What is decidable about partially observable Markov decision processes with ω-regular objectives}},
  doi = {10.15479/AT:IST-2013-109-v1-1},
  year = {2013},
}