@inproceedings{2279,
  abstract = {We consider two-player games played on weighted directed graphs with mean-payoff and total-payoff objectives, two classical quantitative objectives. While for single-dimensional games the complexity and memory bounds for both objectives coincide, we show that, in contrast to multi-dimensional mean-payoff games, which are known to be coNP-complete, multi-dimensional total-payoff games are undecidable. We introduce conservative approximations of these objectives, where the payoff is considered over a local finite window sliding along a play, instead of over the whole play. For a single dimension, we show that (i) if the window size is polynomial, deciding the winner takes polynomial time, and (ii) the existence of a bounded window can be decided in NP ∩ coNP, and is at least as hard as solving mean-payoff games. For multiple dimensions, we show that (i) the problem with a fixed window size is EXPTIME-complete, and (ii) there is no primitive-recursive algorithm to decide the existence of a bounded window.},
  author = {Chatterjee, Krishnendu and Doyen, Laurent and Randour, Mickael and Raskin, Jean-François},
  location = {Hanoi, Vietnam},
  pages = {118--132},
  publisher = {Springer},
  title = {{Looking at mean-payoff and total-payoff through windows}},
  doi = {10.1007/978-3-319-02444-8_10},
  volume = {8172},
  year = {2013},
}

@misc{5399,
  abstract = {In this work we present a flexible tool for tumor progression, which simulates the evolutionary dynamics of cancer. The tool implements a multi-type branching process in which the key parameters are the fitness landscape, the mutation rate, and the average time of cell division. The fitness of a cancer cell depends on the mutations it has accumulated. The input to our tool can be any fitness landscape, mutation rate, and cell division time, and the tool produces the growth dynamics and all relevant statistics.},
  author = {Reiter, Johannes and Bozic, Ivana and Chatterjee, Krishnendu and Nowak, Martin},
  issn = {2664-1690},
  pages = {17},
  publisher = {IST Austria},
  title = {{TTP: Tool for Tumor Progression}},
  doi = {10.15479/AT:IST-2013-104-v1-1},
  year = {2013},
}

@inproceedings{2295,
  abstract = {We consider partially observable Markov decision processes (POMDPs) with ω-regular conditions specified as parity objectives. The qualitative analysis problem, given a POMDP and a parity objective, asks whether there is a strategy to ensure that the objective is satisfied with probability 1 (resp. positive probability). While the qualitative analysis problems are known to be undecidable even for very special cases of parity objectives, we establish decidability (with optimal EXPTIME-complete complexity) of the qualitative analysis problems for POMDPs with all parity objectives under finite-memory strategies. We also establish asymptotically optimal (exponential) memory bounds.},
  author = {Chatterjee, Krishnendu and Chmelik, Martin and Tracol, Mathieu},
  location = {Torino, Italy},
  pages = {165--180},
  publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title = {{What is decidable about partially observable Markov decision processes with omega-regular objectives}},
  doi = {10.4230/LIPIcs.CSL.2013.165},
  volume = {23},
  year = {2013},
}

@misc{5403,
  abstract = {We consider concurrent games played by two players on a finite state graph, where in every round the players simultaneously choose a move, and the current state along with the joint moves determines the successor state.
We study the most fundamental objective for concurrent games, namely the mean-payoff or limit-average objective, where a reward is associated with every transition, the goal of player 1 is to maximize the long-run average of the rewards, and the objective of player 2 is strictly the opposite (i.e., the games are zero-sum). The path constraint for player 1 can be qualitative, i.e., the mean-payoff is the maximal reward, or arbitrarily close to it; or quantitative, i.e., a given threshold between the minimal and maximal reward. We consider the computation of the almost-sure (resp. positive) winning sets, where player 1 can ensure that the path constraint is satisfied with probability 1 (resp. positive probability). Almost-sure winning with a qualitative constraint exactly corresponds to the question of whether there exists a strategy to ensure that the payoff is the maximal reward of the game. Our main results for qualitative path constraints are as follows: (1) we establish qualitative determinacy results showing that for every state, either player 1 has a strategy to ensure almost-sure (resp. positive) winning against all player-2 strategies, or player 2 has a spoiling strategy to falsify almost-sure (resp. positive) winning against all player-1 strategies; (2) we present optimal strategy complexity results that precisely characterize the classes of strategies required for almost-sure and positive winning for both players; and (3) we present quadratic-time algorithms to compute the almost-sure and the positive winning sets, matching the best known bound of the algorithms for much simpler problems (such as reachability objectives). For quantitative constraints we show that a polynomial-time solution for the almost-sure or the positive winning set would imply a solution to a long-standing open problem (solving the value problem of mean-payoff games) that is not known to be in polynomial time.},
  author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus},
  issn = {2664-1690},
  pages = {33},
  publisher = {IST Austria},
  title = {{Qualitative analysis of concurrent mean-payoff games}},
  doi = {10.15479/AT:IST-2013-126-v1-1},
  year = {2013},
}

@misc{5400,
  abstract = {We consider partially observable Markov decision processes (POMDPs) with ω-regular conditions specified as parity objectives. The class of ω-regular languages extends regular languages to infinite strings and provides a robust specification language to express all properties used in verification, and parity objectives are canonical forms to express ω-regular conditions. The qualitative analysis problem, given a POMDP and a parity objective, asks whether there is a strategy to ensure that the objective is satisfied with probability 1 (resp. positive probability). While the qualitative analysis problems are known to be undecidable even for very special cases of parity objectives, we establish decidability (with optimal complexity) of the qualitative analysis problems for POMDPs with all parity objectives under finite-memory strategies. We establish asymptotically optimal (exponential) memory bounds and EXPTIME-completeness of the qualitative analysis problems under finite-memory strategies for POMDPs with parity objectives.},
  author = {Chatterjee, Krishnendu and Chmelik, Martin and Tracol, Mathieu},
  issn = {2664-1690},
  pages = {41},
  publisher = {IST Austria},
  title = {{What is decidable about partially observable Markov decision processes with ω-regular objectives}},
  doi = {10.15479/AT:IST-2013-109-v1-1},
  year = {2013},
}