@article{14778, abstract = {We consider the almost-sure (a.s.) termination problem for probabilistic programs, which are a stochastic extension of classical imperative programs. Lexicographic ranking functions provide a sound and practical approach for termination of non-probabilistic programs, and their extension to probabilistic programs is achieved via lexicographic ranking supermartingales (LexRSMs). However, LexRSMs introduced in the previous work have a limitation that impedes their automation: all of their components have to be non-negative in all reachable states. This might result in a LexRSM not existing even for simple terminating programs. Our contributions are twofold. First, we introduce a generalization of LexRSMs that allows for some components to be negative. This standard feature of non-probabilistic termination proofs was hitherto not known to be sound in the probabilistic setting, as the soundness proof requires a careful analysis of the underlying stochastic process. Second, we present polynomial-time algorithms using our generalized LexRSMs for proving a.s. termination in broad classes of linear-arithmetic programs.}, author = {Chatterjee, Krishnendu and Kafshdar Goharshady, Ehsan and Novotný, Petr and Zárevúcky, Jiří and Zikelic, Dorde}, issn = {1433-299X}, journal = {Formal Aspects of Computing}, keywords = {Theoretical Computer Science, Software}, number = {2}, publisher = {Association for Computing Machinery}, title = {{On lexicographic proof rules for probabilistic termination}}, doi = {10.1145/3585391}, volume = {35}, year = {2023}, } @inproceedings{9644, abstract = {We present a new approach to proving non-termination of non-deterministic integer programs. Our technique is rather simple but efficient. It relies on a purely syntactic reversal of the program's transition system followed by a constraint-based invariant synthesis with constraints coming from both the original and the reversed transition system. The latter task is performed by a simple call to an off-the-shelf SMT-solver, which allows us to leverage the latest advances in SMT-solving. Moreover, our method offers a combination of features not present (as a whole) in previous approaches: it handles programs with non-determinism, provides relative completeness guarantees and supports programs with polynomial arithmetic. The experiments performed with our prototype tool RevTerm show that our approach, despite its simplicity and stronger theoretical guarantees, is at least on par with the state-of-the-art tools, often achieving a non-trivial improvement under a proper configuration of its parameters.}, author = {Chatterjee, Krishnendu and Goharshady, Ehsan Kafshdar and Novotný, Petr and Zikelic, Dorde}, booktitle = {Proceedings of the 42nd ACM SIGPLAN International Conference on Programming Language Design and Implementation}, isbn = {9781450383912}, location = {Online}, pages = {1033--1048}, publisher = {Association for Computing Machinery}, title = {{Proving non-termination by program reversal}}, doi = {10.1145/3453483.3454093}, year = {2021}, } @inproceedings{10414, abstract = {We consider the almost-sure (a.s.) termination problem for probabilistic programs, which are a stochastic extension of classical imperative programs. Lexicographic ranking functions provide a sound and practical approach for termination of non-probabilistic programs, and their extension to probabilistic programs is achieved via lexicographic ranking supermartingales (LexRSMs). 
However, LexRSMs introduced in the previous work have a limitation that impedes their automation: all of their components have to be non-negative in all reachable states. This might result in a LexRSM not existing even for simple terminating programs. Our contributions are twofold. First, we introduce a generalization of LexRSMs that allows for some components to be negative. This standard feature of non-probabilistic termination proofs was hitherto not known to be sound in the probabilistic setting, as the soundness proof requires a careful analysis of the underlying stochastic process. Second, we present polynomial-time algorithms using our generalized LexRSMs for proving a.s. termination in broad classes of linear-arithmetic programs.}, author = {Chatterjee, Krishnendu and Goharshady, Ehsan Kafshdar and Novotný, Petr and Zárevúcky, Jiří and Zikelic, Dorde}, booktitle = {24th International Symposium on Formal Methods}, isbn = {978-3-030-90869-0}, issn = {1611-3349}, location = {Virtual}, pages = {619--639}, publisher = {Springer Nature}, title = {{On lexicographic proof rules for probabilistic termination}}, doi = {10.1007/978-3-030-90870-6_33}, volume = {13047}, year = {2021}, } @inproceedings{8193, abstract = {Multiple-environment Markov decision processes (MEMDPs) are MDPs equipped with not one, but multiple probabilistic transition functions, which represent the various possible unknown environments. While previous research on MEMDPs focused on theoretical properties for long-run average payoff, we study them with discounted-sum payoff and focus on their practical advantages and applications. MEMDPs can be viewed as a special case of Partially observable and Mixed observability MDPs: the state of the system is perfectly observable, but not the environment. We show that the specific structure of MEMDPs allows for more efficient algorithmic analysis, in particular for faster belief updates. We demonstrate the applicability of MEMDPs in several domains. In particular, we formalize the sequential decision-making approach to contextual recommendation systems as MEMDPs and substantially improve over the previous MDP approach.}, author = {Chatterjee, Krishnendu and Chmelik, Martin and Karkhanis, Deep and Novotný, Petr and Royer, Amélie}, booktitle = {Proceedings of the 30th International Conference on Automated Planning and Scheduling}, issn = {2334-0843}, location = {Nancy, France}, pages = {48--56}, publisher = {Association for the Advancement of Artificial Intelligence}, title = {{Multiple-environment Markov decision processes: Efficient analysis and applications}}, volume = {30}, year = {2020}, } @inproceedings{7183, abstract = {A probabilistic vector addition system with states (pVASS) is a finite-state Markov process augmented with non-negative integer counters that can be incremented or decremented during each state transition, blocking any behaviour that would cause a counter to decrease below zero. pVASS can be used as abstractions of probabilistic programs with many decidable properties. The use of pVASS as abstractions requires the presence of nondeterminism in the model. In this paper, we develop techniques for checking fast termination of pVASS with nondeterminism. That is, for every initial configuration of size n, we consider the worst expected number of transitions needed to reach a configuration with some counter negative (the expected termination time).
We show that the problem of whether the asymptotic expected termination time is linear is decidable in polynomial time for a certain natural class of pVASS with nondeterminism. Furthermore, we show the following dichotomy: if the asymptotic expected termination time is not linear, then it is at least quadratic, i.e., in Ω(n^2).}, author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Kučera, Antonín and Novotný, Petr and Velan, Dominik}, booktitle = {International Symposium on Automated Technology for Verification and Analysis}, isbn = {978-3-030-31783-6}, issn = {1611-3349}, location = {Taipei, Taiwan}, pages = {462--478}, publisher = {Springer Nature}, title = {{Deciding fast termination for probabilistic VASS with nondeterminism}}, doi = {10.1007/978-3-030-31784-3_27}, volume = {11781}, year = {2019}, } @inproceedings{325, abstract = {Probabilistic programs extend classical imperative programs with real-valued random variables and random branching. The most basic liveness property for such programs is the termination property. The qualitative (aka almost-sure) termination problem asks whether a given program terminates with probability 1. While ranking functions provide a sound and complete method for non-probabilistic programs, their extension to probabilistic programs is achieved via ranking supermartingales (RSMs). Although deep theoretical results have been established about RSMs, their application to probabilistic programs with nondeterminism has been limited to programs of restricted control-flow structure. For non-probabilistic programs, lexicographic ranking functions provide a compositional and practical approach for termination analysis of real-world programs. In this work we introduce lexicographic RSMs and show that they present a sound method for almost-sure termination of probabilistic programs with nondeterminism. We show that lexicographic RSMs provide a tool for compositional reasoning about almost-sure termination, and for probabilistic programs with linear arithmetic they can be synthesized efficiently (in polynomial time). We also show that with additional restrictions even asymptotic bounds on expected termination time can be obtained through lexicographic RSMs. Finally, we present experimental results on benchmarks adapted from previous work to demonstrate the effectiveness of our approach.}, author = {Agrawal, Sheshansh and Chatterjee, Krishnendu and Novotny, Petr}, location = {Los Angeles, CA, USA}, number = {POPL}, publisher = {ACM}, title = {{Lexicographic ranking supermartingales: an efficient approach to termination of probabilistic programs}}, doi = {10.1145/3158122}, volume = {2}, year = {2018}, } @inproceedings{143, abstract = {Vector Addition Systems with States (VASS) provide a well-known and fundamental model for the analysis of concurrent processes, parameterized systems, and are also used as abstract models of programs in resource bound analysis. In this paper we study the problem of obtaining asymptotic bounds on the termination time of a given VASS. In particular, we focus on the practically important case of obtaining polynomial bounds on termination time. Our main contributions are as follows: First, we present a polynomial-time algorithm for deciding whether a given VASS has a linear asymptotic complexity. We also show that if the complexity of a VASS is not linear, it is at least quadratic. Second, we classify VASS according to quantitative properties of their cycles.
We show that certain singularities in these properties are the key reason for non-polynomial asymptotic complexity of VASS. In the absence of singularities, we show that the asymptotic complexity is always polynomial and of the form Θ(n^k), for some integer k ≤ d, where d is the dimension of the VASS. We present a polynomial-time algorithm computing the optimal k. For general VASS, the same algorithm, which is based on a complete technique for the construction of ranking functions in VASS, produces a valid lower bound, i.e., a k such that the termination complexity is Ω(n^k). Our results are based on new insights into the geometry of VASS dynamics, which hold the potential for further applicability to VASS analysis.}, author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Kučera, Antonín and Novotny, Petr and Velan, Dominik and Zuleger, Florian}, isbn = {978-1-4503-5583-4}, location = {Oxford, United Kingdom}, pages = {185 -- 194}, publisher = {IEEE}, title = {{Efficient algorithms for asymptotic bounds on termination time in VASS}}, doi = {10.1145/3209108.3209191}, volume = {F138033}, year = {2018}, } @article{5993, abstract = {In this article, we consider the termination problem of probabilistic programs with real-valued variables. The questions concerned are: qualitative ones that ask (i) whether the program terminates with probability 1 (almost-sure termination) and (ii) whether the expected termination time is finite (finite termination); and quantitative ones that ask (i) to approximate the expected termination time (expectation problem) and (ii) to compute a bound B such that the probability not to terminate after B steps decreases exponentially (concentration problem). To solve these questions, we utilize the notion of ranking supermartingales, which is a powerful approach for proving termination of probabilistic programs. In detail, we focus on algorithmic synthesis of linear ranking-supermartingales over affine probabilistic programs (Apps) with both angelic and demonic non-determinism. An important subclass of Apps is LRApp which is defined as the class of all Apps over which a linear ranking-supermartingale exists. Our main contributions are as follows. Firstly, we show that the membership problem of LRApp (i) can be decided in polynomial time for Apps with at most demonic non-determinism, and (ii) is NP-hard and in PSPACE for Apps with angelic non-determinism. Moreover, the NP-hardness result holds already for Apps without probability and demonic non-determinism. Secondly, we show that the concentration problem over LRApp can be solved in the same complexity as for the membership problem of LRApp. Finally, we show that the expectation problem over LRApp can be solved in 2EXPTIME and is PSPACE-hard even for Apps without probability and non-determinism (i.e., deterministic programs).
Our experimental results demonstrate the effectiveness of our approach to answer the qualitative and quantitative questions over Apps with at most demonic non-determinism.}, author = {Chatterjee, Krishnendu and Fu, Hongfei and Novotný, Petr and Hasheminezhad, Rouzbeh}, issn = {0164-0925}, journal = {ACM Transactions on Programming Languages and Systems}, number = {2}, publisher = {Association for Computing Machinery (ACM)}, title = {{Algorithmic analysis of qualitative and quantitative termination problems for affine probabilistic programs}}, doi = {10.1145/3174800}, volume = {40}, year = {2018}, } @inproceedings{24, abstract = {Partially-observable Markov decision processes (POMDPs) with discounted-sum payoff are a standard framework to model a wide range of problems related to decision making under uncertainty. Traditionally, the goal has been to obtain policies that optimize the expectation of the discounted-sum payoff. A key drawback of the expectation measure is that even low probability events with extreme payoff can significantly affect the expectation, and thus the obtained policies are not necessarily risk-averse. An alternate approach is to optimize the probability that the payoff is above a certain threshold, which allows obtaining risk-averse policies, but ignores optimization of the expectation. We consider the expectation optimization with probabilistic guarantee (EOPG) problem, where the goal is to optimize the expectation ensuring that the payoff is above a given threshold with at least a specified probability. We present several results on the EOPG problem, including the first algorithm to solve it.}, author = {Chatterjee, Krishnendu and Elgyütt, Adrian and Novotny, Petr and Rouillé, Owen}, location = {Stockholm, Sweden}, pages = {4692 -- 4699}, publisher = {IJCAI}, title = {{Expectation optimization with probabilistic guarantees in POMDPs with discounted-sum objectives}}, doi = {10.24963/ijcai.2018/652}, volume = {2018}, year = {2018}, } @inproceedings{1009, abstract = {A standard objective in partially-observable Markov decision processes (POMDPs) is to find a policy that maximizes the expected discounted-sum payoff. However, such policies may still permit unlikely but highly undesirable outcomes, which is problematic especially in safety-critical applications. Recently, there has been a surge of interest in POMDPs where the goal is to maximize the probability to ensure that the payoff is at least a given threshold, but these approaches do not consider any optimization beyond satisfying this threshold constraint. In this work we go beyond both the “expectation” and “threshold” approaches and consider a “guaranteed payoff optimization (GPO)” problem for POMDPs, where we are given a threshold t and the objective is to find a policy σ such that a) each possible outcome of σ yields a discounted-sum payoff of at least t, and b) the expected discounted-sum payoff of σ is optimal (or near-optimal) among all policies satisfying a).
We present a practical approach to tackle the GPO problem and evaluate it on standard POMDP benchmarks.}, author = {Chatterjee, Krishnendu and Novotny, Petr and Pérez, Guillermo and Raskin, Jean and Zikelic, Djordje}, booktitle = {Proceedings of the 31st AAAI Conference on Artificial Intelligence}, location = {San Francisco, CA, United States}, pages = {3725 -- 3732}, publisher = {AAAI Press}, title = {{Optimizing expectation with guarantees in POMDPs}}, volume = {5}, year = {2017}, } @inproceedings{1194, abstract = {Termination is one of the basic liveness properties, and we study the termination problem for probabilistic programs with real-valued variables. Previous works focused on the qualitative problem that asks whether an input program terminates with probability~1 (almost-sure termination). A powerful approach for this qualitative problem is the notion of ranking supermartingales with respect to a given set of invariants. The quantitative problem (probabilistic termination) asks for bounds on the termination probability. A fundamental and conceptual drawback of the existing approaches to address probabilistic termination is that even though the supermartingales consider the probabilistic behavior of the programs, the invariants are obtained completely ignoring the probabilistic aspect. In this work we address the probabilistic termination problem for linear-arithmetic probabilistic programs with nondeterminism. We define the notion of {\em stochastic invariants}, which are constraints along with a probability bound that the constraints hold. We introduce a concept of {\em repulsing supermartingales}. First, we show that repulsing supermartingales can be used to obtain bounds on the probability of the stochastic invariants. Second, we show the effectiveness of repulsing supermartingales in the following three ways: (1)~With a combination of ranking and repulsing supermartingales we can compute lower bounds on the probability of termination; (2)~repulsing supermartingales provide witnesses for refutation of almost-sure termination; and (3)~with a combination of ranking and repulsing supermartingales we can establish persistence properties of probabilistic programs. We also present results on related computational problems and an experimental evaluation of our approach on academic examples.}, author = {Chatterjee, Krishnendu and Novotny, Petr and Zikelic, Djordje}, issn = {0730-8566}, location = {Paris, France}, number = {1}, pages = {145 -- 160}, publisher = {ACM}, title = {{Stochastic invariants for probabilistic termination}}, doi = {10.1145/3009837.3009873}, volume = {52}, year = {2017}, } @inproceedings{1325, abstract = {We study graphs and two-player games in which rewards are assigned to states, and the goal of the players is to satisfy or dissatisfy a certain property of the generated outcome, given as a mean-payoff property.
Since the notion of mean-payoff does not reflect possible fluctuations from the mean-payoff along a run, we propose definitions and algorithms for capturing the stability of the system, and give algorithms for deciding whether a given mean-payoff and stability objective can be ensured in the system.}, author = {Brázdil, Tomáš and Forejt, Vojtěch and Kučera, Antonín and Novotny, Petr}, location = {Quebec City, Canada}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Stability in graphs and games}}, doi = {10.4230/LIPIcs.CONCUR.2016.10}, volume = {59}, year = {2016}, } @inproceedings{1327, abstract = {We consider partially observable Markov decision processes (POMDPs) with a set of target states and positive integer costs associated with every transition. The traditional optimization objective (stochastic shortest path) asks to minimize the expected total cost until the target set is reached. We extend the traditional framework of POMDPs to model energy consumption, which represents a hard constraint. The energy levels may increase and decrease with transitions, and the hard constraint requires that the energy level must remain positive in all steps till the target is reached. First, we present a novel algorithm for solving POMDPs with energy levels, building on existing POMDP solvers and using RTDP as its main method. Our second contribution is related to policy representation. For larger POMDP instances the policies computed by existing solvers are too large to be understandable. We present an automated procedure based on machine learning techniques that automatically extracts important decisions of the policy, allowing us to compute succinct human-readable policies. Finally, we show experimentally that our algorithm performs well and computes succinct policies on a number of POMDP instances from the literature that were naturally enhanced with energy levels.}, author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Chmelik, Martin and Gupta, Anchit and Novotny, Petr}, booktitle = {Proceedings of the 15th International Conference on Autonomous Agents and Multiagent Systems}, location = {Singapore}, pages = {1465 -- 1466}, publisher = {ACM}, title = {{Stochastic shortest path with energy constraints in POMDPs}}, year = {2016}, } @inproceedings{1326, abstract = {Energy Markov Decision Processes (EMDPs) are finite-state Markov decision processes where each transition is assigned an integer counter update and a rational payoff. An EMDP configuration is a pair s(n), where s is a control state and n is the current counter value. The configurations are changed by performing transitions in the standard way. We consider the problem of computing a safe strategy (i.e., a strategy that keeps the counter non-negative) which maximizes the expected mean payoff.}, author = {Brázdil, Tomáš and Kučera, Antonín and Novotny, Petr}, location = {Chiba, Japan}, pages = {32 -- 49}, publisher = {Springer}, title = {{Optimizing the expected mean payoff in Energy Markov Decision Processes}}, doi = {10.1007/978-3-319-46520-3_3}, volume = {9938}, year = {2016}, } @inproceedings{1438, abstract = {In this paper, we consider termination of probabilistic programs with real-valued variables.
The questions concerned are: (a) qualitative ones that ask (i) whether the program terminates with probability 1 (almost-sure termination) and (ii) whether the expected termination time is finite (finite termination); (b) quantitative ones that ask (i) to approximate the expected termination time (expectation problem) and (ii) to compute a bound B such that the probability to terminate after B steps decreases exponentially (concentration problem). To solve these questions, we utilize the notion of ranking supermartingales which is a powerful approach for proving termination of probabilistic programs. In detail, we focus on algorithmic synthesis of linear ranking-supermartingales over affine probabilistic programs (APP's) with both angelic and demonic non-determinism. An important subclass of APP's is LRAPP which is defined as the class of all APP's over which a linear ranking-supermartingale exists. Our main contributions are as follows. Firstly, we show that the membership problem of LRAPP (i) can be decided in polynomial time for APP's with at most demonic non-determinism, and (ii) is NP-hard and in PSPACE for APP's with angelic non-determinism; moreover, the NP-hardness result holds already for APP's without probability and demonic non-determinism. Secondly, we show that the concentration problem over LRAPP can be solved in the same complexity as for the membership problem of LRAPP. Finally, we show that the expectation problem over LRAPP can be solved in 2EXPTIME and is PSPACE-hard even for APP's without probability and non-determinism (i.e., deterministic programs). Our experimental results demonstrate the effectiveness of our approach to answer the qualitative and quantitative questions over APP's with at most demonic non-determinism.}, author = {Chatterjee, Krishnendu and Fu, Hongfei and Novotny, Petr and Hasheminezhad, Rouzbeh}, location = {St. Petersburg, FL, USA}, pages = {327 -- 342}, publisher = {ACM}, title = {{Algorithmic analysis of qualitative and quantitative termination problems for affine probabilistic programs}}, doi = {10.1145/2837614.2837639}, volume = {20-22}, year = {2016}, } @inproceedings{1660, abstract = {We study the pattern frequency vector for runs in probabilistic Vector Addition Systems with States (pVASS). Intuitively, each configuration of a given pVASS is assigned one of finitely many patterns, and every run can thus be seen as an infinite sequence of these patterns. The pattern frequency vector assigns to each run the limit of pattern frequencies computed for longer and longer prefixes of the run. If the limit does not exist, then the vector is undefined. We show that for one-counter pVASS, the pattern frequency vector is defined and takes one of finitely many values for almost all runs. Further, these values and their associated probabilities can be approximated up to an arbitrarily small relative error in polynomial time. For stable two-counter pVASS, we show the same result, but we do not provide any upper complexity bound. 
As a byproduct of our study, we discover counterexamples falsifying some classical results about stochastic Petri nets published in the 80s.}, author = {Brázdil, Tomáš and Kiefer, Stefan and Kučera, Antonín and Novotny, Petr}, location = {Kyoto, Japan}, pages = {44 -- 55}, publisher = {IEEE}, title = {{Long-run average behaviour of probabilistic vector addition systems}}, doi = {10.1109/LICS.2015.15}, year = {2015}, } @inproceedings{1667, abstract = {We consider a parametric version of fixed-delay continuous-time Markov chains (or equivalently deterministic and stochastic Petri nets, DSPN) where fixed-delay transitions are specified by parameters, rather than concrete values. Our goal is to synthesize values of these parameters that, for a given cost function, minimise expected total cost incurred before reaching a given set of target states. We show that under mild assumptions, optimal values of parameters can be effectively approximated using a translation to a Markov decision process (MDP) whose actions correspond to discretized values of these parameters. To this end we identify and overcome several interesting phenomena arising in systems with fixed delays.}, author = {Brázdil, Tomáš and Korenčiak, Ľuboš and Krčál, Jan and Novotny, Petr and Řehák, Vojtěch}, location = {Madrid, Spain}, pages = {141 -- 159}, publisher = {Springer}, title = {{Optimizing performance of continuous-time stochastic systems using timeout synthesis}}, doi = {10.1007/978-3-319-22264-6_10}, volume = {9259}, year = {2015}, } @inproceedings{3135, abstract = {We introduce consumption games, a model for discrete interactive systems with multiple resources that are consumed or reloaded independently. More precisely, a consumption game is a finite-state graph where each transition is labeled by a vector of resource updates, where every update is a non-positive number or ω. The ω updates model the reloading of a given resource. Each vertex belongs either to player □ or player ◇, where the aim of player □ is to play so that the resources are never exhausted. We consider several natural algorithmic problems about consumption games, and show that although these problems are computationally hard in general, they are solvable in polynomial time for every fixed number of resource types (i.e., the dimension of the update vectors) and bounded resource updates.}, author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Kučera, Antonín and Novotny, Petr}, location = {Berkeley, CA, USA}, pages = {23 -- 38}, publisher = {Springer}, title = {{Efficient controller synthesis for consumption games with multiple resource types}}, doi = {10.1007/978-3-642-31424-7_8}, volume = {7358}, year = {2012}, }