@article{1861, abstract = {Continuous-time Markov chains are commonly used in practice for modeling biochemical reaction networks in which the inherent randomness of the molecular interactions cannot be ignored. This has motivated recent research effort into methods for parameter inference and experiment design for such models. The major difficulty is that such methods usually require one to iteratively solve the chemical master equation that governs the time evolution of the probability distribution of the system. This, however, is rarely possible, and even approximation techniques remain limited to relatively small and simple systems. An alternative explored in this article is to base methods on only some low-order moments of the entire probability distribution. We summarize the theory behind such moment-based methods for parameter inference and experiment design and provide new case studies where we investigate their performance.}, author = {Ruess, Jakob and Lygeros, John}, journal = {ACM Transactions on Modeling and Computer Simulation}, number = {2}, publisher = {ACM}, title = {{Moment-based methods for parameter inference and experiment design for stochastic biochemical reaction networks}}, doi = {10.1145/2688906}, volume = {25}, year = {2015}, } @article{1866, author = {Henzinger, Thomas A and Raskin, Jean}, journal = {Communications of the ACM}, number = {2}, pages = {86--86}, publisher = {ACM}, title = {{The equivalence problem for finite automata: Technical perspective}}, doi = {10.1145/2701001}, volume = {58}, year = {2015}, } @inproceedings{1882, abstract = {We provide a framework for compositional and iterative design and verification of systems with quantitative information, such as rewards, time or energy. It is based on disjunctive modal transition systems where we allow actions to bear various types of quantitative information. Throughout the design process the actions can be further refined and the information made more precise. We show how to compute the results of standard operations on the systems, including the quotient (residual), which has not been previously considered for quantitative non-deterministic systems. Our quantitative framework has close connections to the modal nu-calculus and is compositional with respect to general notions of distances between systems and the standard operations.}, author = {Fahrenberg, Uli and Kretinsky, Jan and Legay, Axel and Traonouez, Louis}, location = {Bertinoro, Italy}, pages = {306--324}, publisher = {Springer}, title = {{Compositionality for quantitative specifications}}, doi = {10.1007/978-3-319-15317-9_19}, volume = {8997}, year = {2015}, } @inproceedings{1992, abstract = {We present a method and a tool for generating succinct representations of sets of concurrent traces. We focus on trace sets that contain all correct or all incorrect permutations of events from a given trace. We represent trace sets as HB-Formulas that are Boolean combinations of happens-before constraints between events. To generate a representation of incorrect interleavings, our method iteratively explores interleavings that violate the specification and gathers generalizations of the discovered interleavings into an HB-Formula; its complement yields a representation of correct interleavings. We claim that our trace set representations can drive diverse verification, fault localization, repair, and synthesis techniques for concurrent programs.
We demonstrate this by using our tool in three case studies involving synchronization synthesis, bug summarization, and abstraction-refinement-based verification. In each case study, our initial experimental results have been promising. In the first case study, we present an algorithm for inferring missing synchronization from an HB-Formula representing correct interleavings of a given trace. The algorithm applies rules to rewrite specific patterns in the HB-Formula into locks, barriers, and wait-notify constructs. In the second case study, we use an HB-Formula representing incorrect interleavings for bug summarization. While the HB-Formula itself is a concise counterexample summary, we present additional inference rules to help identify specific concurrency bugs such as data races, define-use order violations, and two-stage access bugs. In the final case study, we present a novel predicate learning procedure that uses HB-Formulas representing abstract counterexamples to accelerate counterexample-guided abstraction refinement (CEGAR). In each iteration of the CEGAR loop, the procedure refines the abstraction to eliminate multiple spurious abstract counterexamples drawn from the HB-Formula.}, author = {Gupta, Ashutosh and Henzinger, Thomas A and Radhakrishna, Arjun and Samanta, Roopsha and Tarrach, Thorsten}, isbn = {978-1-4503-3300-9}, location = {Mumbai, India}, pages = {433--444}, publisher = {ACM}, title = {{Succinct representation of concurrent trace sets}}, doi = {10.1145/2676726.2677008}, year = {2015}, } @article{1832, abstract = {Linearizability of concurrent data structures is usually proved by monolithic simulation arguments relying on the identification of the so-called linearization points. Regrettably, such proofs, whether manual or automatic, are often complicated and scale poorly to advanced non-blocking concurrency patterns, such as helping and optimistic updates. In response, we propose a more modular way of checking linearizability of concurrent queue algorithms that does not involve identifying linearization points. We reduce the task of proving linearizability with respect to the queue specification to establishing four basic properties, each of which can be proved independently by simpler arguments. As a demonstration of our approach, we verify the Herlihy and Wing queue, an algorithm that is challenging to verify by a simulation proof.}, author = {Chakraborty, Soham and Henzinger, Thomas A and Sezgin, Ali and Vafeiadis, Viktor}, journal = {Logical Methods in Computer Science}, number = {1}, publisher = {International Federation of Computational Logic}, title = {{Aspect-oriented linearizability proofs}}, doi = {10.2168/LMCS-11(1:20)2015}, volume = {11}, year = {2015}, } @article{1731, abstract = {We consider two-player zero-sum games on graphs. These games can be classified on the basis of the information of the players and on the mode of interaction between them. On the basis of information the classification is as follows: (a) partial-observation (both players have a partial view of the game); (b) one-sided complete-observation (one player has complete observation); and (c) complete-observation (both players have a complete view of the game). On the basis of mode of interaction we have the following classification: (a) concurrent (both players interact simultaneously); and (b) turn-based (both players interact in turn). The two sources of randomness in these games are randomness in the transition function and randomness in strategies.
In general, randomized strategies are more powerful than deterministic strategies, and randomness in transitions gives more general classes of games. In this work we present a complete characterization for the classes of games where randomness is not helpful in: (a) the transition function (probabilistic transitions can be simulated by deterministic transitions); and (b) strategies (pure strategies are as powerful as randomized strategies). As a consequence of our characterization, we obtain new undecidability results for these games.}, author = {Chatterjee, Krishnendu and Doyen, Laurent and Gimbert, Hugo and Henzinger, Thomas A}, journal = {Information and Computation}, number = {12}, pages = {3--16}, publisher = {Elsevier}, title = {{Randomness for free}}, doi = {10.1016/j.ic.2015.06.003}, volume = {245}, year = {2015}, } @article{1856, abstract = {The traditional synthesis question, given a specification, asks for the automatic construction of a system that satisfies the specification, whereas often there exists a preference order among the different systems that satisfy the given specification. Under a probabilistic assumption about the possible inputs, such a preference order is naturally expressed by a weighted automaton, which assigns to each word a value, such that a system is preferred if it generates a higher expected value. We solve the following optimal synthesis problem: given an omega-regular specification, a Markov chain that describes the distribution of inputs, and a weighted automaton that measures how well a system satisfies the given specification under the input assumption, synthesize a system that optimizes the measured value. For safety specifications and quantitative measures that are defined by mean-payoff automata, the optimal synthesis problem reduces to finding a strategy in a Markov decision process (MDP) that is optimal for a long-run average reward objective, which can be achieved in polynomial time. For general omega-regular specifications along with mean-payoff automata, the solution rests on a new, polynomial-time algorithm for computing optimal strategies in MDPs with mean-payoff parity objectives. Our algorithm constructs optimal strategies that consist of two memoryless strategies and a counter. The counter is in general not bounded. To obtain a finite-state system, we show how to construct an ε-optimal strategy with a bounded counter, for all ε > 0. Furthermore, we show how to decide in polynomial time if it is possible to construct an optimal finite-state system (i.e., a system without a counter) for a given specification. We have implemented our approach and the underlying algorithms in a tool that takes qualitative and quantitative specifications and automatically constructs a system that satisfies the qualitative specification and optimizes the quantitative specification, if such a system exists. We present some experimental results showing optimal systems that were automatically generated in this way.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Jobstmann, Barbara and Singh, Rohit}, journal = {Journal of the ACM}, number = {1}, publisher = {ACM}, title = {{Measuring and synthesizing systems in probabilistic environments}}, doi = {10.1145/2699430}, volume = {62}, year = {2015}, } @inproceedings{1657, abstract = {We consider Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) objectives.
There exist two different views: (i) the expectation semantics, where the goal is to optimize the expected mean-payoff objective, and (ii) the satisfaction semantics, where the goal is to maximize the probability of runs such that the mean-payoff value stays above a given vector. We consider optimization with respect to both objectives at once, thus unifying the existing semantics. Precisely, the goal is to optimize the expectation while ensuring the satisfaction constraint. Our problem captures the notion of optimization with respect to strategies that are risk-averse (i.e., ensure a certain probabilistic guarantee). Our main results are as follows: First, we present algorithms for the decision problems, which are always polynomial in the size of the MDP. We also show that an approximation of the Pareto curve can be computed in time polynomial in the size of the MDP and the approximation factor, but exponential in the number of dimensions. Second, we present a complete characterization of the strategy complexity (in terms of memory bounds and randomization) required to solve our problem.}, author = {Chatterjee, Krishnendu and Komárková, Zuzana and Kretinsky, Jan}, location = {Kyoto, Japan}, pages = {244--256}, publisher = {IEEE}, title = {{Unifying two views on multiple mean-payoff objectives in Markov decision processes}}, doi = {10.1109/LICS.2015.32}, year = {2015}, } @inproceedings{1656, abstract = {Recently there has been a significant effort to handle quantitative properties in formal verification and synthesis. While weighted automata over finite and infinite words provide a natural and flexible framework to express quantitative properties, perhaps surprisingly, some basic system properties such as average response time cannot be expressed using weighted automata, nor in any other known decidable formalism. In this work, we introduce nested weighted automata as a natural extension of weighted automata which makes it possible to express important quantitative properties such as average response time. In nested weighted automata, a master automaton spins off and collects results from weighted slave automata, each of which computes a quantity along a finite portion of an infinite word. Nested weighted automata can be viewed as the quantitative analogue of monitor automata, which are used in run-time verification. We establish an almost complete decidability picture for the basic decision problems about nested weighted automata, and illustrate their applicability in several domains. In particular, nested weighted automata can be used to decide average response time properties.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, booktitle = {Proceedings - Symposium on Logic in Computer Science}, location = {Kyoto, Japan}, publisher = {IEEE}, title = {{Nested weighted automata}}, doi = {10.1109/LICS.2015.72}, volume = {2015-July}, year = {2015}, } @misc{5436, abstract = {Recently there has been a significant effort to handle quantitative properties in formal verification and synthesis. While weighted automata over finite and infinite words provide a natural and flexible framework to express quantitative properties, perhaps surprisingly, some basic system properties such as average response time cannot be expressed using weighted automata, nor in any other known decidable formalism.
In this work, we introduce nested weighted automata as a natural extension of weighted automata which makes it possible to express important quantitative properties such as average response time. In nested weighted automata, a master automaton spins off and collects results from weighted slave automata, each of which computes a quantity along a finite portion of an infinite word. Nested weighted automata can be viewed as the quantitative analogue of monitor automata, which are used in run-time verification. We establish an almost complete decidability picture for the basic decision problems about nested weighted automata, and illustrate their applicability in several domains. In particular, nested weighted automata can be used to decide average response time properties.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, issn = {2664-1690}, pages = {29}, publisher = {IST Austria}, title = {{Nested weighted automata}}, doi = {10.15479/AT:IST-2015-170-v2-2}, year = {2015}, } @inproceedings{1659, abstract = {The target discounted-sum problem is the following: Given a rational discount factor 0 < λ < 1 and three rational values a, b, and t, does there exist a finite or an infinite sequence w ∈ {a, b}* or w ∈ {a, b}^ω, such that Σ_{i=0}^{|w|} w(i)λ^i equals t? The problem turns out to relate to many fields of mathematics and computer science, and its decidability question is surprisingly hard to solve. We solve the finite version of the problem, and show the hardness of the infinite version, linking it to various areas and open problems in mathematics and computer science: β-expansions, discounted-sum automata, piecewise affine maps, and generalizations of the Cantor set. We provide some partial results for the infinite version, among which are solutions to its restriction to eventually-periodic sequences and to the cases that λ ≥ 1/2 or λ = 1/n, for every n ∈ N. We use our results for solving some open problems on discounted-sum automata, among which are the exact-value problem for nondeterministic automata over finite words and the universality and inclusion problems for functional automata.}, author = {Boker, Udi and Henzinger, Thomas A and Otop, Jan}, booktitle = {LICS}, issn = {1043-6871}, location = {Kyoto, Japan}, pages = {750--761}, publisher = {IEEE}, title = {{The target discounted-sum problem}}, doi = {10.1109/LICS.2015.74}, year = {2015}, } @inproceedings{1610, abstract = {The edit distance between two words w1, w2 is the minimal number of word operations (letter insertions, deletions, and substitutions) necessary to transform w1 to w2. The edit distance generalizes to languages L1, L2, where the edit distance is the minimal number k such that for every word from L1 there exists a word in L2 with edit distance at most k. We study the edit distance computation problem between pushdown automata and their subclasses. The problem of computing edit distance to pushdown automata is undecidable, and in practice, the interesting question is to compute the edit distance from a pushdown automaton (the implementation, a standard model for programs with recursion) to a regular language (the specification).
In this work, we present a complete picture of decidability and complexity for deciding whether, for a given threshold k, the edit distance from a pushdown automaton to a finite automaton is at most k.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Ibsen-Jensen, Rasmus and Otop, Jan}, booktitle = {42nd International Colloquium on Automata, Languages, and Programming (ICALP)}, isbn = {978-3-662-47665-9}, location = {Kyoto, Japan}, number = {Part II}, pages = {121--133}, publisher = {Springer Nature}, title = {{Edit distance for pushdown automata}}, doi = {10.1007/978-3-662-47666-6_10}, volume = {9135}, year = {2015}, } @misc{5439, abstract = {The target discounted-sum problem is the following: Given a rational discount factor 0 < λ < 1 and three rational values a, b, and t, does there exist a finite or an infinite sequence w ∈ {a, b}* or w ∈ {a, b}^ω, such that Σ_{i=0}^{|w|} w(i)λ^i equals t? The problem turns out to relate to many fields of mathematics and computer science, and its decidability question is surprisingly hard to solve. We solve the finite version of the problem, and show the hardness of the infinite version, linking it to various areas and open problems in mathematics and computer science: β-expansions, discounted-sum automata, piecewise affine maps, and generalizations of the Cantor set. We provide some partial results for the infinite version, among which are solutions to its restriction to eventually-periodic sequences and to the cases that λ ≥ 1/2 or λ = 1/n, for every n ∈ N. We use our results for solving some open problems on discounted-sum automata, among which are the exact-value problem for nondeterministic automata over finite words and the universality and inclusion problems for functional automata.}, author = {Boker, Udi and Henzinger, Thomas A and Otop, Jan}, issn = {2664-1690}, pages = {20}, publisher = {IST Austria}, title = {{The target discounted-sum problem}}, doi = {10.15479/AT:IST-2015-335-v1-1}, year = {2015}, } @inproceedings{1502, abstract = {We extend the theory of input-output conformance with operators for merge and quotient. The former is useful when testing against multiple requirements or views. The latter can be used to generate tests for patches of an already tested system. Both operators can combine systems with different action alphabets, which is usually the case when constructing complex systems and specifications from parts, for instance different views as well as newly defined functionality of a previous version of the system.}, author = {Beneš, Nikola and Daca, Przemyslaw and Henzinger, Thomas A and Kretinsky, Jan and Nickovic, Dejan}, isbn = {978-1-4503-3471-6}, location = {Montreal, QC, Canada}, pages = {101--110}, publisher = {ACM}, title = {{Complete composition operators for IOCO-testing theory}}, doi = {10.1145/2737166.2737175}, year = {2015}, } @inproceedings{1501, abstract = {We consider Markov decision processes (MDPs), which are a standard model for probabilistic systems. We focus on qualitative properties for MDPs that can express that desired behaviors of the system arise almost-surely (with probability 1) or with positive probability. We introduce a new simulation relation to capture the refinement relation of MDPs with respect to qualitative properties, and present discrete graph algorithms with quadratic complexity to compute the simulation relation.
We present an automated technique for assume-guarantee style reasoning for compositional analysis of two-player games by giving a counterexample-guided abstraction-refinement approach to compute our new simulation relation. We show a tight link between two-player games and MDPs, and as a consequence the results for games are lifted to MDPs with qualitative properties. We have implemented our algorithms and show that the compositional analysis leads to significant improvements.}, author = {Chatterjee, Krishnendu and Chmelik, Martin and Daca, Przemyslaw}, journal = {Formal Methods in System Design}, number = {2}, pages = {230--264}, publisher = {Springer}, title = {{CEGAR for compositional analysis of qualitative properties in Markov decision processes}}, doi = {10.1007/s10703-015-0235-2}, volume = {47}, year = {2015}, } @inproceedings{1689, abstract = {We consider the problem of computing the set of initial states of a dynamical system such that there exists a control strategy to ensure that the trajectories satisfy a temporal logic specification with probability 1 (almost-surely). We focus on discrete-time, stochastic linear dynamics and specifications given as formulas of the Generalized Reactivity(1) fragment of Linear Temporal Logic over linear predicates in the states of the system. We propose a solution based on iterative abstraction-refinement and turn-based 2-player probabilistic games. While the theoretical guarantee of our algorithm after any finite number of iterations is only a partial solution, we show that if our algorithm terminates, then the result is the set of satisfying initial states. Moreover, for any (partial) solution, our algorithm synthesizes witness control strategies to ensure almost-sure satisfaction of the temporal logic specification. We demonstrate our approach on an illustrative case study.}, author = {Svoreňová, Mária and Kretinsky, Jan and Chmelik, Martin and Chatterjee, Krishnendu and Černá, Ivana and Belta, Călin}, booktitle = {Proceedings of the 18th International Conference on Hybrid Systems: Computation and Control}, location = {Seattle, WA, United States}, pages = {259--268}, publisher = {ACM}, title = {{Temporal logic control for stochastic linear systems using abstraction refinement of probabilistic games}}, doi = {10.1145/2728606.2728608}, year = {2015}, } @inproceedings{1729, abstract = {We present a computer-aided programming approach to concurrency. The approach allows programmers to program assuming a friendly, non-preemptive scheduler, and our synthesis procedure inserts synchronization to ensure that the final program works even with a preemptive scheduler. The correctness specification is implicit, inferred from the non-preemptive behavior. Let us consider sequences of calls that the program makes to an external interface. The specification requires that any such sequence produced under a preemptive scheduler should be included in the set of such sequences produced under a non-preemptive scheduler. The solution is based on a finitary abstraction, an algorithm for bounded language inclusion modulo an independence relation, and rules for inserting synchronization. We apply the approach to device-driver programming, where the driver threads call the software interface of the device and the API provided by the operating system.
Our experiments demonstrate that our synthesis method is precise and efficient, and, since it does not require explicit specifications, is more practical than the conventional approach based on user-provided assertions.}, author = {Cerny, Pavol and Clarke, Edmund and Henzinger, Thomas A and Radhakrishna, Arjun and Ryzhyk, Leonid and Samanta, Roopsha and Tarrach, Thorsten}, location = {San Francisco, CA, United States}, pages = {180--197}, publisher = {Springer}, title = {{From non-preemptive to preemptive scheduling using synchronization synthesis}}, doi = {10.1007/978-3-319-21668-3_11}, volume = {9207}, year = {2015}, } @inproceedings{1835, abstract = {The behaviour of gene regulatory networks (GRNs) is typically analysed using simulation-based statistical testing-like methods. In this paper, we demonstrate that we can replace this approach by a formal verification-like method that gives higher assurance and scalability. We focus on Wagner’s weighted GRN model with varying weights, which is used in evolutionary biology. In the model, weight parameters represent the gene interaction strength that may change due to genetic mutations. For a property of interest, we synthesise the constraints over the parameter space that represent the set of GRNs satisfying the property. We experimentally show that our parameter synthesis procedure computes the mutational robustness of GRNs, an important problem in evolutionary biology, more efficiently than the classical simulation method. We specify the property in linear temporal logic. We employ symbolic bounded model checking and SMT solving to compute the space of GRNs that satisfy the property, which amounts to synthesizing a set of linear constraints on the weights.}, author = {Giacobbe, Mirco and Guet, Calin C and Gupta, Ashutosh and Henzinger, Thomas A and Paixao, Tiago and Petrov, Tatjana}, location = {London, United Kingdom}, pages = {469--483}, publisher = {Springer}, title = {{Model checking gene regulatory networks}}, doi = {10.1007/978-3-662-46681-0_47}, volume = {9035}, year = {2015}, } @inproceedings{1603, abstract = {For deterministic systems, a counterexample to a property can simply be an error trace, whereas counterexamples in probabilistic systems are necessarily more complex. For instance, a set of erroneous traces with a sufficient cumulative probability mass can be used. Since such objects are too large to understand and manipulate, compact representations such as subchains have been considered. In the case of probabilistic systems with non-determinism, the situation is even more complex. While a subchain for a given strategy (or scheduler, resolving non-determinism) is a straightforward choice, we take a different approach. Instead, we focus on the strategy itself, extract the most important decisions it makes, and present its succinct representation. The key tools we employ to achieve this are (1) introducing a concept of importance of a state w.r.t. the strategy, and (2) learning using decision trees. There are three main advantages of our approach. Firstly, it exploits the quantitative information on states, stressing the more important decisions. Secondly, it leads to a greater variability and degree of freedom in representing the strategies. Thirdly, the representation uses a self-explanatory data structure. In summary, our approach produces more succinct and more explainable strategies, as opposed to, e.g., binary decision diagrams.
Finally, our experimental results show that we can extract several rules describing the strategy even for very large systems that do not fit in memory, and, based on the rules, explain the erroneous behaviour.}, author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Chmelik, Martin and Fellner, Andreas and Kretinsky, Jan}, location = {San Francisco, CA, United States}, pages = {158--177}, publisher = {Springer}, title = {{Counterexample explanation by learning small strategies in Markov decision processes}}, doi = {10.1007/978-3-319-21690-4_10}, volume = {9206}, year = {2015}, } @misc{5549, abstract = {This repository contains the experimental part of the CAV 2015 publication Counterexample Explanation by Learning Small Strategies in Markov Decision Processes. We extended the probabilistic model checker PRISM to represent strategies of Markov Decision Processes as Decision Trees. The archive contains a Java executable version of the extended tool (prism_dectree.jar) together with a few examples of the PRISM benchmark library. To execute the program, please have a look at the README.txt, which provides instructions and further information on the archive. The archive contains scripts that (if run often enough) reproduce the data presented in the publication.}, author = {Fellner, Andreas}, keywords = {Markov Decision Process, Decision Tree, Probabilistic Verification, Counterexample Explanation}, publisher = {Institute of Science and Technology Austria}, title = {{Experimental part of CAV 2015 publication: Counterexample Explanation by Learning Small Strategies in Markov Decision Processes}}, doi = {10.15479/AT:ISTA:28}, year = {2015}, }