@book{3300,
abstract = {This book first explores the origins of model checking, grounded in theoretical work on temporal logic and automata. The editors and authors are among the world's leading researchers in this domain, and they contributed 32 chapters representing a thorough view of the development and application of the technique. Topics covered include binary decision diagrams, symbolic model checking, satisfiability modulo theories, partial-order reduction, abstraction, interpolation, concurrency, security protocols, games, probabilistic model checking, and process algebra, as well as chapters on the transfer of theory to industrial practice, property specification languages for hardware, and the verification of real-time and hybrid systems.
The book will be valuable for researchers and graduate students engaged in the development of formal methods and verification tools.},
editor = {Clarke, Edmund M. and Henzinger, Thomas A. and Veith, Helmut and Bloem, Roderick},
publisher = {Springer},
title = {{Handbook of Model Checking}},
doi = {10.1007/978-3-319-10575-8},
year = {2018},
}
@article{608,
abstract = {Synthesis is the automated construction of a system from its specification. In real life, hardware and software systems are rarely constructed from scratch. Rather, a system is typically constructed from a library of components. Lustig and Vardi formalized this intuition and studied LTL synthesis from component libraries. In real life, designers seek optimal systems. In this paper we add optimality considerations to the setting. We distinguish between quality considerations (for example, size: the smaller a system is, the better it is) and pricing (for example, the payment to the company that manufactured the component). We study the problem of designing systems with minimal quality-cost and price. A key point is that while the quality cost is individual (the choices of a designer are independent of the choices made by other designers that use the same library), pricing gives rise to a resource-allocation game: designers that use the same component share its price, with each share proportional to the number of uses (a component can be used several times in a design). We study both closed and open settings, and in both we solve the problem of finding an optimal design. In a setting with multiple designers, we also study the game-theoretic problems of the induced resource-allocation game.},
author = {Avni, Guy and Kupferman, Orna},
journal = {Theoretical Computer Science},
pages = {50 -- 72},
publisher = {Elsevier},
title = {{Synthesis from component libraries with costs}},
doi = {10.1016/j.tcs.2017.11.001},
volume = {712},
year = {2018},
}
@inproceedings{81,
abstract = {We solve the offline monitoring problem for timed propositional temporal logic (TPTL), interpreted over dense-time Boolean signals. The variant of TPTL we consider extends linear temporal logic (LTL) with clock variables and reset quantifiers, providing a mechanism to specify real-time constraints. We first describe a general monitoring algorithm based on an exhaustive computation of the set of satisfying clock assignments as a finite union of zones. We then propose a specialized monitoring algorithm for the one-variable case whose complexity is linear in the length of the signal, based on a partition of the time domain induced by region equivalence; this generalizes a known result regarding the monitoring of metric temporal logic (MTL). The region and zone representations of time constraints are known from timed automata verification and can also be used in the discrete-time case. Our prototype implementation appears to outperform previous discrete-time implementations of TPTL monitoring.},
author = {Elgyütt, Adrian and Ferrere, Thomas and Henzinger, Thomas A.},
location = {Beijing, China},
pages = {53 -- 70},
publisher = {Springer},
title = {{Monitoring temporal logic with clock variables}},
doi = {10.1007/978-3-030-00151-3_4},
volume = {11022},
year = {2018},
}
@inproceedings{156,
abstract = {Imprecision in timing can sometimes be beneficial: Metric interval temporal logic (MITL), which disables the expression of punctuality constraints, was shown to translate to timed automata, yielding an elementary decision procedure. We show how this principle extends to other forms of dense-time specification using regular expressions. By providing a clean, automaton-based formal framework for non-punctual languages, we are able to recover and extend several results in timed systems. Metric interval regular expressions (MIRE) are introduced, providing regular expressions with non-singular duration constraints. We prove that MIRE are expressively complete relative to a class of one-clock timed automata, which can be determinized using additional clocks. Metric interval dynamic logic (MIDL) is then defined using MIRE as temporal modalities. We show that MIDL generalizes known extensions of MITL, while translating to timed automata at comparable cost.},
author = {Ferrere, Thomas},
location = {Oxford, UK},
pages = {147 -- 164},
publisher = {Springer},
title = {{The compound interest in relaxing punctuality}},
doi = {10.1007/978-3-319-95582-7_9},
volume = {10951},
year = {2018},
}
@article{743,
abstract = {This special issue of the journal Formal Methods in System Design is dedicated to Prof. Helmut Veith, who unexpectedly passed away in March 2016. Helmut Veith was a brilliant researcher, inspiring collaborator, passionate mentor, generous friend, and valued member of the formal methods community. Helmut was not only known for his numerous and influential contributions in the field of automated verification (most prominently his work on Counterexample-Guided Abstraction Refinement [1,2]), but also for his untiring and passionate efforts for the logic community: he co-organized the Vienna Summer of Logic (an event comprising twelve conferences and numerous workshops which attracted thousands of researchers from all over the world), he initiated the Vienna Center for Logic and Algorithms (which promotes international collaboration on logic and algorithms and organizes outreach events such as the LogicLounge), and he coordinated the Doctoral Program on Logical Methods in Computer Science at TU Wien (currently educating more than 40 doctoral students) and a National Research Network on Rigorous Systems Engineering (uniting fifteen researchers in Austria to address the challenge of building reliable and safe computer systems). With his enthusiasm and commitment, Helmut completely reshaped the Austrian research landscape in the field of logic and verification in his few years as a full professor at TU Wien.},
author = {Gottlob, Georg and Henzinger, Thomas A. and Weißenbacher, Georg},
journal = {Formal Methods in System Design},
number = {2},
pages = {267 -- 269},
publisher = {Springer},
title = {{Preface of the special issue in memoriam Helmut Veith}},
doi = {10.1007/s10703-017-0307-6},
volume = {51},
year = {2017},
}
@article{465,
abstract = {The edit distance between two words w_1, w_2 is the minimal number of word operations (letter insertions, deletions, and substitutions) necessary to transform w_1 to w_2. The edit distance generalizes to languages L_1, L_2, where the edit distance from L_1 to L_2 is the minimal number k such that for every word from L_1 there exists a word in L_2 with edit distance at most k. We study the edit distance computation problem between pushdown automata and their subclasses. The problem of computing edit distance to a pushdown automaton is undecidable, and in practice, the interesting question is to compute the edit distance from a pushdown automaton (the implementation, a standard model for programs with recursion) to a regular language (the specification). In this work, we present a complete picture of decidability and complexity for the following problems: (1) deciding whether, for a given threshold k, the edit distance from a pushdown automaton to a finite automaton is at most k, and (2) deciding whether the edit distance from a pushdown automaton to a finite automaton is finite.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A. and Ibsen-Jensen, Rasmus and Otop, Jan},
issn = {1860-5974},
journal = {Logical Methods in Computer Science},
number = {3},
publisher = {International Federation of Computational Logic},
title = {{Edit distance for pushdown automata}},
doi = {10.23638/LMCS-13(3:23)2017},
volume = {13},
year = {2017},
}
@article{467,
abstract = {Recently there has been a significant effort to handle quantitative properties in formal verification and synthesis. While weighted automata over finite and infinite words provide a natural and flexible framework to express quantitative properties, perhaps surprisingly, some basic system properties such as average response time cannot be expressed using weighted automata or in any other known decidable formalism. In this work, we introduce nested weighted automata as a natural extension of weighted automata, which makes it possible to express important quantitative properties such as average response time. In nested weighted automata, a master automaton spins off and collects results from weighted slave automata, each of which computes a quantity along a finite portion of an infinite word. Nested weighted automata can be viewed as the quantitative analogue of monitor automata, which are used in runtime verification. We establish an almost-complete decidability picture for the basic decision problems about nested weighted automata and illustrate their applicability in several domains. In particular, nested weighted automata can be used to decide average response time properties.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A. and Otop, Jan},
issn = {1529-3785},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {4},
publisher = {ACM},
title = {{Nested weighted automata}},
doi = {10.1145/3152769},
volume = {18},
year = {2017},
}
@article{471,
abstract = {We present a new algorithm for the statistical model checking of Markov chains with respect to unbounded temporal properties, including full linear temporal logic. The main idea is that we monitor each simulation run on the fly, in order to detect quickly if a bottom strongly connected component is entered with high probability, in which case the simulation run can be terminated early. As a result, our simulation runs are often much shorter than required by termination bounds that are computed a priori for a desired level of confidence on a large state space. In comparison to previous algorithms for statistical model checking, our method is not only faster in many cases but also requires less information about the system, namely, only the minimum transition probability that occurs in the Markov chain. In addition, our method can be generalised to unbounded quantitative properties such as mean-payoff bounds.},
author = {Daca, Przemyslaw and Henzinger, Thomas A. and Kretinsky, Jan and Petrov, Tatjana},
issn = {1529-3785},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {2},
publisher = {ACM},
title = {{Faster statistical model checking for unbounded temporal properties}},
doi = {10.1145/3060139},
volume = {18},
year = {2017},
}
@inproceedings{549,
abstract = {Model checking is usually based on a comprehensive traversal of the state space. Causality-based model checking is a radically different approach that instead analyzes the cause-effect relationships in a program. We give an overview of a new class of model checking algorithms that capture the causal relationships in a special data structure called concurrent traces. Concurrent traces identify key events in an execution history and link them through their cause-effect relationships. The model checker builds a tableau of concurrent traces, where the case splits represent different causal explanations of a hypothetical error. Causality-based model checking has been implemented in the ARCTOR tool, and applied to previously intractable multi-threaded benchmarks.},
author = {Finkbeiner, Bernd and Kupriyanov, Andrey},
booktitle = {Electronic Proceedings in Theoretical Computer Science},
issn = {2075-2180},
location = {Uppsala, Sweden},
pages = {31 -- 38},
publisher = {Open Publishing Association},
title = {{Causality-based model checking}},
doi = {10.4204/EPTCS.259.3},
volume = {259},
year = {2017},
}
@inbook{625,
abstract = {In the analysis of reactive systems, a quantitative objective assigns a real value to every trace of the system. The value decision problem for a quantitative objective asks whether there exists a trace whose value is at least a given threshold, and the exact value decision problem asks whether there exists a trace whose value is exactly the threshold. We compare the computational complexity of the value and exact value decision problems for classical quantitative objectives, such as sum, discounted sum, energy, and mean-payoff, for two standard models of reactive systems, namely, graphs and graph games.},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Henzinger, Thomas A.},
booktitle = {Models, Algorithms, Logics and Tools},
editor = {Aceto, Luca and Bacci, Giorgio and Ingólfsdóttir, Anna and Legay, Axel and Mardare, Radu},
issn = {0302-9743},
pages = {367 -- 381},
publisher = {Springer},
title = {{The cost of exactness in quantitative reachability}},
doi = {10.1007/978-3-319-63121-9_18},
volume = {10460},
year = {2017},
}
@inproceedings{631,
abstract = {Template polyhedra generalize intervals and octagons to polyhedra whose facets are orthogonal to a given set of arbitrary directions. They have been employed in the abstract interpretation of programs and, with particular success, in the reachability analysis of hybrid automata. While previously the choice of directions was left to the user or a heuristic, we present a method for the automatic discovery of directions that generalize and eliminate spurious counterexamples. We show that for the class of convex hybrid automata, i.e., hybrid automata with (possibly nonlinear) convex constraints on derivatives, such directions always exist and can be found using convex optimization. We embed our method inside a CEGAR loop, thus enabling the time-unbounded reachability analysis of an important and richer class of hybrid automata than was previously possible. We evaluate our method on several benchmarks, also demonstrating its superior efficiency for the special case of linear hybrid automata.},
author = {Bogomolov, Sergiy and Frehse, Goran and Giacobbe, Mirco and Henzinger, Thomas A.},
isbn = {978-3-662-54576-8},
location = {Uppsala, Sweden},
pages = {589 -- 606},
publisher = {Springer},
title = {{Counterexample guided refinement of template polyhedra}},
doi = {10.1007/978-3-662-54577-5_34},
volume = {10205},
year = {2017},
}
@inproceedings{633,
abstract = {A Rapidly-exploring Random Tree (RRT) is an algorithm which can search a non-convex region of space by incrementally building a space-filling tree. The tree is constructed from random points drawn from the system’s state space and is biased to grow towards large unexplored areas in the system. RRT can provide better coverage of a system’s possible behaviors compared with random simulations, but is more lightweight than full reachability analysis. In this paper, we explore some of the design decisions encountered while implementing a hybrid extension of the RRT algorithm, which have not been elaborated on before. In particular, we focus on handling non-determinism, which arises due to discrete transitions. We introduce the notion of important points to account for this phenomenon. We showcase our ideas using heater and navigation benchmarks.},
author = {Bak, Stanley and Bogomolov, Sergiy and Henzinger, Thomas A. and Kumar, Aviral},
editor = {Abate, Alessandro and Bodo, Sylvie},
isbn = {978-3-319-63500-2},
location = {Heidelberg, Germany},
pages = {83 -- 89},
publisher = {Springer},
title = {{Challenges and tool implementation of hybrid rapidly exploring random trees}},
doi = {10.1007/978-3-319-63501-9_6},
volume = {10381},
year = {2017},
}
@inproceedings{636,
abstract = {Signal regular expressions can specify sequential properties of real-valued signals based on threshold conditions, regular operations, and duration constraints. In this paper we endow them with a quantitative semantics which indicates how robustly a signal matches or does not match a given expression. First, we show that this semantics is a safe approximation of a distance between the signal and the language defined by the expression. Then, we consider the robust matching problem, that is, computing the quantitative semantics of every segment of a given signal relative to an expression. We present an algorithm that solves this problem for piecewise-constant and piecewise-linear signals and show that for such signals the robustness map is a piecewise-linear function. The availability of an indicator describing how robustly a signal segment matches some regular pattern provides a general framework for quantitative monitoring of cyber-physical systems.},
author = {Bakhirkin, Alexey and Ferrere, Thomas and Maler, Oded and Ulus, Dogan},
editor = {Abate, Alessandro and Geeraerts, Gilles},
isbn = {978-3-319-65764-6},
location = {Berlin, Germany},
pages = {189 -- 206},
publisher = {Springer},
title = {{On the quantitative semantics of regular expressions over real-valued signals}},
doi = {10.1007/978-3-319-65765-3_11},
volume = {10419},
year = {2017},
}
@proceedings{638,
editor = {Bogomolov, Sergiy and Martel, Matthieu and Prabhakar, Pavithra},
publisher = {Springer},
title = {{Numerical Software Verification}},
doi = {10.1007/978-3-319-54292-8},
volume = {10152},
year = {2017},
}
@misc{6426,
abstract = {Synchronous programs are easy to specify because the side effects of an operation are finished by the time the invocation of the operation returns to the caller. Asynchronous programs, on the other hand, are difficult to specify because there are side effects due to pending computation scheduled as a result of the invocation of an operation. They are also difficult to verify because of the large number of possible interleavings of concurrent asynchronous computation threads. We show that specifications and correctness proofs for asynchronous programs can be structured by introducing the fiction, for proof purposes, that intermediate, non-quiescent states of asynchronous operations can be ignored. Then, the task of specification becomes relatively simple and the task of verification can be naturally decomposed into smaller sub-tasks. The sub-tasks iteratively summarize, guided by the structure of an asynchronous program, the atomic effect of non-atomic operations and the synchronous effect of asynchronous operations. This structuring of specifications and proofs corresponds to the introduction of multiple layers of stepwise refinement for asynchronous programs. We present the first proof rule, called synchronization, to reduce asynchronous invocations on a lower layer to synchronous invocations on a higher layer. We implemented our proof method in CIVL and evaluated it on a collection of benchmark programs.},
author = {Henzinger, Thomas A. and Kragl, Bernhard and Qadeer, Shaz},
issn = {2664-1690},
pages = {28},
publisher = {IST Austria},
title = {{Synchronizing the asynchronous}},
doi = {10.15479/AT:IST-2018-853-v2-2},
year = {2017},
}
@inproceedings{647,
abstract = {Despite researchers’ efforts in the last couple of decades, reachability analysis is still a challenging problem even for linear hybrid systems. Among the existing approaches, the most practical ones are mainly based on bounded-time reachable set over-approximations. For the purpose of unbounded-time analysis, one important strategy is to abstract the original system and find an invariant for the abstraction. In this paper, we propose an approach to constructing a new kind of abstraction called conic abstraction for affine hybrid systems, and to computing reachable sets based on this abstraction. The essential feature of a conic abstraction is that it partitions the state space of a system into a set of convex polyhedral cones which is derived from a uniform conic partition of the derivative space. Such a set of polyhedral cones is able to cut all trajectories of the system into almost straight segments, so that every segment of a reach pipe in a polyhedral cone tends to be straight as well and hence can be over-approximated tightly by polyhedra using techniques similar to those of HyTech or PHAVer. In particular, for diagonalizable affine systems, our approach is guaranteed to find an invariant for unbounded reachable sets, which is beyond the capability of bounded-time reachability analysis tools. We implemented the approach in a tool, and experiments on benchmarks show that our approach is more powerful than SpaceEx and PHAVer in dealing with diagonalizable systems.},
author = {Bogomolov, Sergiy and Giacobbe, Mirco and Henzinger, Thomas A. and Kong, Hui},
isbn = {978-3-319-65764-6},
location = {Berlin, Germany},
pages = {116 -- 132},
publisher = {Springer},
title = {{Conic abstractions for hybrid systems}},
doi = {10.1007/978-3-319-65765-3_7},
volume = {10419},
year = {2017},
}
@inproceedings{663,
abstract = {In this paper, we propose an approach to automatically compute invariant clusters for nonlinear semialgebraic hybrid systems. An invariant cluster for an ordinary differential equation (ODE) is a multivariate polynomial invariant g(u, x) = 0, parametric in the vector u, which can yield an infinite number of concrete invariants by assigning different values to u, so that every trajectory of the system can be overapproximated precisely by the intersection of a group of concrete invariants. For semialgebraic systems, which involve ODEs with multivariate polynomial right-hand sides, given a template multivariate polynomial g(u, x), an invariant cluster can be obtained by first computing the remainder of the Lie derivative of g(u, x) divided by g(u, x) and then solving the system of polynomial equations obtained from the coefficients of the remainder. Based on invariant clusters and sum-of-squares (SOS) programming, we present a new method for the safety verification of hybrid systems. Experiments on nonlinear benchmark systems from biology and control theory show that our approach is efficient.},
author = {Kong, Hui and Bogomolov, Sergiy and Schilling, Christian and Jiang, Yu and Henzinger, Thomas A.},
booktitle = {Proceedings of the 20th International Conference on Hybrid Systems: Computation and Control},
isbn = {978-1-4503-4590-3},
location = {Pittsburgh, PA, United States},
pages = {163 -- 172},
publisher = {ACM},
title = {{Safety verification of nonlinear hybrid systems based on invariant clusters}},
doi = {10.1145/3049797.3049814},
year = {2017},
}
@inproceedings{711,
abstract = {Nested weighted automata (NWA) present a robust and convenient automata-theoretic formalism for quantitative specifications. Previous works have considered NWA that processed input words only in the forward direction. It is natural to allow the automata to process input words backwards as well, for example, to measure the maximal or average time between a response and the preceding request. We therefore introduce and study bidirectional NWA that can process input words in both directions. First, we show that bidirectional NWA can express interesting quantitative properties that are not expressible by forward-only NWA. Second, for the fundamental decision problems of emptiness and universality, we establish decidability and complexity results for the new framework which match the best-known results for the special case of forward-only NWA. Thus, for NWA, the increased expressiveness of bidirectionality is achieved at no additional computational complexity. This is in stark contrast to the unweighted case, where bidirectional finite automata are no more expressive but exponentially more succinct than their forward-only counterparts.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A. and Otop, Jan},
issn = {1868-8969},
location = {Berlin, Germany},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Bidirectional nested weighted automata}},
doi = {10.4230/LIPIcs.CONCUR.2017.5},
volume = {85},
year = {2017},
}
@inproceedings{941,
abstract = {Recently there has been a proliferation of automated program repair (APR) techniques, targeting various programming languages. Such techniques can generally be classified into two families: syntactic- and semantics-based. Semantics-based APR, on which we focus, typically uses symbolic execution to infer semantic constraints and then program synthesis to construct repairs conforming to them. While syntactic-based APR techniques have been shown successful on bugs in real-world programs written in both C and Java, semantics-based APR techniques mostly target C programs. This leaves empirical comparisons of the APR families not fully explored, and developers without a semantics-based APR technique for Java. We present JFix, a semantics-based APR framework that targets Java, and an associated Eclipse plugin. JFix is implemented atop Symbolic PathFinder, a well-known symbolic execution engine for Java programs. It extends one particular APR technique (Angelix), and is designed to be sufficiently generic to support a variety of such techniques. We demonstrate that semantics-based APR can indeed efficiently and effectively repair a variety of classes of bugs in large real-world Java programs. This supports our claim that the framework can both support developers seeking semantics-based repair of bugs in Java programs, and enable larger-scale empirical studies comparing syntactic- and semantics-based APR targeting Java. The demonstration of our tool is available via the project website at: https://xuanbachle.github.io/semanticsrepair/},
author = {Le, Xuan and Chu, Duc Hiep and Lo, David and Le Goues, Claire and Visser, Willem},
booktitle = {Proceedings of the 26th ACM SIGSOFT International Symposium on Software Testing and Analysis},
location = {Santa Barbara, CA, United States},
pages = {376 -- 379},
publisher = {ACM},
title = {{JFIX: Semantics-based repair of Java programs via symbolic PathFinder}},
doi = {10.1145/3092703.3098225},
year = {2017},
}
@inproceedings{942,
abstract = {A notable class of techniques for automatic program repair is known as semantics-based. Such techniques, e.g., Angelix, infer semantic specifications via symbolic execution, and then use program synthesis to construct new code that satisfies those inferred specifications. However, the obtained specifications are naturally incomplete, leaving the synthesis engine with the difficult task of synthesizing a general solution from a sparse space of many possible solutions that are consistent with the provided specifications but do not necessarily generalize. We present S3, a new repair synthesis engine that leverages the programming-by-examples methodology to synthesize high-quality bug repairs. The novelty in S3 that allows it to tackle the sparse search space and create more general repairs is three-fold: (1) a systematic way to customize and constrain the syntactic search space via a domain-specific language, (2) an efficient enumeration-based search strategy over the constrained search space, and (3) a number of ranking features based on measures of the syntactic and semantic distances between candidate solutions and the original buggy program. We compare S3’s repair effectiveness with the state-of-the-art synthesis engines Angelix, Enumerative, and CVC4. S3 can successfully and correctly fix at least three times more bugs than the best baseline on datasets of 52 bugs in small programs and 100 bugs in real-world large programs.},
author = {Le, Xuan and Chu, Duc Hiep and Lo, David and Le Goues, Claire and Visser, Willem},
isbn = {978-1-4503-5105-8},
location = {Paderborn, Germany},
pages = {593 -- 604},
publisher = {ACM},
title = {{S3: Syntax- and semantic-guided repair synthesis via programming by examples}},
doi = {10.1145/3106237.3106309},
year = {2017},
}