@article{5677, abstract = {Recently, contract-based design has been proposed as an “orthogonal” approach that complements system design methodologies proposed so far to cope with the complexity of system design. Contract-based design provides a rigorous scaffolding for verification, analysis, abstraction/refinement, and even synthesis. A number of results have been obtained in this domain but a unified treatment of the topic that can help put contract-based design in perspective was missing. This monograph intends to provide such a treatment where contracts are precisely defined and characterized so that they can be used in design methodologies with no ambiguity. In particular, this monograph identifies the essence of complex system design using contracts through a mathematical “meta-theory”, where all the properties of the methodology are derived from a very abstract and generic notion of contract. We show that the meta-theory provides deep and illuminating links with existing contract and interface theories, as well as guidelines for designing new theories. Our study encompasses contracts for both software and systems, with emphasis on the latter. We illustrate the use of contracts with two examples: requirement engineering for a parking garage management, and the development of contracts for timing and scheduling in the context of the Autosar methodology in use in the automotive sector.}, author = {Benveniste, Albert and Nickovic, Dejan and Caillaud, Benoît and Passerone, Roberto and Raclet, Jean Baptiste and Reinkemeier, Philipp and Sangiovanni-Vincentelli, Alberto and Damm, Werner and Henzinger, Thomas A and Larsen, Kim G.}, issn = {1551-3939}, journal = {Foundations and Trends in Electronic Design Automation}, number = {2-3}, pages = {124--400}, publisher = {Now Publishers}, title = {{Contracts for system design}}, doi = {10.1561/1000000053}, volume = {12}, year = {2018}, } @article{10418, abstract = {We present a new proof rule for proving almost-sure termination of probabilistic programs, including those that contain demonic non-determinism. An important question for a probabilistic program is whether the probability mass of all its diverging runs is zero, that is that it terminates "almost surely". Proving that can be hard, and this paper presents a new method for doing so. It applies directly to the program's source code, even if the program contains demonic choice. Like others, we use variant functions (a.k.a. "super-martingales") that are real-valued and decrease randomly on each loop iteration; but our key innovation is that the amount as well as the probability of the decrease are parametric. We prove the soundness of the new rule, indicate where its applicability goes beyond existing rules, and explain its connection to classical results on denumerable (non-demonic) Markov chains.}, author = {Mciver, Annabelle and Morgan, Carroll and Kaminski, Benjamin Lucien and Katoen, Joost P}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, location = {Los Angeles, CA, United States}, number = {POPL}, publisher = {Association for Computing Machinery}, title = {{A new proof rule for almost-sure termination}}, doi = {10.1145/3158121}, volume = {2}, year = {2017}, } @article{471, abstract = {We present a new algorithm for the statistical model checking of Markov chains with respect to unbounded temporal properties, including full linear temporal logic. 
The main idea is that we monitor each simulation run on the fly, in order to detect quickly if a bottom strongly connected component is entered with high probability, in which case the simulation run can be terminated early. As a result, our simulation runs are often much shorter than required by termination bounds that are computed a priori for a desired level of confidence on a large state space. In comparison to previous algorithms for statistical model checking, our method is not only faster in many cases but also requires less information about the system, namely, only the minimum transition probability that occurs in the Markov chain. In addition, our method can be generalised to unbounded quantitative properties such as mean-payoff bounds. }, author = {Daca, Przemyslaw and Henzinger, Thomas A and Kretinsky, Jan and Petrov, Tatjana}, issn = {1529-3785}, journal = {ACM Transactions on Computational Logic (TOCL)}, number = {2}, publisher = {ACM}, title = {{Faster statistical model checking for unbounded temporal properties}}, doi = {10.1145/3060139}, volume = {18}, year = {2017}, } @article{467, abstract = {Recently there has been a significant effort to handle quantitative properties in formal verification and synthesis. While weighted automata over finite and infinite words provide a natural and flexible framework to express quantitative properties, perhaps surprisingly, some basic system properties such as average response time cannot be expressed using weighted automata or in any other known decidable formalism. In this work, we introduce nested weighted automata as a natural extension of weighted automata, which makes it possible to express important quantitative properties such as average response time. In nested weighted automata, a master automaton spins off and collects results from weighted slave automata, each of which computes a quantity along a finite portion of an infinite word. Nested weighted automata can be viewed as the quantitative analogue of monitor automata, which are used in runtime verification. We establish an almost-complete decidability picture for the basic decision problems about nested weighted automata and illustrate their applicability in several domains. In particular, nested weighted automata can be used to decide average response time properties.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, issn = {1529-3785}, journal = {ACM Transactions on Computational Logic (TOCL)}, number = {4}, publisher = {ACM}, title = {{Nested weighted automata}}, doi = {10.1145/3152769}, volume = {18}, year = {2017}, } @article{465, abstract = {The edit distance between two words $w_1, w_2$ is the minimal number of word operations (letter insertions, deletions, and substitutions) necessary to transform $w_1$ to $w_2$. The edit distance generalizes to languages $L_1, L_2$, where the edit distance from $L_1$ to $L_2$ is the minimal number $k$ such that for every word from $L_1$ there exists a word in $L_2$ with edit distance at most $k$. We study the edit distance computation problem between pushdown automata and their subclasses. The problem of computing edit distance to a pushdown automaton is undecidable, and in practice, the interesting question is to compute the edit distance from a pushdown automaton (the implementation, a standard model for programs with recursion) to a regular language (the specification).
In this work, we present a complete picture of decidability and complexity for the following problems: (1) deciding whether, for a given threshold $k$, the edit distance from a pushdown automaton to a finite automaton is at most $k$, and (2) deciding whether the edit distance from a pushdown automaton to a finite automaton is finite. }, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Ibsen-Jensen, Rasmus and Otop, Jan}, issn = {1860-5974}, journal = {Logical Methods in Computer Science}, number = {3}, publisher = {International Federation of Computational Logic}, title = {{Edit distance for pushdown automata}}, doi = {10.23638/LMCS-13(3:23)2017}, volume = {13}, year = {2017}, } @inbook{625, abstract = {In the analysis of reactive systems, a quantitative objective assigns a real value to every trace of the system. The value decision problem for a quantitative objective requires a trace whose value is at least a given threshold, and the exact value decision problem requires a trace whose value is exactly the threshold. We compare the computational complexity of the value and exact value decision problems for classical quantitative objectives, such as sum, discounted sum, energy, and mean-payoff, for two standard models of reactive systems, namely, graphs and graph games.}, author = {Chatterjee, Krishnendu and Doyen, Laurent and Henzinger, Thomas A}, booktitle = {Models, Algorithms, Logics and Tools}, editor = {Aceto, Luca and Bacci, Giorgio and Ingólfsdóttir, Anna and Legay, Axel and Mardare, Radu}, isbn = {978-3-319-63120-2}, issn = {0302-9743}, pages = {367 -- 381}, publisher = {Springer}, title = {{The cost of exactness in quantitative reachability}}, doi = {10.1007/978-3-319-63121-9_18}, volume = {10460}, year = {2017}, } @inproceedings{633, abstract = {A Rapidly-exploring Random Tree (RRT) is an algorithm which can search a non-convex region of space by incrementally building a space-filling tree. The tree is constructed from random points drawn from the system’s state space and is biased to grow towards large unexplored areas in the system. RRT can provide better coverage of a system’s possible behaviors compared with random simulations, but is more lightweight than full reachability analysis. In this paper, we explore some of the design decisions encountered while implementing a hybrid extension of the RRT algorithm, which have not been elaborated on before. In particular, we focus on handling non-determinism, which arises due to discrete transitions. We introduce the notion of important points to account for this phenomenon. We showcase our ideas using heater and navigation benchmarks.}, author = {Bak, Stanley and Bogomolov, Sergiy and Henzinger, Thomas A and Kumar, Aviral}, editor = {Abate, Alessandro and Bodo, Sylvie}, isbn = {978-331963500-2}, location = {Heidelberg, Germany}, pages = {83 -- 89}, publisher = {Springer}, title = {{Challenges and tool implementation of hybrid rapidly exploring random trees}}, doi = {10.1007/978-3-319-63501-9_6}, volume = {10381}, year = {2017}, } @inproceedings{636, abstract = {Signal regular expressions can specify sequential properties of real-valued signals based on threshold conditions, regular operations, and duration constraints. In this paper we endow them with a quantitative semantics which indicates how robustly a signal matches or does not match a given expression. First, we show that this semantics is a safe approximation of a distance between the signal and the language defined by the expression.
Then, we consider the robust matching problem, that is, computing the quantitative semantics of every segment of a given signal relative to an expression. We present an algorithm that solves this problem for piecewise-constant and piecewise-linear signals and show that for such signals the robustness map is a piecewise-linear function. The availability of an indicator describing how robustly a signal segment matches some regular pattern provides a general framework for quantitative monitoring of cyber-physical systems.}, author = {Bakhirkin, Alexey and Ferrere, Thomas and Maler, Oded and Ulus, Dogan}, editor = {Abate, Alessandro and Geeraerts, Gilles}, isbn = {978-331965764-6}, location = {Berlin, Germany}, pages = {189 -- 206}, publisher = {Springer}, title = {{On the quantitative semantics of regular expressions over real-valued signals}}, doi = {10.1007/978-3-319-65765-3_11}, volume = {10419}, year = {2017}, } @proceedings{638, abstract = {This book constitutes the refereed proceedings of the 9th International Workshop on Numerical Software Verification, NSV 2016, held in Toronto, ON, Canada, in July 2016, colocated with CAV 2016, the 28th International Conference on Computer Aided Verification. The NSV workshop is dedicated to the development of logical and mathematical techniques for reasoning about programmability and reliability.}, editor = {Bogomolov, Sergiy and Martel, Matthieu and Prabhakar, Pavithra}, issn = {0302-9743}, location = {Toronto, ON, Canada}, publisher = {Springer}, title = {{Numerical Software Verification}}, doi = {10.1007/978-3-319-54292-8}, volume = {10152}, year = {2017}, } @misc{6426, abstract = {Synchronous programs are easy to specify because the side effects of an operation are finished by the time the invocation of the operation returns to the caller. Asynchronous programs, on the other hand, are difficult to specify because there are side effects due to pending computation scheduled as a result of the invocation of an operation. They are also difficult to verify because of the large number of possible interleavings of concurrent asynchronous computation threads. We show that specifications and correctness proofs for asynchronous programs can be structured by introducing the fiction, for proof purposes, that intermediate, non-quiescent states of asynchronous operations can be ignored. Then, the task of specification becomes relatively simple and the task of verification can be naturally decomposed into smaller sub-tasks. The sub-tasks iteratively summarize, guided by the structure of an asynchronous program, the atomic effect of non-atomic operations and the synchronous effect of asynchronous operations. This structuring of specifications and proofs corresponds to the introduction of multiple layers of stepwise refinement for asynchronous programs. We present the first proof rule, called synchronization, to reduce asynchronous invocations on a lower layer to synchronous invocations on a higher layer. We implemented our proof method in CIVL and evaluated it on a collection of benchmark programs.}, author = {Henzinger, Thomas A and Kragl, Bernhard and Qadeer, Shaz}, issn = {2664-1690}, pages = {28}, publisher = {IST Austria}, title = {{Synchronizing the asynchronous}}, doi = {10.15479/AT:IST-2018-853-v2-2}, year = {2017}, } @inproceedings{663, abstract = {In this paper, we propose an approach to automatically compute invariant clusters for nonlinear semialgebraic hybrid systems.
An invariant cluster for an ordinary differential equation (ODE) is a multivariate polynomial invariant $g(\vec{u}, \vec{x}) = 0$, parametric in $\vec{u}$, which can yield an infinite number of concrete invariants by assigning different values to $\vec{u}$ so that every trajectory of the system can be overapproximated precisely by the intersection of a group of concrete invariants. For semialgebraic systems, which involve ODEs with multivariate polynomial right-hand sides, given a template multivariate polynomial $g(\vec{u}, \vec{x})$, an invariant cluster can be obtained by first computing the remainder of the Lie derivative of $g(\vec{u}, \vec{x})$ divided by $g(\vec{u}, \vec{x})$ and then solving the system of polynomial equations obtained from the coefficients of the remainder. Based on invariant clusters and sum-of-squares (SOS) programming, we present a new method for the safety verification of hybrid systems. Experiments on nonlinear benchmark systems from biology and control theory show that our approach is efficient. }, author = {Kong, Hui and Bogomolov, Sergiy and Schilling, Christian and Jiang, Yu and Henzinger, Thomas A}, booktitle = {Proceedings of the 20th International Conference on Hybrid Systems: Computation and Control}, isbn = {978-145034590-3}, location = {Pittsburgh, PA, United States}, pages = {163 -- 172}, publisher = {ACM}, title = {{Safety verification of nonlinear hybrid systems based on invariant clusters}}, doi = {10.1145/3049797.3049814}, year = {2017}, } @inproceedings{711, abstract = {Nested weighted automata (NWA) present a robust and convenient automata-theoretic formalism for quantitative specifications. Previous works have considered NWA that processed input words only in the forward direction. It is natural to allow the automata to process input words backwards as well, for example, to measure the maximal or average time between a response and the preceding request. We therefore introduce and study bidirectional NWA that can process input words in both directions. First, we show that bidirectional NWA can express interesting quantitative properties that are not expressible by forward-only NWA. Second, for the fundamental decision problems of emptiness and universality, we establish decidability and complexity results for the new framework which match the best-known results for the special case of forward-only NWA. Thus, for NWA, the increased expressiveness of bidirectionality is achieved at no additional computational complexity. This is in stark contrast to the unweighted case, where bidirectional finite automata are no more expressive but exponentially more succinct than their forward-only counterparts.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, issn = {1868-8969}, location = {Berlin, Germany}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Bidirectional nested weighted automata}}, doi = {10.4230/LIPIcs.CONCUR.2017.5}, volume = {85}, year = {2017}, } @inproceedings{963, abstract = {Network games are widely used as a model for selfish resource-allocation problems. In the classical model, each player selects a path connecting her source and target vertex. The cost of traversing an edge depends on the number of players that traverse it. Thus, it abstracts away the fact that different users may use a resource at different times and for different durations, which plays an important role in defining the costs of the users in reality.
For example, when transmitting packets in a communication network, routing traffic in a road network, or processing a task in a production system, the traversal of the network involves an inherent delay, and so sharing and congestion of resources crucially depend on time. We study timed network games, which add a time component to network games. Each vertex v in the network is associated with a cost function, mapping the load on v to the price that a player pays for staying in v for one time unit with this load. In addition, each edge has a guard, describing time intervals in which the edge can be traversed, forcing the players to spend time on vertices. Unlike earlier work that adds a time component to network games, the time in our model is continuous and cannot be discretized. In particular, players have uncountably many strategies, and a game may have uncountably many pure Nash equilibria. We study properties of timed network games with cost-sharing or congestion cost functions: their stability, equilibrium inefficiency, and complexity. In particular, we show that the answer to the question whether we can restrict attention to boundary strategies, namely ones in which edges are traversed only at the boundaries of guards, is mixed. }, author = {Avni, Guy and Guha, Shibashis and Kupferman, Orna}, issn = {1868-8969}, location = {Aalborg, Denmark}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Timed network games with clocks}}, doi = {10.4230/LIPIcs.MFCS.2017.37}, volume = {83}, year = {2017}, } @inproceedings{941, abstract = {Recently there has been a proliferation of automated program repair (APR) techniques, targeting various programming languages. Such techniques can be generally classified into two families: syntactic- and semantics-based. Semantics-based APR, on which we focus, typically uses symbolic execution to infer semantic constraints and then program synthesis to construct repairs conforming to them. While syntactic-based APR techniques have been shown successful on bugs in real-world programs written in both C and Java, semantics-based APR techniques mostly target C programs. This leaves empirical comparisons of the APR families not fully explored, and developers without a Java-based semantics APR technique. We present JFix, a semantics-based APR framework that targets Java, and an associated Eclipse plugin. JFix is implemented atop Symbolic PathFinder, a well-known symbolic execution engine for Java programs. It extends one particular APR technique (Angelix), and is designed to be sufficiently generic to support a variety of such techniques. We demonstrate that semantics-based APR can indeed efficiently and effectively repair a variety of classes of bugs in large real-world Java programs. This supports our claim that the framework can both support developers seeking semantics-based repair of bugs in Java programs and enable larger-scale empirical studies comparing syntactic- and semantics-based APR targeting Java.
The demonstration of our tool is available via the project website at: https://xuanbachle.github.io/semanticsrepair/ }, author = {Le, Xuan and Chu, Duc Hiep and Lo, David and Le Goues, Claire and Visser, Willem}, booktitle = {Proceedings of the 26th ACM SIGSOFT International Symposium on Software Testing and Analysis}, location = {Santa Barbara, CA, United States}, pages = {376 -- 379}, publisher = {ACM}, title = {{JFIX: Semantics-based repair of Java programs via Symbolic PathFinder}}, doi = {10.1145/3092703.3098225}, year = {2017}, } @inproceedings{950, abstract = {Two-player games on graphs are widely studied in formal methods as they model the interaction between a system and its environment. The game is played by moving a token throughout a graph to produce an infinite path. There are several common modes to determine how the players move the token through the graph; e.g., in turn-based games the players alternate turns in moving the token. We study the bidding mode of moving the token, which, to the best of our knowledge, has never been studied in infinite-duration games. Both players have separate budgets, which sum up to $1$. In each turn, a bidding takes place. Both players submit bids simultaneously, and a bid is legal if it does not exceed the available budget. The winner of the bidding pays his bid to the other player and moves the token. For reachability objectives, repeated bidding games have been studied and are called Richman games. There, a central question is the existence and computation of threshold budgets; namely, a value $t \in [0,1]$ such that if Player 1's budget exceeds $t$, he can win the game, and if Player 2's budget exceeds $1-t$, he can win the game. We focus on parity games and mean-payoff games. We show the existence of threshold budgets in these games, and reduce the problem of finding them to Richman games. We also determine the strategy-complexity of an optimal strategy. Our most interesting result shows that memoryless strategies suffice for mean-payoff bidding games. }, author = {Avni, Guy and Henzinger, Thomas A and Chonev, Ventsislav K}, issn = {1868-8969}, location = {Berlin, Germany}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Infinite-duration bidding games}}, doi = {10.4230/LIPIcs.CONCUR.2017.21}, volume = {85}, year = {2017}, } @phdthesis{1155, abstract = {This dissertation concerns the automatic verification of probabilistic systems and programs with arrays by statistical and logical methods. Although statistical and logical methods are different in nature, we show that they can be successfully combined for system analysis. In the first part of the dissertation we present a new statistical algorithm for the verification of probabilistic systems with respect to unbounded properties, including linear temporal logic. Our algorithm often performs faster than the previous approaches, and at the same time requires less information about the system. In addition, our method can be generalized to unbounded quantitative properties such as mean-payoff bounds. In the second part, we introduce two techniques for comparing probabilistic systems. Probabilistic systems are typically compared using the notion of equivalence, which requires the systems to have equal probability of all behaviors. However, this notion is often too strict, since probabilities are typically only empirically estimated, and any imprecision may break the relation between processes.
On the one hand, we propose to replace the Boolean notion of equivalence by a quantitative distance of similarity. For this purpose, we introduce a statistical framework for estimating distances between Markov chains based on their simulation runs, and we investigate which distances can be approximated in our framework. On the other hand, we propose to compare systems with respect to a new qualitative logic, which expresses that behaviors occur with probability one or a positive probability. This qualitative analysis is robust with respect to modeling errors and applicable to many domains. In the last part, we present a new quantifier-free logic for integer arrays, which allows us to express counting. Counting properties are prevalent in array-manipulating programs; however, they cannot be expressed in the quantified fragments of the theory of arrays. We present a decision procedure for our logic, and provide several complexity results.}, author = {Daca, Przemyslaw}, issn = {2663-337X}, pages = {163}, publisher = {Institute of Science and Technology Austria}, title = {{Statistical and logical methods for property checking}}, doi = {10.15479/AT:ISTA:TH_730}, year = {2017}, } @inproceedings{647, abstract = {Despite researchers’ efforts in the last couple of decades, reachability analysis is still a challenging problem even for linear hybrid systems. Among the existing approaches, the most practical ones are mainly based on bounded-time reachable set over-approximations. For the purpose of unbounded-time analysis, one important strategy is to abstract the original system and find an invariant for the abstraction. In this paper, we propose an approach to constructing a new kind of abstraction called conic abstraction for affine hybrid systems, and to computing reachable sets based on this abstraction. The essential feature of a conic abstraction is that it partitions the state space of a system into a set of convex polyhedral cones which is derived from a uniform conic partition of the derivative space. Such a set of polyhedral cones is able to cut all trajectories of the system into almost straight segments so that every segment of a reach pipe in a polyhedral cone tends to be straight as well, and hence can be over-approximated tightly by polyhedra using techniques similar to those of HyTech or PHAVer. In particular, for diagonalizable affine systems, our approach is guaranteed to find an invariant for unbounded reachable sets, which is beyond the capability of bounded-time reachability analysis tools. We implemented the approach in a tool and experiments on benchmarks show that our approach is more powerful than SpaceEx and PHAVer in dealing with diagonalizable systems.}, author = {Bogomolov, Sergiy and Giacobbe, Mirco and Henzinger, Thomas A and Kong, Hui}, isbn = {978-331965764-6}, location = {Berlin, Germany}, pages = {116 -- 132}, publisher = {Springer}, title = {{Conic abstractions for hybrid systems}}, doi = {10.1007/978-3-319-65765-3_7}, volume = {10419}, year = {2017}, } @inproceedings{631, abstract = {Template polyhedra generalize intervals and octagons to polyhedra whose facets are orthogonal to a given set of arbitrary directions. They have been employed in the abstract interpretation of programs and, with particular success, in the reachability analysis of hybrid automata. While previously the choice of directions has been left to the user or a heuristic, we present a method for the automatic discovery of directions that generalize and eliminate spurious counterexamples.
We show that for the class of convex hybrid automata, i.e., hybrid automata with (possibly nonlinear) convex constraints on derivatives, such directions always exist and can be found using convex optimization. We embed our method inside a CEGAR loop, thus enabling the time-unbounded reachability analysis of an important and richer class of hybrid automata than was previously possible. We evaluate our method on several benchmarks, demonstrating also its superior efficiency for the special case of linear hybrid automata.}, author = {Bogomolov, Sergiy and Frehse, Goran and Giacobbe, Mirco and Henzinger, Thomas A}, isbn = {978-366254576-8}, location = {Uppsala, Sweden}, pages = {589 -- 606}, publisher = {Springer}, title = {{Counterexample guided refinement of template polyhedra}}, doi = {10.1007/978-3-662-54577-5_34}, volume = {10205}, year = {2017}, } @article{1407, abstract = {We consider the problem of computing the set of initial states of a dynamical system such that there exists a control strategy to ensure that the trajectories satisfy a temporal logic specification with probability 1 (almost-surely). We focus on discrete-time, stochastic linear dynamics and specifications given as formulas of the Generalized Reactivity(1) fragment of Linear Temporal Logic over linear predicates in the states of the system. We propose a solution based on iterative abstraction-refinement, and turn-based 2-player probabilistic games. While the theoretical guarantee of our algorithm after any finite number of iterations is only a partial solution, we show that if our algorithm terminates, then the result is the set of all satisfying initial states. Moreover, for any (partial) solution our algorithm synthesizes witness control strategies to ensure almost-sure satisfaction of the temporal logic specification. While the proposed algorithm guarantees progress and soundness in every iteration, it is computationally demanding. We offer an alternative, more efficient solution for the reachability properties that decomposes the problem into a series of smaller problems of the same type. All algorithms are demonstrated on an illustrative case study.}, author = {Svoreňová, Mária and Kretinsky, Jan and Chmelik, Martin and Chatterjee, Krishnendu and Černá, Ivana and Belta, Călin}, journal = {Nonlinear Analysis: Hybrid Systems}, number = {2}, pages = {230 -- 253}, publisher = {Elsevier}, title = {{Temporal logic control for stochastic linear systems using abstraction refinement of probabilistic games}}, doi = {10.1016/j.nahs.2016.04.006}, volume = {23}, year = {2017}, } @article{1338, abstract = {We present a computer-aided programming approach to concurrency. The approach allows programmers to program assuming a friendly, non-preemptive scheduler, and our synthesis procedure inserts synchronization to ensure that the final program works even with a preemptive scheduler. The correctness specification is implicit, inferred from the non-preemptive behavior. Let us consider sequences of calls that the program makes to an external interface. The specification requires that any such sequence produced under a preemptive scheduler should be included in the set of sequences produced under a non-preemptive scheduler. We guarantee that our synthesis does not introduce deadlocks and that the synchronization inserted is optimal w.r.t. a given objective function.
The solution is based on a finitary abstraction, an algorithm for bounded language inclusion modulo an independence relation, and generation of a set of global constraints over synchronization placements. Each model of the global constraints set corresponds to a correctness-ensuring synchronization placement. The placement that is optimal w.r.t. the given objective function is chosen as the synchronization solution. We apply the approach to device-driver programming, where the driver threads call the software interface of the device and the API provided by the operating system. Our experiments demonstrate that our synthesis method is precise and efficient. The implicit specification helped us find one concurrency bug previously missed when model-checking using an explicit, user-provided specification. We implemented objective functions for coarse-grained and fine-grained locking and observed that different synchronization placements are produced for our experiments, favoring a minimal number of synchronization operations or maximum concurrency, respectively.}, author = {Cerny, Pavol and Clarke, Edmund and Henzinger, Thomas A and Radhakrishna, Arjun and Ryzhyk, Leonid and Samanta, Roopsha and Tarrach, Thorsten}, journal = {Formal Methods in System Design}, number = {2-3}, pages = {97 -- 139}, publisher = {Springer}, title = {{From non-preemptive to preemptive scheduling using synchronization synthesis}}, doi = {10.1007/s10703-016-0256-5}, volume = {50}, year = {2017}, }