@article{1351, abstract = {The behaviour of gene regulatory networks (GRNs) is typically analysed using simulation-based statistical testing-like methods. In this paper, we demonstrate that we can replace this approach by a formal verification-like method that gives higher assurance and scalability. We focus on Wagner’s weighted GRN model with varying weights, which is used in evolutionary biology. In the model, weight parameters represent the gene interaction strength that may change due to genetic mutations. For a property of interest, we synthesise the constraints over the parameter space that represent the set of GRNs satisfying the property. We experimentally show that our parameter synthesis procedure computes the mutational robustness of GRNs—an important problem of interest in evolutionary biology—more efficiently than the classical simulation method. We specify the property in linear temporal logic. We employ symbolic bounded model checking and SMT solving to compute the space of GRNs that satisfy the property, which amounts to synthesizing a set of linear constraints on the weights.}, author = {Giacobbe, Mirco and Guet, Calin C and Gupta, Ashutosh and Henzinger, Thomas A and Paixao, Tiago and Petrov, Tatjana}, issn = {0001-5903}, journal = {Acta Informatica}, number = {8}, pages = {765 -- 787}, publisher = {Springer}, title = {{Model checking the evolution of gene regulatory networks}}, doi = {10.1007/s00236-016-0278-x}, volume = {54}, year = {2017}, } @article{1196, abstract = {We define the model-measuring problem: given a model M and specification ϕ, what is the maximal distance ρ such that all models M' within distance ρ from M satisfy (or violate) ϕ. The model-measuring problem presupposes a distance function on models. We concentrate on automatic distance functions, which are defined by weighted automata. The model-measuring problem subsumes several generalizations of the classical model-checking problem, in particular, quantitative model-checking problems that measure the degree of satisfaction of a specification; robustness problems that measure how much a model can be perturbed without violating the specification; and parameter synthesis for hybrid systems. We show that for automatic distance functions, and (a) ω-regular linear-time, (b) ω-regular branching-time, and (c) hybrid specifications, the model-measuring problem can be solved. We use automata-theoretic model-checking methods for model measuring, replacing the emptiness question for word, tree, and hybrid automata by the optimal-value question for the weighted versions of these automata. For automata over words and trees, we consider weighted automata that accumulate weights by maximizing, summing, discounting, and limit averaging. For hybrid automata, we consider monotonic (parametric) hybrid automata, a hybrid counterpart of (discrete) weighted automata. We give several examples of using the model-measuring problem to compute various notions of robustness and quantitative satisfaction for temporal specifications. Further, we propose a modeling framework for model measuring to ease the specification and reduce the likelihood of errors in modeling. Finally, we present a variant of the model-measuring problem, called the model-repair problem.
The model-repair problem applies to models that do not satisfy the specification; it can be used to derive restrictions under which the model satisfies the specification, i.e., to repair the model.}, author = {Henzinger, Thomas A and Otop, Jan}, journal = {Nonlinear Analysis: Hybrid Systems}, pages = {166 -- 190}, publisher = {Elsevier}, title = {{Model measuring for discrete and hybrid systems}}, doi = {10.1016/j.nahs.2016.09.001}, volume = {23}, year = {2017}, } @inproceedings{1116, abstract = {Time-triggered switched networks are a deterministic communication infrastructure used by real-time distributed embedded systems. Due to the criticality of the applications running over them, developers need to ensure that end-to-end communication is dependable and predictable. Traditional approaches assume static networks that are not flexible to changes caused by reconfigurations or, more importantly, faults, which are dealt with in the application using redundancy. We adopt the concept of handling faults in the switches from non-real-time networks while maintaining the required predictability. We study a class of forwarding schemes that can handle various types of failures, and we consider probabilistic failures. For a given network with a forwarding scheme and a constant ℓ, we compute the {\em score} of the scheme, namely the probability (induced by faults) that at least ℓ messages arrive on time. We reduce the scoring problem to a reachability problem on a Markov chain with a "product-like" structure. Its special structure allows us to reason about it symbolically, and reduce the scoring problem to #SAT. Our solution is generic and can be adapted to different networks and other contexts. Also, we show that the computational complexity of the scoring problem is #P-complete, and we study methods to estimate the score. We evaluate the effectiveness of our techniques with an implementation.}, author = {Avni, Guy and Goel, Shubham and Henzinger, Thomas A and Rodríguez Navas, Guillermo}, issn = {0302-9743}, location = {Uppsala, Sweden}, pages = {169 -- 187}, publisher = {Springer}, title = {{Computing scores of forwarding schemes in switched networks with probabilistic faults}}, doi = {10.1007/978-3-662-54580-5_10}, volume = {10206}, year = {2017}, } @article{1066, abstract = {Simulation is an attractive alternative to language inclusion for automata as it is an under-approximation of language inclusion, but usually has much lower complexity. Simulation has also been extended in two orthogonal directions, namely, (1) fair simulation, for simulation over a specified set of infinite runs; and (2) quantitative simulation, for simulation between weighted automata. While fair trace inclusion is PSPACE-complete, fair simulation can be computed in polynomial time. For weighted automata, the (quantitative) language inclusion problem is undecidable in general, whereas the (quantitative) simulation reduces to quantitative games, which admit pseudo-polynomial time algorithms. In this work, we study (quantitative) simulation for weighted automata with Büchi acceptance conditions, i.e., we generalize fair simulation from non-weighted automata to weighted automata.
We show that imposing Büchi acceptance conditions on weighted automata changes many fundamental properties of the simulation games, yet they still admit pseudo-polynomial time algorithms.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan and Velner, Yaron}, journal = {Information and Computation}, number = {2}, pages = {143 -- 166}, publisher = {Elsevier}, title = {{Quantitative fair simulation games}}, doi = {10.1016/j.ic.2016.10.006}, volume = {254}, year = {2017}, } @inproceedings{1011, abstract = {Pushdown systems (PDSs) and recursive state machines (RSMs), which are linearly equivalent, are standard models for interprocedural analysis. Yet RSMs are more convenient as they (a) explicitly model function calls and returns, and (b) specify many natural parameters for algorithmic analysis, e.g., the number of entries and exits. We consider a general framework where RSM transitions are labeled from a semiring and path properties are algebraic with semiring operations, which can model, e.g., interprocedural reachability and dataflow analysis problems. Our main contributions are new algorithms for several fundamental problems. As compared to a direct translation of RSMs to PDSs and the best-known existing bounds for PDSs, our analysis algorithm improves the complexity for finite-height semirings (which subsume reachability and standard dataflow properties). We further consider the problem of extracting distance values from the representation structures computed by our algorithm, and give efficient algorithms that distinguish the complexity of a one-time preprocessing from the complexity of each individual query. Another advantage of our algorithm is that our improvements carry over to the concurrent setting, where we improve the best-known complexity for the context-bounded analysis of concurrent RSMs. Finally, we provide a prototype implementation that gives a significant speed-up on several benchmarks from the SLAM/SDV project.}, author = {Chatterjee, Krishnendu and Kragl, Bernhard and Mishra, Samarth and Pavlogiannis, Andreas}, editor = {Yang, Hongseok}, issn = {0302-9743}, location = {Uppsala, Sweden}, pages = {287 -- 313}, publisher = {Springer}, title = {{Faster algorithms for weighted recursive state machines}}, doi = {10.1007/978-3-662-54434-1_11}, volume = {10201}, year = {2017}, } @inproceedings{1003, abstract = {Network games (NGs) are played on directed graphs and are extensively used in network design and analysis. Search problems for NGs include finding special strategy profiles such as a Nash equilibrium and a globally optimal solution. The networks modeled by NGs may be huge. In formal verification, abstraction has proven to be an extremely effective technique for reasoning about systems with big and even infinite state spaces. We describe an abstraction-refinement methodology for reasoning about NGs. Our methodology is based on an abstraction function that maps the state space of an NG to a much smaller state space. We search for a global optimum and a Nash equilibrium by reasoning on an under- and an overapproximation defined on top of this smaller state space. When the approximations are too coarse to find such profiles, we refine the abstraction function.
Our experimental results demonstrate the efficiency of the methodology.}, author = {Avni, Guy and Guha, Shibashis and Kupferman, Orna}, issn = {1045-0823}, location = {Melbourne, Australia}, pages = {70 -- 76}, publisher = {AAAI Press}, title = {{An abstraction-refinement methodology for reasoning about network games}}, doi = {10.24963/ijcai.2017/11}, year = {2017}, } @inproceedings{962, abstract = {We present a new algorithm for model counting of a class of string constraints. In addition to the classic operation of concatenation, our class includes some recursively defined operations such as Kleene closure, and replacement of substrings. Additionally, our class also includes length constraints on the string expressions, which means that, by requiring reasoning about numbers, we face a multi-sorted logic. In the end, our string constraints are motivated by their use in programming for web applications. Our algorithm comprises two novel features: the ability to use a technique of (1) partial derivatives for constraints that are already in a solved form, i.e., a form where its (string) satisfiability is clearly displayed, and (2) non-progression, where cyclic reasoning in the reduction process may be terminated (thus allowing for the algorithm to look elsewhere). Finally, we experimentally compare our model counter with two recent works on model counting of similar constraints, SMC [18] and ABC [5], to demonstrate its superior performance.}, author = {Trinh, Minh and Chu, Duc Hiep and Jaffar, Joxan}, editor = {Majumdar, Rupak and Kunčak, Viktor}, issn = {0302-9743}, location = {Heidelberg, Germany}, pages = {399 -- 418}, publisher = {Springer}, title = {{Model counting for recursively-defined strings}}, doi = {10.1007/978-3-319-63390-9_21}, volume = {10427}, year = {2017}, } @inproceedings{942, abstract = {A notable class of techniques for automatic program repair is known as semantics-based. Such techniques, e.g., Angelix, infer semantic specifications via symbolic execution, and then use program synthesis to construct new code that satisfies those inferred specifications. However, the obtained specifications are naturally incomplete, leaving the synthesis engine with a difficult task of synthesizing a general solution from a sparse space of many possible solutions that are consistent with the provided specifications but that do not necessarily generalize. We present S3, a new repair synthesis engine that leverages programming-by-examples methodology to synthesize high-quality bug repairs. The novelty in S3 that allows it to tackle the sparse search space to create more general repairs is three-fold: (1) A systematic way to customize and constrain the syntactic search space via a domain-specific language, (2) An efficient enumeration-based search strategy over the constrained search space, and (3) A number of ranking features based on measures of the syntactic and semantic distances between candidate solutions and the original buggy program. We compare S3’s repair effectiveness with state-of-the-art synthesis engines Angelix, Enumerative, and CVC4. S3 can successfully and correctly fix at least three times more bugs than the best baseline on datasets of 52 bugs in small programs, and 100 bugs in real-world large programs.
}, author = {Le, Xuan and Chu, Duc Hiep and Lo, David and Le Goues, Claire and Visser, Willem}, isbn = {978-1-4503-5105-8}, location = {Paderborn, Germany}, pages = {593 -- 604}, publisher = {ACM}, title = {{S3: Syntax- and semantic-guided repair synthesis via programming by examples}}, doi = {10.1145/3106237.3106309}, volume = {F130154}, year = {2017}, } @article{743, abstract = {This special issue of the Journal on Formal Methods in System Design is dedicated to Prof. Helmut Veith, who unexpectedly passed away in March 2016. Helmut Veith was a brilliant researcher, inspiring collaborator, passionate mentor, generous friend, and valued member of the formal methods community. Helmut was not only known for his numerous and influential contributions in the field of automated verification (most prominently his work on Counterexample-Guided Abstraction Refinement [1,2]), but also for his untiring and passionate efforts for the logic community: he co-organized the Vienna Summer of Logic (an event comprising twelve conferences and numerous workshops which attracted thousands of researchers from all over the world), he initiated the Vienna Center for Logic and Algorithms (which promotes international collaboration on logic and algorithms and organizes outreach events such as the LogicLounge), and he coordinated the Doctoral Program on Logical Methods in Computer Science at TU Wien (currently educating more than 40 doctoral students) and a National Research Network on Rigorous Systems Engineering (uniting fifteen researchers in Austria to address the challenge of building reliable and safe computer systems). With his enthusiasm and commitment, Helmut completely reshaped the Austrian research landscape in the field of logic and verification in his few years as a full professor at TU Wien.}, author = {Gottlob, Georg and Henzinger, Thomas A and Weißenbacher, Georg}, journal = {Formal Methods in System Design}, number = {2}, pages = {267 -- 269}, publisher = {Springer}, title = {{Preface of the special issue in memoriam Helmut Veith}}, doi = {10.1007/s10703-017-0307-6}, volume = {51}, year = {2017}, } @inproceedings{549, abstract = {Model checking is usually based on a comprehensive traversal of the state space. Causality-based model checking is a radically different approach that instead analyzes the cause-effect relationships in a program. We give an overview of a new class of model checking algorithms that capture the causal relationships in a special data structure called concurrent traces. Concurrent traces identify key events in an execution history and link them through their cause-effect relationships. The model checker builds a tableau of concurrent traces, where the case splits represent different causal explanations of a hypothetical error. Causality-based model checking has been implemented in the ARCTOR tool, and applied to previously intractable multi-threaded benchmarks.}, author = {Finkbeiner, Bernd and Kupriyanov, Andrey}, booktitle = {Electronic Proceedings in Theoretical Computer Science}, issn = {2075-2180}, location = {Uppsala, Sweden}, pages = {31 -- 38}, publisher = {Open Publishing Association}, title = {{Causality-based model checking}}, doi = {10.4204/EPTCS.259.3}, volume = {259}, year = {2017}, } @inproceedings{1090, abstract = {While weighted automata provide a natural framework to express quantitative properties, many basic properties like average response time cannot be expressed with weighted automata.
Nested weighted automata extend weighted automata and consist of a master automaton and a set of slave automata that are invoked by the master automaton. Nested weighted automata are strictly more expressive than weighted automata (e.g., average response time can be expressed with nested weighted automata), but the basic decision questions have higher complexity (e.g., for deterministic automata, the emptiness question for nested weighted automata is PSPACE-hard, whereas the corresponding complexity for weighted automata is PTIME). We consider a natural subclass of nested weighted automata where at any point at most a bounded number k of slave automata can be active. We focus on automata whose master value function is the limit average. We show that these nested weighted automata with bounded width are strictly more expressive than weighted automata (e.g., average response time with no overlapping requests can be expressed with bound k=1, but not with non-nested weighted automata). We show that the complexity of the basic decision problems (i.e., emptiness and universality) for the subclass with k constant matches the complexity for weighted automata. Moreover, when k is part of the input, given in unary, we establish PSPACE-completeness.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, location = {Krakow, Poland}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Nested weighted limit-average automata of bounded width}}, doi = {10.4230/LIPIcs.MFCS.2016.24}, volume = {58}, year = {2016}, } @inproceedings{1095, abstract = {The semantics of concurrent data structures is usually given by a sequential specification and a consistency condition. Linearizability is the most popular consistency condition due to its simplicity and general applicability. Nevertheless, for applications that do not require all guarantees offered by linearizability, recent research has focused on improving performance and scalability of concurrent data structures by relaxing their semantics. In this paper, we present local linearizability, a relaxed consistency condition that is applicable to container-type concurrent data structures like pools, queues, and stacks. While linearizability requires that the effect of each operation is observed by all threads at the same time, local linearizability only requires that for each thread T, the effects of its local insertion operations and the effects of those removal operations that remove values inserted by T are observed by all threads at the same time. We investigate theoretical and practical properties of local linearizability and its relationship to many existing consistency conditions. We present a generic implementation method for locally linearizable data structures that uses existing linearizable data structures as building blocks. Our implementations show performance and scalability improvements over the original building blocks and outperform the fastest existing container-type implementations.
}, author = {Haas, Andreas and Henzinger, Thomas A and Holzer, Andreas and Kirsch, Christoph and Lippautz, Michael and Payer, Hannes and Sezgin, Ali and Sokolova, Ana and Veith, Helmut}, booktitle = {Leibniz International Proceedings in Informatics}, location = {Quebec City, Canada}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Local linearizability for concurrent container-type data structures}}, doi = {10.4230/LIPIcs.CONCUR.2016.6}, volume = {59}, year = {2016}, } @inproceedings{1103, abstract = {We propose two parallel state-space-exploration algorithms for hybrid automata (HA), with the goal of enhancing performance on multi-core shared-memory systems. The first uses the parallel, breadth-first-search algorithm (PBFS) of the SPIN model checker, when traversing the discrete modes of the HA, and enhances it with a parallel exploration of the continuous states within each mode. We show that this simple-minded extension of PBFS does not provide the desired load balancing in many HA benchmarks. The second algorithm is a task-parallel BFS algorithm (TP-BFS), which uses a cheap precomputation of the cost associated with the post operations (both continuous and discrete) in order to improve load balancing. We illustrate the TP-BFS and the cost precomputation of the post operators on a support-function-based algorithm for state-space exploration. The performance comparison of the two algorithms shows that, in general, TP-BFS provides a better utilization/load-balancing of the CPU. Both algorithms are implemented in the model checker XSpeed. Our experiments show a maximum speed-up of more than 2000× on a navigation benchmark, with respect to the SpaceEx LGG scenario. In order to make the comparison fair, we employed an equal number of post operations in both tools. To the best of our knowledge, this paper represents the first attempt to provide parallel, reachability-analysis algorithms for HA.}, author = {Gurung, Amit and Deka, Arup and Bartocci, Ezio and Bogomolov, Sergiy and Grosu, Radu and Ray, Rajarshi}, location = {Kanpur, India}, publisher = {IEEE}, title = {{Parallel reachability analysis for hybrid systems}}, doi = {10.1109/MEMCOD.2016.7797741}, year = {2016}, } @inproceedings{1135, abstract = {Time-triggered (TT) switched networks are a deterministic communication infrastructure used by real-time distributed embedded systems. These networks rely on the notion of globally discretized time (i.e., time slots) and a static TT schedule that prescribes which message is sent through which link at every time slot, such that all messages reach their destination before a global timeout. These schedules are generated offline, assuming a static network with fault-free links, and entrusting all error-handling functions to the end user. Assuming the network is static is an over-optimistic view, and indeed links tend to fail in practice. We study synthesis of TT schedules on a network in which links fail over time and we assume the switches run a very simple error-recovery protocol once they detect a crashed link. We address the problem of finding a (k, ℓ)-resistant schedule; namely, one that, assuming the switches run a fixed error-recovery protocol, guarantees that the number of messages that arrive at their destination by the timeout is at least ℓ, no matter what sequence of at most k links fails. Thus, we maintain the simplicity of the switches while giving a guarantee on the number of messages that meet the timeout.
We show how a (k, ℓ)-resistant schedule can be obtained using a CEGAR-like approach: find a schedule, decide whether it is (k, ℓ)-resistant, and if it is not, use the witnessing fault sequence to generate a constraint that is added to the program. The newly added constraint prevents the schedule from being regenerated in a future iteration while also eliminating several other schedules that are not (k, ℓ)-resistant. We illustrate the applicability of our approach using an SMT-based implementation.}, author = {Avni, Guy and Guha, Shibashis and Rodríguez Navas, Guillermo}, booktitle = {Proceedings of the 13th International Conference on Embedded Software}, location = {Pittsburgh, PA, USA}, publisher = {ACM}, title = {{Synthesizing time triggered schedules for switched networks with faulty links}}, doi = {10.1145/2968478.2968499}, year = {2016}, } @inproceedings{1134, abstract = {Hybrid systems have both continuous and discrete dynamics and are useful for modeling a variety of control systems, from air traffic control protocols to robotic maneuvers and beyond. Recently, numerous powerful and scalable tools for analyzing hybrid systems have emerged. Several of these tools implement automated formal methods for mathematically proving a system meets a specification. This tutorial session will present three recent hybrid systems tools: C2E2, HyST, and TuLiP. C2E2 is a simulation-based verification tool for hybrid systems, and uses validated numerical solvers and bloating of simulation traces to verify that systems meet specifications. HyST is a hybrid systems model transformation and translation tool, and uses a canonical intermediate representation to support most of the recent verification tools, as well as automated sound abstractions that simplify verification of a given hybrid system. TuLiP is a controller synthesis tool for hybrid systems, where given a temporal logic specification to be satisfied for a system (plant) model, TuLiP will find a controller that meets a given specification.}, author = {Duggirala, Parasara and Fan, Chuchu and Potok, Matthew and Qi, Bolun and Mitra, Sayan and Viswanathan, Mahesh and Bak, Stanley and Bogomolov, Sergiy and Johnson, Taylor and Nguyen, Luan and Schilling, Christian and Sogokon, Andrew and Tran, Hoang and Xiang, Weiming}, booktitle = {2016 IEEE Conference on Control Applications}, location = {Buenos Aires, Argentina}, publisher = {IEEE}, title = {{Tutorial: Software tools for hybrid systems verification, transformation, and synthesis: C2E2, HyST, and TuLiP}}, doi = {10.1109/CCA.2016.7587948}, year = {2016}, } @inproceedings{1138, abstract = {Automata with monitor counters, where the transitions do not depend on counter values, and nested weighted automata are two expressive automata-theoretic frameworks for quantitative properties. For a well-studied and wide class of quantitative functions, we establish that automata with monitor counters and nested weighted automata are equivalent. We study for the first time such quantitative automata under probabilistic semantics. We show that several problems that are undecidable for the classical questions of emptiness and universality become decidable under the probabilistic semantics. We present a complete picture of decidability for such automata, and even an almost-complete picture of computational complexity, for the probabilistic questions we consider.
}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, booktitle = {Proceedings of the 31st Annual ACM/IEEE Symposium on Logic in Computer Science}, location = {New York, NY, USA}, pages = {76 -- 85}, publisher = {IEEE}, title = {{Quantitative automata under probabilistic semantics}}, doi = {10.1145/2933575.2933588}, year = {2016}, } @inproceedings{1227, abstract = {Many biological systems can be modeled as multiaffine hybrid systems. Due to the nonlinearity of multiaffine systems, it is difficult to verify their properties of interest directly. A common strategy to tackle this problem is to construct and analyze a discrete overapproximation of the original system. However, the conservativeness of a discrete abstraction significantly determines the level of confidence we can have in the properties of the original system. In this paper, in order to reduce the conservativeness of a discrete abstraction, we propose a new method based on a sufficient and necessary decision condition for computing discrete transitions between states in the abstract system. We assume the state space partition of a multiaffine system to be based on a set of multivariate polynomials. Hence, a rectangular partition defined in terms of polynomials of the form (x_i − c) is just a simple case of multivariate polynomial partition, and the new decision condition applies naturally. We analyze and demonstrate the improvement of our method over the existing methods using some examples.}, author = {Kong, Hui and Bartocci, Ezio and Bogomolov, Sergiy and Grosu, Radu and Henzinger, Thomas A and Jiang, Yu and Schilling, Christian}, location = {Grenoble, France}, pages = {128 -- 144}, publisher = {Springer}, title = {{Discrete abstraction of multiaffine systems}}, doi = {10.1007/978-3-319-47151-8_9}, volume = {9957}, year = {2016}, } @inproceedings{1256, abstract = {Simulink is widely used for model-driven development (MDD) of industrial software systems. Typically, Simulink-based development is initiated from Stateflow modeling, followed by simulation, validation, and code generation mapped to physical execution platforms. However, recent industrial trends have raised the demand for rigorous verification of safety-critical applications, which is unfortunately challenging for Simulink. In this paper, we present an approach to bridge Stateflow-based model-driven development and well-defined rigorous verification. First, we develop a self-contained toolkit to translate a Stateflow model into timed automata, where major advanced modeling features in Stateflow are supported. Taking advantage of the strong verification capability of Uppaal, we can not only find bugs in Stateflow models which are missed by Simulink Design Verifier, but also check more important temporal properties. Next, we customize a runtime verifier for the generated nonintrusive VHDL and C code of the Stateflow model for monitoring. The major strength of the customization is the flexibility to collect and analyze runtime properties with a pure software monitor, which opens more opportunities for engineers to achieve high reliability of the target system compared with the traditional practice of relying only on Simulink Polyspace. We incorporate these two parts into the original Stateflow-based MDD seamlessly. In this way, safety-critical properties are verified both at the model level and at the consistent system implementation level, with the physical execution environment taken into consideration.
We apply our approach to a train controller design, and the verified implementation is tested and deployed on a real hardware platform.}, author = {Jiang, Yu and Yang, Yixiao and Liu, Han and Kong, Hui and Gu, Ming and Sun, Jiaguang and Sha, Lui}, location = {Vienna, Austria}, publisher = {IEEE}, title = {{From Stateflow simulation to verified implementation: A verification approach and a real-time train controller design}}, doi = {10.1109/RTAS.2016.7461337}, year = {2016}, } @inproceedings{1335, abstract = {In this paper we review various automata-theoretic formalisms for expressing quantitative properties. We start with finite-state Boolean automata that express the traditional regular properties. We then consider weighted ω-automata that can measure the average density of events, which finite-state Boolean automata cannot. However, even weighted ω-automata cannot express basic performance properties like average response time. We finally consider two formalisms of weighted ω-automata with monitors, where the monitors are either (a) counters or (b) weighted automata themselves. We present a translation result to establish that these two formalisms are equivalent. Weighted ω-automata with monitors generalize weighted ω-automata, and can express the average response time property. They present a natural, robust, and expressive framework for quantitative specifications, with important decidable properties.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, location = {Edinburgh, United Kingdom}, pages = {23 -- 38}, publisher = {Springer}, title = {{Quantitative monitor automata}}, doi = {10.1007/978-3-662-53413-7_2}, volume = {9837}, year = {2016}, } @inproceedings{1390, abstract = {The goal of automatic program repair is to identify a set of syntactic changes that can turn a program that is incorrect with respect to a given specification into a correct one. Existing program repair techniques typically aim to find any program that meets the given specification. Such “best-effort” strategies can end up generating a program that is quite different from the original one. Novel techniques have been proposed to compute syntactically minimal program fixes, but the smallest syntactic fix to a program can still significantly alter the original program’s behaviour. We propose a new approach to program repair based on program distances, which can quantify changes not only to the program syntax but also to the program semantics. We call this the quantitative program repair problem, where the “optimal” repair is derived using multiple distances. We implement a solution to the quantitative repair problem in a prototype tool called Qlose (Quantitatively close), using the program synthesizer Sketch. We evaluate the effectiveness of different distances in obtaining desirable repairs by evaluating Qlose on programs taken from educational tools such as CodeHunt and edX.}, author = {D'Antoni, Loris and Samanta, Roopsha and Singh, Rishabh}, location = {Toronto, Canada}, pages = {383 -- 401}, publisher = {Springer}, title = {{QLOSE: Program repair with quantitative objectives}}, doi = {10.1007/978-3-319-41540-6_21}, volume = {9780}, year = {2016}, }