@inproceedings{4359,
author = {Wies, Thomas and Kuncak, Viktor and Lam, Patrick and Podelski, Andreas and Rinard, Martin},
pages = {157--173},
publisher = {Springer},
title = {{Field Constraint Analysis}},
internal-note = {invalid doi field removed; original value was 1551 (not a DOI)},
year = {2006},
}
@inproceedings{4373,
author = {Maler, Oded and Nickovic, Dejan and Pnueli, Amir},
pages = {2--16},
publisher = {Springer},
title = {{Real Time Temporal Logic: Past, Present, Future}},
internal-note = {invalid doi field removed; original value was 1571 (not a DOI)},
year = {2006},
}
@inproceedings{4374,
author = {Maler, Oded and Nickovic, Dejan and Pnueli, Amir},
pages = {274--289},
publisher = {Springer},
title = {{From MITL to Timed Automata}},
internal-note = {invalid doi field removed; original value was 1570 (not a DOI)},
year = {2006},
}
@article{4248,
abstract = {In finite populations, genetic drift generates interference between selected loci, causing advantageous alleles to be found more often on different chromosomes than on the same chromosome, which reduces the rate of adaptation. This “Hill–Robertson effect” generates indirect selection to increase recombination rates. We present a new method to quantify the strength of this selection. Our model represents a new beneficial allele (A) entering a population as a single copy, while another beneficial allele (B) is sweeping at another locus. A third locus affects the recombination rate between selected loci. Using a branching process model, we calculate the probability distribution of the number of copies of A on the different genetic backgrounds, after it is established but while it is still rare. Then, we use a deterministic model to express the change in frequency of the recombination modifier, due to hitchhiking, as A goes to fixation. We show that this method can give good estimates of selection for recombination. Moreover, it shows that recombination is selected through two different effects: it increases the fixation probability of new alleles, and it accelerates selective sweeps. The relative importance of these two effects depends on the relative times of occurrence of the beneficial alleles.},
author = {Roze, Denis and Barton, Nicholas},
journal = {Genetics},
number = {3},
pages = {1793--1811},
publisher = {Genetics Society of America},
title = {{The Hill-Robertson effect and the evolution of recombination}},
doi = {10.1534/genetics.106.058586},
volume = {173},
year = {2006},
}
@inproceedings{4401,
author = {Alur, Rajeev and Cerny, Pavol and Zdancewic, Steve},
pages = {107--118},
publisher = {Springer},
title = {{Preserving Secrecy Under Refinement}},
internal-note = {invalid doi field removed; original value was 1543 (not a DOI)},
year = {2006},
}
@inproceedings{4406,
abstract = {We propose and evaluate a new algorithm for checking the universality of nondeterministic finite automata. In contrast to the standard algorithm, which uses the subset construction to explicitly determinize the automaton, we keep the determinization step implicit. Our algorithm computes the least fixed point of a monotone function on the lattice of antichains of state sets. We evaluate the performance of our algorithm experimentally using the random automaton model recently proposed by Tabakov and Vardi. We show that on the difficult instances of this probabilistic model, the antichain algorithm outperforms the standard one by several orders of magnitude. We also show how variations of the antichain method can be used for solving the language-inclusion problem for nondeterministic finite automata, and the emptiness problem for alternating finite automata.},
author = {De Wulf, Martin and Doyen, Laurent and Henzinger, Thomas and Raskin, Jean-François},
pages = {17--30},
publisher = {Springer},
title = {{Antichains: A new algorithm for checking universality of finite automata}},
doi = {10.1007/11817963_5},
volume = {4144},
year = {2006},
}
@inproceedings{4431,
abstract = {We summarize some current trends in embedded systems design and point out some of their characteristics, such as the chasm between analytical and computational models, and the gap between safety-critical and best-effort engineering practices. We call for a coherent scientific foundation for embedded systems design, and we discuss a few key demands on such a foundation: the need for encompassing several manifestations of heterogeneity, and the need for constructivity in design. We believe that the development of a satisfactory Embedded Systems Design Science provides a timely challenge and opportunity for reinvigorating computer science.},
author = {Henzinger, Thomas and Sifakis, Joseph},
pages = {1--15},
publisher = {Springer},
title = {{The embedded systems design challenge}},
doi = {10.1007/11813040_1},
volume = {4085},
year = {2006},
}
@inproceedings{4432,
abstract = {We add freeze quantifiers to the game logic ATL in order to specify real-time objectives for games played on timed structures. We define the semantics of the resulting logic TATL by restricting the players to physically meaningful strategies, which do not prevent time from diverging. We show that TATL can be model checked over timed automaton games. We also specify timed optimization problems for physically meaningful strategies, and we show that for timed automaton games, the optimal answers can be approximated to within any degree of precision.},
author = {Henzinger, Thomas and Prabhu, Vinayak S},
pages = {1--17},
publisher = {Springer},
title = {{Timed alternating-time temporal logic}},
doi = {10.1007/11867340_1},
volume = {4202},
year = {2006},
}
@inproceedings{4436,
abstract = {We present an assume-guarantee interface algebra for real-time components. In our formalism a component implements a set of task sequences that share a resource. A component interface consists of an arrival rate function and a latency for each task sequence, and a capacity function for the shared resource. The interface specifies that the component guarantees certain task latencies depending on assumptions about task arrival rates and allocated resource capacities. Our algebra defines compatibility and refinement relations on interfaces. Interface compatibility can be checked on partial designs, even when some component interfaces are yet unknown. In this case interface composition computes as new assumptions the weakest constraints on the unknown components that are necessary to satisfy the specified guarantees. Interface refinement is defined in a way that ensures that compatible interfaces can be refined and implemented independently. Our algebra thus formalizes an interface-based design methodology that supports both the incremental addition of new components and the independent stepwise refinement of existing components. We demonstrate the flexibility and efficiency of the framework through simulation experiments.},
author = {Henzinger, Thomas and Matic, Slobodan},
pages = {253--266},
publisher = {IEEE},
title = {{An interface algebra for real-time components}},
doi = {10.1109/RTAS.2006.11},
year = {2006},
}
@inproceedings{4437,
abstract = {The synthesis of reactive systems requires the solution of two-player games on graphs with ω-regular objectives. When the objective is specified by a linear temporal logic formula or nondeterministic Büchi automaton, then previous algorithms for solving the game require the construction of an equivalent deterministic automaton. However, determinization for automata on infinite words is extremely complicated, and current implementations fail to produce deterministic automata even for relatively small inputs. We show how to construct, from a given nondeterministic Büchi automaton, an equivalent nondeterministic parity automaton that is good for solving games with objective . The main insight is that a nondeterministic automaton is good for solving games if it fairly simulates the equivalent deterministic automaton. In this way, we omit the determinization step in game solving and reactive synthesis. The fact that our automata are nondeterministic makes them surprisingly simple, amenable to symbolic implementation, and allows an incremental search for winning strategies.},
author = {Henzinger, Thomas and Piterman, Nir},
pages = {395--410},
publisher = {Springer},
title = {{Solving games without determinization}},
doi = {10.1007/11874683_26},
volume = {4207},
year = {2006},
}
@article{4451,
abstract = {One source of complexity in the μ-calculus is its ability to specify an unbounded number of switches between universal (AX) and existential (EX) branching modes. We therefore study the problems of satisfiability, validity, model checking, and implication for the universal and existential fragments of the μ-calculus, in which only one branching mode is allowed. The universal fragment is rich enough to express most specifications of interest, and therefore improved algorithms are of practical importance. We show that while the satisfiability and validity problems become indeed simpler for the existential and universal fragments, this is, unfortunately, not the case for model checking and implication. We also show the corresponding results for the alternation-free fragment of the μ-calculus, where no alternations between least and greatest fixed points are allowed. Our results imply that efforts to find a polynomial-time model-checking algorithm for the μ-calculus can be replaced by efforts to find such an algorithm for the universal or existential fragment.},
author = {Henzinger, Thomas and Kupferman, Orna and Majumdar, Ritankar S},
journal = {Theoretical Computer Science},
number = {2},
pages = {173--186},
publisher = {Elsevier},
title = {{On the universal and existential fragments of the mu-calculus}},
doi = {10.1016/j.tcs.2005.11.015},
volume = {354},
year = {2006},
}
@inproceedings{4523,
abstract = {We consider the problem if a given program satisfies a specified safety property. Interesting programs have infinite state spaces, with inputs ranging over infinite domains, and for these programs the property checking problem is undecidable. Two broad approaches to property checking are testing and verification. Testing tries to find inputs and executions which demonstrate violations of the property. Verification tries to construct a formal proof which shows that all executions of the program satisfy the property. Testing works best when errors are easy to find, but it is often difficult to achieve sufficient coverage for correct programs. On the other hand, verification methods are most successful when proofs are easy to find, but they are often inefficient at discovering errors. We propose a new algorithm, Synergy, which combines testing and verification. Synergy unifies several ideas from the literature, including counterexample-guided model checking, directed testing, and partition refinement.This paper presents a description of the Synergy algorithm, its theoretical properties, a comparison with related algorithms, and a prototype implementation called Yogi.},
author = {Gulavani, Bhargav S and Henzinger, Thomas and Kannan, Yamini and Nori, Aditya V and Rajamani, Sriram K},
pages = {117--127},
publisher = {ACM},
title = {{Synergy: A new algorithm for property checking}},
doi = {10.1145/1181775.1181790},
year = {2006},
}
@inproceedings{4526,
abstract = {We designed and implemented a new programming language called Hierarchical Timing Language (HTL) for hard realtime systems. Critical timing constraints are specified within the language,and ensured by the compiler. Programs in HTL are extensible in two dimensions without changing their timing behavior: new program modules can be added, and individual program tasks can be refined. The mechanism supporting time invariance under parallel composition is that different program modules communicate at specified instances of time. Time invariance under refinement is achieved by conservative scheduling of the top level. HTL is a coordination language, in that individual tasks can be implemented in "foreign" languages. As a case study, we present a distributed HTL implementation of an automotive steer-by-wire controller.},
author = {Ghosal, Arkadeb and Henzinger, Thomas and Iercan, Daniel and Kirsch, Christoph M and Sangiovanni-Vincentelli, Alberto},
pages = {132--141},
publisher = {ACM},
title = {{A hierarchical coordination language for interacting real-time tasks}},
doi = {10.1145/1176887.1176907},
year = {2006},
}
@inproceedings{4528,
abstract = {Computational modeling of biological systems is becoming increasingly common as scientists attempt to understand biological phenomena in their full complexity. Here we distinguish between two types of biological models mathematical and computational - according to their different representations of biological phenomena and their diverse potential. We call the approach of constructing computational models of biological systems executable biology, as it focuses on the design of executable computer algorithms that mimic biological phenomena. We give an overview of the main modeling efforts in this direction, and discuss some of the new challenges that executable biology poses for computer science and biology. We argue that for executable biology to reach its full potential as a mainstream biological technique, formal and algorithmic approaches must be integrated into biological research, driving biology towards a more precise engineering discipline.},
author = {Fisher, Jasmin and Henzinger, Thomas},
pages = {1675--1682},
publisher = {IEEE},
title = {{Executable biology}},
doi = {10.1109/WSC.2006.322942},
year = {2006},
}
@inproceedings{4538,
abstract = {A stochastic graph game is played by two players on a game graph with probabilistic transitions. We consider stochastic graph games with ω-regular winning conditions specified as parity objectives. These games lie in NP ∩ coNP. We present a strategy improvement algorithm for stochastic parity games; this is the first non-brute-force algorithm for solving these games. From the strategy improvement algorithm we obtain a randomized subexponential-time algorithm to solve such games.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas},
pages = {512--523},
publisher = {Springer},
title = {{Strategy improvement and randomized subexponential algorithms for stochastic parity games}},
doi = {10.1007/11672142_42},
volume = {3884},
year = {2006},
}
@inproceedings{4539,
abstract = {Games on graphs with ω-regular objectives provide a model for the control and synthesis of reactive systems. Every ω-regular objective can be decomposed into a safety part and a liveness part. The liveness part ensures that something good happens “eventually.” Two main strengths of the classical, infinite-limit formulation of liveness are robustness (independence from the granularity of transitions) and simplicity (abstraction of complicated time bounds). However, the classical liveness formulation suffers from the drawback that the time until something good happens may be unbounded. A stronger formulation of liveness, so-called finitary liveness, overcomes this drawback, while still retaining robustness and simplicity. Finitary liveness requires that there exists an unknown, fixed bound b such that something good happens within b transitions. While for one-shot liveness (reachability) objectives, classical and finitary liveness coincide, for repeated liveness (Büchi) objectives, the finitary formulation is strictly stronger. In this work we study games with finitary parity and Streett (fairness) objectives. We prove the determinacy of these games, present algorithms for solving these games, and characterize the memory requirements of winning strategies. Our algorithms can be used, for example, for synthesizing controllers that do not let the response time of a system increase without bound.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas},
pages = {257--271},
publisher = {Springer},
title = {{Finitary winning in omega-regular games}},
doi = {10.1007/11691372_17},
volume = {3920},
year = {2006},
}
@unpublished{573,
abstract = {Mitchison and Jozsa recently suggested that the "chained-Zeno" counterfactual computation protocol recently proposed by Hosten et al. is counterfactual for only one output of the computer. This claim was based on the existing abstract algebraic definition of counterfactual computation, and indeed according to this definition, their argument is correct. However, a more general definition (physically adequate) for counterfactual computation is implicitly assumed by Hosten et. al. Here we explain in detail why the protocol is counterfactual and how the "history tracking" method of the existing description inadequately represents the physics underlying the protocol. Consequently, we propose a modified definition of counterfactual computation. Finally, we comment on one of the most interesting aspects of the error-correcting protocol. },
author = {Hosten, Onur and Rakher, Matthew and Barreiro, Julio and Peters, Nicholas and Kwiat, Paul},
note = {arXiv preprint},
pages = {12},
publisher = {ArXiv},
title = {{Counterfactual computation revisited}},
year = {2006},
}
@unpublished{574,
abstract = {Vaidman, in a recent article adopts the method of 'quantum weak measurements in pre- and postselected ensembles' to ascertain whether or not the chained-Zeno counterfactual computation scheme proposed by Hosten et al. is counterfactual; which has been the topic of a debate on the definition of counterfactuality. We disagree with his conclusion, which brings up some interesting aspects of quantum weak measurements and some concerns about the way they are interpreted. },
author = {Hosten, Onur and Kwiat, Paul},
note = {arXiv preprint},
pages = {2},
publisher = {ArXiv},
title = {{Weak measurements and counterfactual computation}},
year = {2006},
}
@inproceedings{577,
abstract = {Visible light photon counters (VLPCs) and solid-state photomultipliers (SSPMs) are high-efficiency single-photon detectors which have multi-photon counting capability. While both the VLPCs and the SSPMs have inferred internal quantum efficiencies above 93%, the actual measured values for both the detectors were in fact limited to less than 88%, attributed to in-coupling losses. We are currently improving this overall detection efficiency via a) custom anti-reflection coating the detectors and the in-coupling fibers, b) implementing a novel cryogenic design to reduce transmission losses and, c) using low-noise electronics to obtain a better signal-to-noise ratio.},
author = {Rangarajan, Radhika and Altepeter, Joseph B and Jeffrey, Evan R and Stoutimore, Micah J and Peters, Nicholas A and Hosten, Onur and Kwiat, Paul G},
publisher = {SPIE},
title = {{High-efficiency single-photon detectors}},
doi = {10.1117/12.686117},
volume = {6372},
year = {2006},
}
@inproceedings{578,
abstract = {A source of single photons allows secure quantum key distribution, in addition, to being a critical resource for linear optics quantum computing. We describe our progress on deterministically creating single photons from spontaneous parametric downconversion, an extension of the Pittman, Jacobs and Franson scheme [Phys. Rev A, v66, 042303 (2002)]. Their idea was to conditionally prepare single photons by measuring one member of a spontaneously emitted photon pair and storing the remaining conditionally prepared photon until a predetermined time, when it would be "deterministically" released from storage. Our approach attempts to improve upon this by recycling the pump pulse in order to decrease the possibility of multiple-pair generation, while maintaining a high probability of producing a single pair. Many of the challenges we discuss are central to other quantum information technologies, including the need for low-loss optical storage, switching and detection, and fast feed-forward control.},
author = {Peters, Nicholas A and Arnold, Keith J and VanDevender, Aaron P and Jeffrey, Evan R and Rangarajan, Radhika and Hosten, Onur and Barreiro, Julio T and Altepeter, Joseph B and Kwiat, Paul G},
publisher = {SPIE},
title = {{Towards a quasi-deterministic single-photon source}},
doi = {10.1117/12.684702},
volume = {6305},
year = {2006},
}