@article{3979,
abstract = {Protein-protein interactions, which form the basis for most cellular processes, result in the formation of protein interfaces. Believing that the local shape of proteins is crucial, we take a geometric approach and present a definition of an interface surface formed by two or more proteins as a subset of their Voronoi diagram. The definition deals with the difficult and important problem of specifying interface boundaries by invoking methods used in the alpha shape representation of molecules, the discrete flow on Delaunay simplices to define pockets and reconstruct surfaces, and the assessment of the importance of topological features. We present an algorithm to construct the surface and define a hierarchy that distinguishes core and peripheral regions. This hierarchy is shown to have correlation with hot-spots in protein-protein interactions. Finally, we study the geometric and topological properties of interface surfaces and show their high degree of contortion.},
author = {Ban, Yih-En Andrew and Edelsbrunner, Herbert and Rudolph, Johannes},
journal = {Journal of the ACM},
number = {3},
pages = {361--378},
publisher = {ACM},
title = {{Interface surfaces for protein-protein complexes}},
doi = {10.1145/1147954.1147957},
volume = {53},
year = {2006},
}
@article{3980,
abstract = {Given a smoothly embedded 2-manifold in R-3, we define the elevation of a point as the height difference to a canonically defined second point on the same manifold. Our definition is invariant under rigid motions and can be used to define features such as lines of discontinuous or continuous but non-smooth elevation. We give an algorithm for finding points of locally maximum elevation, which we suggest mark cavities and protrusions and are useful in matching shapes as for example in protein docking.},
author = {Agarwal, Pankaj K. and Edelsbrunner, Herbert and Harer, John and Wang, Yusu},
journal = {Discrete \& Computational Geometry},
number = {4},
pages = {553--572},
publisher = {Springer},
title = {{Extreme elevation on a 2-manifold}},
doi = {10.1007/s00454-006-1265-8},
volume = {36},
year = {2006},
}
@article{4235,
author = {Vladar, Harold and González, J. A.},
journal = {Journal of Theoretical Biology},
pages = {91--109},
publisher = {Elsevier},
title = {{Dynamic response of cancer under the influence of immunological activity and therapy}},
year = {2006},
}
@article{4248,
abstract = {In finite populations, genetic drift generates interference between selected loci, causing advantageous alleles to be found more often on different chromosomes than on the same chromosome, which reduces the rate of adaptation. This “Hill–Robertson effect” generates indirect selection to increase recombination rates. We present a new method to quantify the strength of this selection. Our model represents a new beneficial allele (A) entering a population as a single copy, while another beneficial allele (B) is sweeping at another locus. A third locus affects the recombination rate between selected loci. Using a branching process model, we calculate the probability distribution of the number of copies of A on the different genetic backgrounds, after it is established but while it is still rare. Then, we use a deterministic model to express the change in frequency of the recombination modifier, due to hitchhiking, as A goes to fixation. We show that this method can give good estimates of selection for recombination. Moreover, it shows that recombination is selected through two different effects: it increases the fixation probability of new alleles, and it accelerates selective sweeps. The relative importance of these two effects depends on the relative times of occurrence of the beneficial alleles.},
author = {Roze, Denis and Barton, Nicholas},
journal = {Genetics},
number = {3},
pages = {1793--1811},
publisher = {Genetics Society of America},
title = {{The Hill-Robertson effect and the evolution of recombination}},
doi = {10.1534/genetics.106.058586},
volume = {173},
year = {2006},
}
@article{4250,
abstract = {A recent analysis has shown that divergence between human and chimpanzee varies greatly across the genome. Although this is consistent with ‘hybridisation’ between the diverging human and chimp lineages, such observations can be explained more simply by the null model of allopatric speciation.},
author = {Barton, Nicholas},
journal = {Current Biology},
number = {16},
pages = {647--650},
publisher = {Cell Press},
title = {{Evolutionary Biology: How did the human species form?}},
doi = {10.1016/j.cub.2006.07.032},
volume = {16},
year = {2006},
}
@article{4345,
abstract = {Der Artikel beschäftigt sich mit dem Konzept der Bibliothek 2.0 (bzw. Library 2.0). Er skizziert anhand einiger Beispiele die Entwicklung zum Web 2.0 und beschreibt, wie Web 2.0-Technologien und -Anwendungen in Bibliotheken eingesetzt werden. Im Mittelpunkt stehen Social-Tagging-Systeme, benutzerorientierte Erweiterungen von Bibliothekskatalogen und Dokumentenservern sowie der Einsatz von Weblogs an Bibliotheken. Ferner werden neue Anforderungen an Bibliothekare diskutiert.},
author = {Danowski, Patrick and Heller, Lambert},
journal = {Bibliotheksdienst},
number = {11},
pages = {1250--1271},
publisher = {Zentral- und Landesbibliothek Berlin},
title = {{Bibliothek 2.0 - Die Bibliothek der Zukunft?}},
volume = {40},
year = {2006},
}
@article{4351,
abstract = {BACKGROUND: Character mapping on phylogenies has played an important, if not critical role, in our understanding of molecular, morphological, and behavioral evolution. Until very recently we have relied on parsimony to infer character changes. Parsimony has a number of serious limitations that are drawbacks to our understanding. Recent statistical methods have been developed that free us from these limitations enabling us to overcome the problems of parsimony by accommodating uncertainty in evolutionary time, ancestral states, and the phylogeny. RESULTS: SIMMAP has been developed to implement stochastic character mapping that is useful to both molecular evolutionists, systematists, and bioinformaticians. Researchers can address questions about positive selection, patterns of amino acid substitution, character association, and patterns of morphological evolution. CONCLUSION: Stochastic character mapping, as implemented in the SIMMAP software, enables users to address questions that require mapping characters onto phylogenies using a probabilistic approach that does not rely on parsimony. Analyses can be performed using a fully Bayesian approach that is not reliant on considering a single topology, set of substitution model parameters, or reconstruction of ancestral states. Uncertainty in these quantities is accommodated by using MCMC samples from their respective posterior distributions.},
author = {Bollback, Jonathan},
journal = {BMC Bioinformatics},
publisher = {BioMed Central},
title = {{SIMMAP: stochastic character mapping of discrete traits on phylogenies}},
doi = {10.1186/1471-2105-7-88},
volume = {7},
year = {2006},
}
@article{4352,
abstract = {Anopheles darlingi is the primary malaria vector in Latin America, and is especially important in Amazonian Brazil. Historically, control efforts have been focused on indoor house spraying using a variety of insecticides, but since the mid-1990s there has been a shift to patient treatment and focal insecticide fogging. Anopheles darlingi was believed to have been significantly reduced in a gold-mining community, Peixoto de Azevedo (in Mato Grosso State), in the early 1990s by insecticide use during a severe malaria epidemic. In contrast, although An. darlingi was eradicated from some districts of the city of Belem (the capital of Para State) in 1968 to reduce malaria, populations around the water protection area in the eastern district were treated only briefly. To investigate the population structure of An. darlingi including evidence for a population bottleneck in Peixoto, we analyzed eight microsatellite loci of 256 individuals from seven locations in Brazil: three in Amapa State, three in Para State, and one in Mato Grosso State. Allelic diversity and mean expected heterozygosity were high for all populations (mean number alleles/locus and H(E) were 13.5 and 0.834, respectively) and did not differ significantly between locations. Significant heterozygote deficits were associated with linkage disequilibrium, most likely due to either the Wahlund effect or selection. We found no evidence for a population bottleneck in Peixoto, possibly because the reduction was not extreme enough to be detected. Overall estimates of long-term N(e) varied from 92.4 individuals under the linkage disequilibrium model to infinity under the heterozygote excess model. Fixation indices and analysis of molecular variance demonstrated significant differentiation between locations north and south of the Amazon River, suggesting a degree of genetic isolation between them, attributed to isolation by distance.},
author = {Conn, Jan E. and Vineis, Joseph H. and Bollback, Jonathan and Onyabe, David Y. and Wilkerson, Richard C. and Povoa, Marinete M.},
journal = {The American Journal of Tropical Medicine and Hygiene},
number = {5},
pages = {798--806},
publisher = {American Society of Tropical Medicine and Hygiene},
title = {{Population structure of the malaria vector Anopheles darlingi in a malaria-endemic region of eastern Amazonian Brazil}},
volume = {74},
year = {2006},
}
@inproceedings{4359,
author = {Wies, Thomas and Kuncak, Viktor and Lam, Patrick and Podelski, Andreas and Rinard, Martin},
pages = {157--173},
publisher = {Springer},
title = {{Field Constraint Analysis}},
year = {2006},
}
@inproceedings{4373,
author = {Maler, Oded and Nickovic, Dejan and Pnueli, Amir},
pages = {2--16},
publisher = {Springer},
title = {{Real Time Temporal Logic: Past, Present, Future}},
year = {2006},
}
@inproceedings{4374,
author = {Maler, Oded and Nickovic, Dejan and Pnueli, Amir},
pages = {274--289},
publisher = {Springer},
title = {{From MITL to Timed Automata}},
year = {2006},
}
@inproceedings{4401,
author = {Alur, Rajeev and Cerny, Pavol and Zdancewic, Steve},
pages = {107--118},
publisher = {Springer},
title = {{Preserving Secrecy Under Refinement}},
year = {2006},
}
@inproceedings{4406,
abstract = {We propose and evaluate a new algorithm for checking the universality of nondeterministic finite automata. In contrast to the standard algorithm, which uses the subset construction to explicitly determinize the automaton, we keep the determinization step implicit. Our algorithm computes the least fixed point of a monotone function on the lattice of antichains of state sets. We evaluate the performance of our algorithm experimentally using the random automaton model recently proposed by Tabakov and Vardi. We show that on the difficult instances of this probabilistic model, the antichain algorithm outperforms the standard one by several orders of magnitude. We also show how variations of the antichain method can be used for solving the language-inclusion problem for nondeterministic finite automata, and the emptiness problem for alternating finite automata.},
author = {De Wulf, Martin and Doyen, Laurent and Henzinger, Thomas and Raskin, Jean-François},
pages = {17--30},
publisher = {Springer},
title = {{Antichains: A new algorithm for checking universality of finite automata}},
doi = {10.1007/11817963_5},
volume = {4144},
year = {2006},
}
@inproceedings{4431,
abstract = {We summarize some current trends in embedded systems design and point out some of their characteristics, such as the chasm between analytical and computational models, and the gap between safety-critical and best-effort engineering practices. We call for a coherent scientific foundation for embedded systems design, and we discuss a few key demands on such a foundation: the need for encompassing several manifestations of heterogeneity, and the need for constructivity in design. We believe that the development of a satisfactory Embedded Systems Design Science provides a timely challenge and opportunity for reinvigorating computer science.},
author = {Henzinger, Thomas and Sifakis, Joseph},
pages = {1--15},
publisher = {Springer},
title = {{The embedded systems design challenge}},
doi = {10.1007/11813040_1},
volume = {4085},
year = {2006},
}
@inproceedings{4432,
abstract = {We add freeze quantifiers to the game logic ATL in order to specify real-time objectives for games played on timed structures. We define the semantics of the resulting logic TATL by restricting the players to physically meaningful strategies, which do not prevent time from diverging. We show that TATL can be model checked over timed automaton games. We also specify timed optimization problems for physically meaningful strategies, and we show that for timed automaton games, the optimal answers can be approximated to within any degree of precision.},
author = {Henzinger, Thomas and Prabhu, Vinayak S.},
pages = {1--17},
publisher = {Springer},
title = {{Timed alternating-time temporal logic}},
doi = {10.1007/11867340_1},
volume = {4202},
year = {2006},
}
@inproceedings{4436,
abstract = {We present an assume-guarantee interface algebra for real-time components. In our formalism a component implements a set of task sequences that share a resource. A component interface consists of an arrival rate function and a latency for each task sequence, and a capacity function for the shared resource. The interface specifies that the component guarantees certain task latencies depending on assumptions about task arrival rates and allocated resource capacities. Our algebra defines compatibility and refinement relations on interfaces. Interface compatibility can be checked on partial designs, even when some component interfaces are yet unknown. In this case interface composition computes as new assumptions the weakest constraints on the unknown components that are necessary to satisfy the specified guarantees. Interface refinement is defined in a way that ensures that compatible interfaces can be refined and implemented independently. Our algebra thus formalizes an interface-based design methodology that supports both the incremental addition of new components and the independent stepwise refinement of existing components. We demonstrate the flexibility and efficiency of the framework through simulation experiments.},
author = {Henzinger, Thomas and Matic, Slobodan},
pages = {253--266},
publisher = {IEEE},
title = {{An interface algebra for real-time components}},
doi = {10.1109/RTAS.2006.11},
year = {2006},
}
@inproceedings{4437,
abstract = {The synthesis of reactive systems requires the solution of two-player games on graphs with ω-regular objectives. When the objective is specified by a linear temporal logic formula or nondeterministic Büchi automaton, then previous algorithms for solving the game require the construction of an equivalent deterministic automaton. However, determinization for automata on infinite words is extremely complicated, and current implementations fail to produce deterministic automata even for relatively small inputs. We show how to construct, from a given nondeterministic Büchi automaton, an equivalent nondeterministic parity automaton that is good for solving games with objective . The main insight is that a nondeterministic automaton is good for solving games if it fairly simulates the equivalent deterministic automaton. In this way, we omit the determinization step in game solving and reactive synthesis. The fact that our automata are nondeterministic makes them surprisingly simple, amenable to symbolic implementation, and allows an incremental search for winning strategies.},
author = {Henzinger, Thomas and Piterman, Nir},
pages = {395--410},
publisher = {Springer},
title = {{Solving games without determinization}},
doi = {10.1007/11874683_26},
volume = {4207},
year = {2006},
}
@article{4451,
abstract = {One source of complexity in the μ-calculus is its ability to specify an unbounded number of switches between universal (AX) and existential (EX) branching modes. We therefore study the problems of satisfiability, validity, model checking, and implication for the universal and existential fragments of the μ-calculus, in which only one branching mode is allowed. The universal fragment is rich enough to express most specifications of interest, and therefore improved algorithms are of practical importance. We show that while the satisfiability and validity problems become indeed simpler for the existential and universal fragments, this is, unfortunately, not the case for model checking and implication. We also show the corresponding results for the alternation-free fragment of the μ-calculus, where no alternations between least and greatest fixed points are allowed. Our results imply that efforts to find a polynomial-time model-checking algorithm for the μ-calculus can be replaced by efforts to find such an algorithm for the universal or existential fragment.},
author = {Henzinger, Thomas and Kupferman, Orna and Majumdar, Ritankar S.},
journal = {Theoretical Computer Science},
number = {2},
pages = {173--186},
publisher = {Elsevier},
title = {{On the universal and existential fragments of the mu-calculus}},
doi = {10.1016/j.tcs.2005.11.015},
volume = {354},
year = {2006},
}
@inproceedings{4523,
abstract = {We consider the problem if a given program satisfies a specified safety property. Interesting programs have infinite state spaces, with inputs ranging over infinite domains, and for these programs the property checking problem is undecidable. Two broad approaches to property checking are testing and verification. Testing tries to find inputs and executions which demonstrate violations of the property. Verification tries to construct a formal proof which shows that all executions of the program satisfy the property. Testing works best when errors are easy to find, but it is often difficult to achieve sufficient coverage for correct programs. On the other hand, verification methods are most successful when proofs are easy to find, but they are often inefficient at discovering errors. We propose a new algorithm, Synergy, which combines testing and verification. Synergy unifies several ideas from the literature, including counterexample-guided model checking, directed testing, and partition refinement.This paper presents a description of the Synergy algorithm, its theoretical properties, a comparison with related algorithms, and a prototype implementation called Yogi.},
author = {Gulavani, Bhargav S. and Henzinger, Thomas and Kannan, Yamini and Nori, Aditya V. and Rajamani, Sriram K.},
pages = {117--127},
publisher = {ACM},
title = {{Synergy: A new algorithm for property checking}},
doi = {10.1145/1181775.1181790},
year = {2006},
}
@inproceedings{4526,
abstract = {We designed and implemented a new programming language called Hierarchical Timing Language (HTL) for hard realtime systems. Critical timing constraints are specified within the language,and ensured by the compiler. Programs in HTL are extensible in two dimensions without changing their timing behavior: new program modules can be added, and individual program tasks can be refined. The mechanism supporting time invariance under parallel composition is that different program modules communicate at specified instances of time. Time invariance under refinement is achieved by conservative scheduling of the top level. HTL is a coordination language, in that individual tasks can be implemented in "foreign" languages. As a case study, we present a distributed HTL implementation of an automotive steer-by-wire controller.},
author = {Ghosal, Arkadeb and Henzinger, Thomas and Iercan, Daniel and Kirsch, Christoph M. and Sangiovanni-Vincentelli, Alberto},
pages = {132--141},
publisher = {ACM},
title = {{A hierarchical coordination language for interacting real-time tasks}},
doi = {10.1145/1176887.1176907},
year = {2006},
}
@inproceedings{4528,
abstract = {Computational modeling of biological systems is becoming increasingly common as scientists attempt to understand biological phenomena in their full complexity. Here we distinguish between two types of biological models mathematical and computational - according to their different representations of biological phenomena and their diverse potential. We call the approach of constructing computational models of biological systems executable biology, as it focuses on the design of executable computer algorithms that mimic biological phenomena. We give an overview of the main modeling efforts in this direction, and discuss some of the new challenges that executable biology poses for computer science and biology. We argue that for executable biology to reach its full potential as a mainstream biological technique, formal and algorithmic approaches must be integrated into biological research, driving biology towards a more precise engineering discipline.},
author = {Fisher, Jasmin and Henzinger, Thomas},
pages = {1675--1682},
publisher = {IEEE},
title = {{Executable biology}},
doi = {10.1109/WSC.2006.322942},
year = {2006},
}
@inproceedings{4538,
abstract = {A stochastic graph game is played by two players on a game graph with probabilistic transitions. We consider stochastic graph games with ω-regular winning conditions specified as parity objectives. These games lie in NP ∩ coNP. We present a strategy improvement algorithm for stochastic parity games; this is the first non-brute-force algorithm for solving these games. From the strategy improvement algorithm we obtain a randomized subexponential-time algorithm to solve such games.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas},
pages = {512--523},
publisher = {Springer},
title = {{Strategy improvement and randomized subexponential algorithms for stochastic parity games}},
doi = {10.1007/11672142_42},
volume = {3884},
year = {2006},
}
@inproceedings{4539,
abstract = {Games on graphs with ω-regular objectives provide a model for the control and synthesis of reactive systems. Every ω-regular objective can be decomposed into a safety part and a liveness part. The liveness part ensures that something good happens “eventually.” Two main strengths of the classical, infinite-limit formulation of liveness are robustness (independence from the granularity of transitions) and simplicity (abstraction of complicated time bounds). However, the classical liveness formulation suffers from the drawback that the time until something good happens may be unbounded. A stronger formulation of liveness, so-called finitary liveness, overcomes this drawback, while still retaining robustness and simplicity. Finitary liveness requires that there exists an unknown, fixed bound b such that something good happens within b transitions. While for one-shot liveness (reachability) objectives, classical and finitary liveness coincide, for repeated liveness (Büchi) objectives, the finitary formulation is strictly stronger. In this work we study games with finitary parity and Streett (fairness) objectives. We prove the determinacy of these games, present algorithms for solving these games, and characterize the memory requirements of winning strategies. Our algorithms can be used, for example, for synthesizing controllers that do not let the response time of a system increase without bound.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas},
pages = {257--271},
publisher = {Springer},
title = {{Finitary winning in omega-regular games}},
doi = {10.1007/11691372_17},
volume = {3920},
year = {2006},
}
@inproceedings{4549,
abstract = {We present a compositional theory of system verification, where specifications assign real-numbered costs to systems. These costs can express a wide variety of quantitative system properties, such as resource consumption, price, or a measure of how well a system satisfies its specification. The theory supports the composition of systems and specifications, and the hiding of variables. Boolean refinement relations are replaced by real-numbered distances between descriptions of a system at different levels of detail. We show that the classical Boolean rules for compositional reasoning have quantitative counterparts in our setting. While our general theory allows costs to be specified by arbitrary cost functions, we also consider a class of linear cost functions, which give rise to an instance of our framework where all operations are computable in polynomial time.},
author = {Chatterjee, Krishnendu and de Alfaro, Luca and Faella, Marco and Henzinger, Thomas and Majumdar, Ritankar S. and Stoelinga, Mariëlle},
pages = {179--188},
publisher = {IEEE},
title = {{Compositional quantitative reasoning}},
doi = {10.1109/QEST.2006.11},
year = {2006},
}
@article{4550,
abstract = {In 2-player non-zero-sum games, Nash equilibria capture the options for rational behavior if each player attempts to maximize her payoff. In contrast to classical game theory, we consider lexicographic objectives: first, each player tries to maximize her own payoff, and then, the player tries to minimize the opponent's payoff. Such objectives arise naturally in the verification of systems with multiple components. There, instead of proving that each component satisfies its specification no matter how the other components behave, it sometimes suffices to prove that each component satisfies its specification provided that the other components satisfy their specifications. We say that a Nash equilibrium is secure if it is an equilibrium with respect to the lexicographic objectives of both players. We prove that in graph games with Borel winning conditions, which include the games that arise in verification, there may be several Nash equilibria, but there is always a unique maximal payoff profile of a secure equilibrium. We show how this equilibrium can be computed in the case of ω-regular winning conditions, and we characterize the memory requirements of strategies that achieve the equilibrium.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas and Jurdziński, Marcin},
journal = {Theoretical Computer Science},
number = {1--2},
pages = {67--82},
publisher = {Elsevier},
title = {{Games with secure equilibria}},
doi = {10.1016/j.tcs.2006.07.032},
volume = {365},
year = {2006},
}
@inproceedings{4551,
abstract = {We consider Markov decision processes (MDPs) with multiple discounted reward objectives. Such MDPs occur in design problems where one wishes to simultaneously optimize several criteria, for example, latency and power. The possible trade-offs between the different objectives are characterized by the Pareto curve. We show that every Pareto-optimal point can be achieved by a memoryless strategy; however, unlike in the single-objective case, the memoryless strategy may require randomization. Moreover, we show that the Pareto curve can be approximated in polynomial time in the size of the MDP. Additionally, we study the problem if a given value vector is realizable by any strategy, and show that it can be decided in polynomial time; but the question whether it is realizable by a deterministic memoryless strategy is NP-complete. These results provide efficient algorithms for design exploration in MDP models with multiple objectives.
This research was supported in part by the AFOSR MURI grant F49620-00-1-0327, and the NSF grants CCR-0225610, CCR-0234690, and CCR-0427202. },
author = {Chatterjee, Krishnendu and Majumdar, Ritankar S. and Henzinger, Thomas},
pages = {325--336},
publisher = {Springer},
title = {{Markov decision processes with multiple objectives}},
doi = {10.1007/11672142_26},
volume = {3884},
year = {2006},
}
@inproceedings{4552,
abstract = {A concurrent reachability game is a two-player game played on a graph: at each state, the players simultaneously and independently select moves; the two moves determine jointly a probability distribution over the successor states. The objective for player 1 consists in reaching a set of target states; the objective for player 2 is to prevent this, so that the game is zero-sum. Our contributions are two-fold. First, we present a simple proof of the fact that in concurrent reachability games, for all epsilon > 0, memoryless epsilon-optimal strategies exist. A memoryless strategy is independent of the history of plays, and an epsilon-optimal strategy achieves the objective with probability within epsilon of the value of the game. In contrast to previous proofs of this fact, which rely on the limit behavior of discounted games using advanced Puisieux series analysis, our proof is elementary and combinatorial. Second, we present a strategy-improvement (a.k.a. policy-iteration) algorithm for concurrent games with reachability objectives.},
author = {Chatterjee, Krishnendu and de Alfaro, Luca and Henzinger, Thomas},
pages = {291--300},
publisher = {IEEE},
title = {{Strategy improvement for concurrent reachability games}},
doi = {10.1109/QEST.2006.48},
year = {2006},
}
@inproceedings{4574,
abstract = {Many software model checkers are based on predicate abstraction. If the verification goal depends on pointer structures, the approach does not work well, because it is difficult to find adequate predicate abstractions for the heap. In contrast, shape analysis, which uses graph-based heap abstractions, can provide a compact representation of recursive data structures. We integrate shape analysis into the software model checker Blast. Because shape analysis is expensive, we do not apply it globally. Instead, we ensure that, like predicates, shape graphs are computed and stored locally, only where necessary for proving the verification goal. To achieve this, we extend lazy abstraction refinement, which so far has been used only for predicate abstractions, to three-valued logical structures. This approach does not only increase the precision of model checking, but it also increases the efficiency of shape analysis. We implemented the technique by extending Blast with calls to Tvla.},
author = {Beyer, Dirk and Henzinger, Thomas and Théoduloz, Grégory},
pages = {532--546},
publisher = {Springer},
title = {{Lazy shape analysis}},
doi = {10.1007/11817963_48},
volume = {4144},
year = {2006},
}
@unpublished{573,
abstract = {Mitchison and Jozsa recently suggested that the "chained-Zeno" counterfactual computation protocol recently proposed by Hosten et al. is counterfactual for only one output of the computer. This claim was based on the existing abstract algebraic definition of counterfactual computation, and indeed according to this definition, their argument is correct. However, a more general definition (physically adequate) for counterfactual computation is implicitly assumed by Hosten et. al. Here we explain in detail why the protocol is counterfactual and how the "history tracking" method of the existing description inadequately represents the physics underlying the protocol. Consequently, we propose a modified definition of counterfactual computation. Finally, we comment on one of the most interesting aspects of the error-correcting protocol. },
author = {Hosten, Onur and Rakher, Matthew and Barreiro, Julio and Peters, Nicholas and Kwiat, Paul},
note = {ArXiv preprint},
pages = {12},
publisher = {ArXiv},
title = {{Counterfactual computation revisited}},
year = {2006},
}
@unpublished{574,
abstract = {Vaidman, in a recent article adopts the method of 'quantum weak measurements in pre- and postselected ensembles' to ascertain whether or not the chained-Zeno counterfactual computation scheme proposed by Hosten et al. is counterfactual; which has been the topic of a debate on the definition of counterfactuality. We disagree with his conclusion, which brings up some interesting aspects of quantum weak measurements and some concerns about the way they are interpreted. },
author = {Hosten, Onur and Kwiat, Paul},
note = {ArXiv preprint},
pages = {2},
publisher = {ArXiv},
title = {{Weak measurements and counterfactual computation}},
year = {2006},
}
@inproceedings{577,
abstract = {Visible light photon counters (VLPCs) and solid-state photomultipliers (SSPMs) are high-efficiency single-photon detectors which have multi-photon counting capability. While both the VLPCs and the SSPMs have inferred internal quantum efficiencies above 93%, the actual measured values for both the detectors were in fact limited to less than 88%, attributed to in-coupling losses. We are currently improving this overall detection efficiency via a) custom anti-reflection coating the detectors and the in-coupling fibers, b) implementing a novel cryogenic design to reduce transmission losses and, c) using low-noise electronics to obtain a better signal-to-noise ratio.},
author = {Rangarajan, Radhika and Altepeter, Joseph B and Jeffrey, Evan R and Stoutimore, Micah J and Peters, Nicholas A and Onur Hosten and Kwiat, Paul G},
booktitle = {Proceedings of SPIE},
publisher = {SPIE},
title = {{High-efficiency single-photon detectors}},
doi = {10.1117/12.686117},
volume = {6372},
year = {2006},
}
@inproceedings{578,
abstract = {A source of single photons allows secure quantum key distribution, in addition, to being a critical resource for linear optics quantum computing. We describe our progress on deterministically creating single photons from spontaneous parametric downconversion, an extension of the Pittman, Jacobs and Franson scheme [Phys. Rev A, v66, 042303 (2002)]. Their idea was to conditionally prepare single photons by measuring one member of a spontaneously emitted photon pair and storing the remaining conditionally prepared photon until a predetermined time, when it would be "deterministically" released from storage. Our approach attempts to improve upon this by recycling the pump pulse in order to decrease the possibility of multiple-pair generation, while maintaining a high probability of producing a single pair. Many of the challenges we discuss are central to other quantum information technologies, including the need for low-loss optical storage, switching and detection, and fast feed-forward control.},
author = {Peters, Nicholas A and Arnold, Keith J and VanDevender, Aaron P and Jeffrey, Evan R and Rangarajan, Radhika and Onur Hosten and Barreiro, Julio T and Altepeter, Joseph B and Kwiat, Paul G},
booktitle = {Proceedings of SPIE},
publisher = {SPIE},
title = {{Towards a quasi-deterministic single-photon source}},
doi = {10.1117/12.684702},
volume = {6305},
year = {2006},
}
@article{579,
abstract = {The logic underlying the coherent nature of quantum information processing often deviates from intuitive reasoning, leading to surprising effects. Counterfactual computation constitutes a striking example: the potential outcome of a quantum computation can be inferred, even if the computer is not run 1. Relying on similar arguments to interaction-free measurements 2 (or quantum interrogation3), counterfactual computation is accomplished by putting the computer in a superposition of 'running' and 'not running' states, and then interfering the two histories. Conditional on the as-yet-unknown outcome of the computation, it is sometimes possible to counterfactually infer information about the solution. Here we demonstrate counterfactual computation, implementing Grover's search algorithm with an all-optical approach4. It was believed that the overall probability of such counterfactual inference is intrinsically limited1,5, so that it could not perform better on average than random guesses. However, using a novel 'chained' version of the quantum Zeno effect6, we show how to boost the counterfactual inference probability to unity, thereby beating the random guessing limit. Our methods are general and apply to any physical system, as illustrated by a discussion of trapped-ion systems. Finally, we briefly show that, in certain circumstances, counterfactual computation can eliminate errors induced by decoherence. },
author = {Onur Hosten and Rakher, Matthew T and Barreiro, Julio T and Peters, Nicholas A and Kwiat, Paul G},
journal = {Nature},
number = {7079},
pages = {949 -- 952},
publisher = {Nature Publishing Group},
title = {{Counterfactual quantum computation through quantum interrogation}},
doi = {10.1038/nature04523},
volume = {439},
year = {2006},
}
@inproceedings{583,
abstract = {Visible light photon counters (VLPCs) and solid-state photomultipliers (SSPMs) facilitate efficient single-photon detection. We are attempting to improve their efficiency, previously limited to < 88% by coupling losses, via anti-reflection coatings, better electronics and cryogenics.},
author = {Rangarajan, Radhika and Peters, Nicholas A and Onur Hosten and Altepeter, Joseph B and Jeffrey, Evan R and Kwiat, Paul G},
booktitle = {Conference on Lasers and Electro-Optics},
publisher = {IEEE},
title = {{Improved single-photon detection}},
doi = {10.1109/CLEO.2006.4628641},
year = {2006},
}
@article{6151,
author = {Salecker, Iris and Häusser, Michael and de Bono, Mario},
issn = {1469-221X},
journal = {EMBO reports},
number = {6},
pages = {585--589},
publisher = {Wiley},
title = {{On the axonal road to circuit function and behaviour: Workshop on the assembly and function of neuronal circuits}},
doi = {10.1038/sj.embor.7400713},
volume = {7},
year = {2006},
}
@article{6152,
author = {Rogers, Candida and Persson, Annelie and Cheung, Benny and de Bono, Mario},
issn = {0960-9822},
journal = {Current Biology},
number = {7},
pages = {649--659},
publisher = {Elsevier},
title = {{Behavioral motifs and neural pathways coordinating O2 responses and aggregation in C. elegans}},
doi = {10.1016/j.cub.2006.03.023},
volume = {16},
year = {2006},
}
@article{2657,
abstract = {The highest densities of the two metabotropic GABA subunits, GABA B1 and GABAB2, have been reported as occurring around the glutamatergic synapses between Purkinje cell spines and parallel fibre varicosities. In order to determine how this distribution is achieved during development, we investigated the expression pattern and the cellular and subcellular localization of the GABAB1 and GABAB2 subunits in the rat cerebellum during postnatal development. At the light microscopic level, immunoreactivity for the GABAB1 and GABAB2 subunits was very prominent in the developing molecular layer, especially in Purkinje cells. Using double immunofluorescence, we demonstrated that GABAB1 was transiently expressed in glial cells. At the electron microscopic level, immunoreactivity for GABAB receptors was always detected both pre- and postsynaptically. Presynaptically, GABAB1 and GABAB2 were localized in the extrasynaptic membrane of parallel fibres at all ages, and only rarely in GABAergic axons. Postsynaptically, GABAB receptors were localized to the extrasynaptic and perisynaptic plasma membrane of Purkinje cell dendrites and spines throughout development. Quantitative analysis and three-dimensional reconstructions further revealed a progressive developmental movement of the GABAB1 subunit on the surface of Purkinje cells from dendritic shafts to its final destination, the dendritic spines. Together, these results indicate that GABAB receptors undergo dynamic regulation during cerebellar development in association with the establishment and maturation of glutamatergic synapses to Purkinje cells.},
author = {Luján, Rafael and Ryuichi Shigemoto},
journal = {European Journal of Neuroscience},
number = {6},
pages = {1479 -- 1490},
publisher = {Wiley-Blackwell},
title = {{Localization of metabotropic GABA receptor subunits GABAB1 and GABAB2 relative to synaptic sites in the rat developing cerebellum}},
doi = {10.1111/j.1460-9568.2006.04669.x},
volume = {23},
year = {2006},
}
@article{2659,
abstract = {Transmembrane AMPA receptor regulatory proteins (TARPs), including stargazin/γ-2, are associated with AMPA receptors and participate in their surface delivery and anchoring at the postsynaptic membrane. TARPs may also act as a positive modulator of the AMPA receptor ion channel function; however, little is known about other TARP members except for stargazin/γ-2. We examined the synaptic localization of stargazin/γ-2 and γ-8 by immunoelectron microscopy and biochemical analysis. The analysis of sodium dodecyl sulfate-digested freeze-fracture replica labeling revealed that stargazin/γ-2 was concentrated in the postsynaptic area, whereas γ-8 was distributed both in synaptic and extra-synaptic plasma membranes of the hippocampal neuron. When a synaptic plasma membrane-enriched brain fraction was treated with Triton X-100 and separated by sucrose density gradient ultracentrifugation, a large proportion of NMDA receptor and stargazin/γ-2 was accumulated in raft-enriched fractions, whereas AMPA receptor and γ-8 were distributed in both the raft-enriched fractions and other Triton-insoluble fractions. Phosphorylation of stargazin/γ-2 and γ-8 was regulated by different sets of kinases and phosphatases in cultured cortical neurons. These results suggested that stargazin/γ-2 and γ-8 have distinct roles in postsynaptic membranes under the regulation of different intracellular signaling pathways.},
author = {Inamura, Mihoko and Itakura, Makoto and Okamoto, Hirotsugu and Hoka, Sumio and Mizoguchi, Akira and Fukazawa, Yugo and Ryuichi Shigemoto and Yamamori, Saori and Takahashi, Masami},
journal = {Neuroscience Research},
number = {1},
pages = {45 -- 53},
publisher = {Elsevier},
title = {{Differential localization and regulation of stargazin-like protein, γ-8 and stargazin in the plasma membrane of hippocampal and cortical neurons}},
doi = {10.1016/j.neures.2006.01.004},
volume = {55},
year = {2006},
}
@article{2660,
abstract = {Pavlovian fear conditioning, a simple form of associative learning, is thought to involve the induction of associative, NMDA receptor-dependent long-term potentiation (LTP) in the lateral amygdala. Using a combined genetic and electrophysiological approach, we show here that lack of a specific GABAB receptor subtype, GABAB(1a,2), unmasks a nonassociative, NMDA receptor-independent form of presynaptic LTP at cortico-amygdala afferents. Moreover, the level of presynaptic GABA B(1a,2) receptor activation, and hence the balance between associative and nonassociative forms of LTP, can be dynamically modulated by local inhibitory activity. At the behavioral level, genetic loss of GABA B(1a) results in a generalization of conditioned fear to nonconditioned stimuli. Our findings indicate that presynaptic inhibition through GABAB(1a,2) receptors serves as an activity-dependent constraint on the induction of homosynaptic plasticity, which may be important to prevent the generalization of conditioned fear.},
author = {Shaban, Hamdy and Humeau, Yann and Herry, Cyril and Cassasus, Guillaume and Ryuichi Shigemoto and Ciocchi, Stéphane and Barbieri, Samuel and Van Der Putten, Herman V and Kaupmann, Klemens and Bettler, Bernhard and Lüthi, Andreas},
journal = {Nature Neuroscience},
number = {8},
pages = {1028 -- 1035},
publisher = {Nature Publishing Group},
title = {{Generalization of amygdala LTP and conditioned fear in the absence of presynaptic inhibition}},
doi = {10.1038/nn1732},
volume = {9},
year = {2006},
}
@article{2661,
abstract = {GABAB receptors are the G protein-coupled receptors for the main inhibitory neurotransmitter in the brain, γ-aminobutyric acid (GABA). Molecular diversity in the GABAB system arises from the GABAB1a and GABAB1b subunit isoforms that solely differ in their ectodomains by a pair of sushi repeats that is unique to GABAB1a. Using a combined genetic, physiological, and morphological approach, we now demonstrate that GABAB1 isoforms localize to distinct synaptic sites and convey separate functions in vivo. At hippocampal CA3-to-CA1 synapses, GABAB1a assembles heteroreceptors inhibiting glutamate release, while predominantly GABAB1b mediates postsynaptic inhibition. Electron microscopy reveals a synaptic distribution of GABAB1 isoforms that agrees with the observed functional differences. Transfected CA3 neurons selectively express GABAB1a in distal axons, suggesting that the sushi repeats, a conserved protein interaction motif, specify heteroreceptor localization. The constitutive absence of GABAB1a but not GABAB1b results in impaired synaptic plasticity and hippocampus-dependent memory, emphasizing molecular differences in synaptic GABAB functions.},
author = {Vigot, Réjan and Barbieri, Samuel and Bräuner-Osborne, Hans and Tureček, Rostislav and Ryuichi Shigemoto and Zhang, Yan Ping and Luján, Rafael and Jacobson, Laura H and Biermann, Barbara and Fritschy, Jean-Marc and Vacher, Claire-Marie and Müller, Matthias P and Sansig, Gilles and Guetg, Nicole and Cryan, John F and Kaupmann, Klemens and Gassmann, Martin and Oertner, Thomas G and Bettler, Bernhard},
journal = {Neuron},
number = {4},
pages = {589 -- 601},
publisher = {Elsevier},
title = {{Differential Compartmentalization and Distinct Functions of GABAB Receptor Variants}},
doi = {10.1016/j.neuron.2006.04.014},
volume = {50},
year = {2006},
}
@article{2662,
abstract = {G-protein-coupled inwardly rectifying K+ channels (Kir3 channels) coupled to metabotropic GABAB receptors are essential for the control of neuronal excitation. To determine the distribution of Kir3 channels and their spatial relationship to GABAB receptors on hippocampal pyramidal cells, we used a high-resolution immunocytochemical approach. Immunoreactivity for the Kir3.2 subunit was most abundant postsynaptically and localized to the extrasynaptic plasma membrane of dendritic shafts and spines of principal cells. Quantitative analysis of immunogold particles for Kir3.2 revealed an enrichment of the protein around putative glutamatergic synapses on dendritic spines, similar to that of GABA B1. Consistent with this observation, a high degree of coclustering of Kir3.2 and GABAB1 was revealed around excitatory synapses by the highly sensitive SDS-digested freeze-fracture replica immunolabeling. In contrast, in dendritic shafts receptors and channels were found to be mainly segregated. These results suggest that Kir3.2-containing K+ channels on dendritic spines preferentially mediate the effect of GABA, whereas channels on dendritic shafts are likely to be activated by other neurotransmitters as well. Thus, Kir3 channels, localized to different subcellular compartments of hippocampal principal cells, appear to be differentially involved in synaptic integration in pyramidal cell dendrites.},
author = {Kulik, Ákos and Vida, Imre and Fukazawa, Yugo and Guetg, Nicole and Kasugai, Yu and Marker, Cheryl L and Rigato, Franck and Bettler, Bernhard and Wickman, Kevin D and Frotscher, Michael and Ryuichi Shigemoto},
journal = {Journal of Neuroscience},
number = {16},
pages = {4289 -- 4297},
publisher = {Society for Neuroscience},
title = {{Compartment-dependent colocalization of Kir3.2-containing K+ channels and GABAB receptors in hippocampal pyramidal cells}},
doi = {10.1523/JNEUROSCI.4178-05.2006},
volume = {26},
year = {2006},
}
@article{2663,
abstract = {The rocker mice are hereditary ataxic mutants that carry a point mutation in the gene encoding the CaV2.1 (P/Q-type) Ca2+ channel α1 subunit, and show the mildest symptoms among the reported CaV2.1 mutant mice. We studied the basic characteristics of the rocker mutant Ca2+ channel and their impacts on excitatory synaptic transmission in cerebellar Purkinje cells (PCs). In acutely dissociated PC somas, the rocker mutant channel showed a moderate reduction in Ca2+ channel current density, whereas its kinetics and voltage dependency of gating remained nearly normal. Despite the small changes in channel function, synaptic transmission in the parallel fiber (PF)-PC synapses was severely impaired. The climbing fiber inputs onto PCs showed a moderate impairment but could elicit normal complex spikes. Presynaptic function of the PF-PC synapses, however, was unexpectedly almost normal in terms of paired-pulse facilitation, sensitivity to extracellular Ca2+ concentration and glutamate concentration in synaptic clefts. Electron microscopic analyses including freeze-fracture replica labeling revealed that both the number and density of postsynaptic α-amino-3-hydroxy-5-methyl-4-isoxazolepropionic acid (AMPA) receptors substantially decreased without gross structural changes of the PF-PC synapses. We also observed an abnormal arborization of PC dendrites in young adult rocker mice (∼ 1 month old). These lines of evidence suggest that even a moderate dysfunction of CaV2.1 Ca2+ channel can cause substantial changes in postsynaptic molecular composition of the PF-PC synapses and dendritic structure of PCs.},
author = {Kodama, Takashi and Itsukaichi-Nishida, Yuko and Fukazawa, Yugo and Wakamori, Minoru and Miyata, Mariko and Molnár, Elek and Mori, Yasuo and Ryuichi Shigemoto and Imoto, Keiji},
journal = {European Journal of Neuroscience},
number = {11},
pages = {2993 -- 3007},
publisher = {Wiley-Blackwell},
title = {{A CaV2.1 calcium channel mutation rocker reduces the number of postsynaptic AMPA receptors in parallel fiber-Purkinje cell synapses}},
doi = {10.1111/j.1460-9568.2006.05191.x},
volume = {24},
year = {2006},
}
@article{2664,
abstract = {Metabotropic glutamate receptors (mGlus) are a family of G-protein-coupled receptors activated by the neurotransmitter glutamate. Molecular cloning has revealed eight different subtypes (mGlu1-8) with distinct molecular and pharmacological properties. Multiplicity in this receptor family is further generated through alternative splicing. mGlus activate a multitude of signalling pathways important for modulating neuronal excitability, synaptic plasticity and feedback regulation of neurotransmitter release. In this review, we summarize anatomical findings (from our work and that of other laboratories) describing their distribution in the central nervous system. Recent evidence regarding the localization of these receptors in peripheral tissues will also be examined. The distinct regional, cellular and subcellular distribution of mGlus in the brain will be discussed in view of their relationship to neurotransmitter release sites and of possible functional implications.},
author = {Ferraguti, Francesco and Ryuichi Shigemoto},
journal = {Cell and Tissue Research},
number = {2},
pages = {483 -- 504},
publisher = {Springer},
title = {{Metabotropic glutamate receptors}},
doi = {10.1007/s00441-006-0266-5},
volume = {326},
year = {2006},
}
@article{2745,
abstract = {We consider the dynamics of N boson systems interacting through a pair potential N -1 V a (x i -x j ) where V a (x)=a -3 V(x/a). We denote the solution to the N-particle Schrödinger equation by Ψ N, t . Recall that the Gross-Pitaevskii (GP) equation is a nonlinear Schrödinger equation and the GP hierarchy is an infinite BBGKY hierarchy of equations so that if u t solves the GP equation, then the family of k-particle density matrices [InlineMediaObject not available: see fulltext.] solves the GP hierarchy. Under the assumption that a = Nε for 0 < ε < 3/5, we prove that as N→∞ the limit points of the k-particle density matrices of Ψ N, t are solutions of the GP hierarchy with the coupling constant in the nonlinear term of the GP equation given by ∫ V (x)dx. The uniqueness of the solutions of this hierarchy remains an open question.},
author = {Elgart, Alexander and László Erdös and Schlein, Benjamin and Yau, Horng-Tzer},
journal = {Archive for Rational Mechanics and Analysis},
number = {2},
pages = {265 -- 283},
publisher = {Springer},
title = {{Gross-Pitaevskii equation as the mean field limit of weakly coupled bosons}},
doi = {10.1007/s00205-005-0388-z},
volume = {179},
year = {2006},
}
@inproceedings{2746,
abstract = {We consider random Schrödinger equations on Rd or Zd for d ≥ 3 with uncorrelated, identically distributed random potential. Denote by λ the coupling constant and ψt the solution with initial data ψ0.},
author = {László Erdös and Salmhofer, Manfred and Yau, Horng-Tzer},
booktitle = {Mathematical Physics of Quantum Mechanics},
pages = {233 -- 257},
publisher = {World Scientific Publishing},
series = {Lecture Notes in Physics},
title = {{Towards the quantum Brownian motion}},
doi = {10.1007/3-540-34273-7_18},
volume = {690},
year = {2006},
}
@article{2747,
abstract = {Consider a system of N bosons on the three-dimensional unit torus interacting via a pair potential N 2V(N(x i - x j)) where x = (x i, . . ., x N) denotes the positions of the particles. Suppose that the initial data ψ N,0 satisfies the condition 〈ψ N,0, H 2 Nψ N,0) ≤ C N 2 where H N is the Hamiltonian of the Bose system. This condition is satisfied if ψ N,0 = W Nφ N,t where W N is an approximate ground state to H N and φ N,0 is regular. Let ψ N,t denote the solution to the Schrödinger equation with Hamiltonian H N. Gross and Pitaevskii proposed to model the dynamics of such a system by a nonlinear Schrödinger equation, the Gross-Pitaevskii (GP) equation. The GP hierarchy is an infinite BBGKY hierarchy of equations so that if u t solves the GP equation, then the family of k-particle density matrices ⊗ k |u t?〉 〈 t | solves the GP hierarchy. We prove that as N → ∞ the limit points of the k-particle density matrices of ψ N,t are solutions of the GP hierarchy. Our analysis requires that the N-boson dynamics be described by a modified Hamiltonian that cuts off the pair interactions whenever at least three particles come into a region with diameter much smaller than the typical interparticle distance. Our proof can be extended to a modified Hamiltonian that only forbids at least n particles from coming close together for any fixed n.},
author = {László Erdös and Schlein, Benjamin and Yau, Horng-Tzer},
journal = {Communications on Pure and Applied Mathematics},
number = {12},
pages = {1659 -- 1741},
publisher = {Wiley-Blackwell},
title = {{Derivation of the Gross-Pitaevskii hierarchy for the dynamics of Bose-Einstein condensate}},
doi = {10.1002/cpa.20123},
volume = {59},
year = {2006},
}
@article{2791,
abstract = {Generally, the motion of fluids is smooth and laminar at low speeds but becomes highly disordered and turbulent as the velocity increases. The transition from laminar to turbulent flow can involve a sequence of instabilities in which the system realizes progressively more complicated states, or it can occur suddenly. Once the transition has taken place, it is generally assumed that, under steady conditions, the turbulent state will persist indefinitely. The flow of a fluid down a straight pipe provides a ubiquitous example of a shear flow undergoing a sudden transition from laminar to turbulent motion. Extensive calculations and experimental studies have shown that, at relatively low flow rates, turbulence in pipes is transient, and is characterized by an exponential distribution of lifetimes. They also suggest that for Reynolds numbers exceeding a critical value the lifetime diverges (that is, becomes infinitely large), marking a change from transient to persistent turbulence. Here we present experimental data and numerical calculations covering more than two decades of lifetimes, showing that the lifetime does not in fact diverge but rather increases exponentially with the Reynolds number. This implies that turbulence in pipes is only a transient event (contrary to the commonly accepted view), and that the turbulent and laminar states remain dynamically connected, suggesting avenues for turbulence control.},
author = {Björn Hof and Westerweel, Jerry and Schneider, Tobias M and Eckhardt, Bruno},
journal = {Nature},
number = {7107},
pages = {59 -- 62},
publisher = {Nature Publishing Group},
title = {{Finite lifetime of turbulence in shear flows}},
doi = {10.1038/nature05089},
volume = {443},
year = {2006},
}
@article{2792,
abstract = {Transition to turbulence in pipe flow has posed a riddle in fluid dynamics since the pioneering experiments of Reynolds[1]. Although the laminar flow is linearly stable for all flow rates, practical pipe flows become turbulent at large enough flow speeds. Turbulence arises suddenly and fully without distinct steps and without a clear critical point. The complexity of this problem has puzzled mathematicians, physicists and engineers for more than a century and no satisfactory explanation of this problem has been given. In a very recent theoretical approach it has been suggested that unstable solutions of the Navier Stokes equations may hold the key to understanding this problem. In numerical studies such unstable states have been identified as exact solutions for the idealized case of a pipe with periodic boundary conditions[2, 3]. These solutions have the form of waves extending through the entire pipe and travelling in the streamwise direction at a phase speed close to the bulk velocity of the fluid. With the aid of a recently developed high-speed stereoscopic Particle Image Velocimetry (PIV) system, we were able to observe transients of such unstable solutions in turbulent pipe flow[4].},
author = {Björn Hof and van Doorne, Casimir W and Westerweel, Jerry and Nieuwstadt, Frans T},
journal = {Fluid Mechanics and its Applications},
pages = {109 -- 114},
publisher = {Springer},
title = {{Observation of nonlinear travelling waves in turbulent pipe flow}},
doi = {10.1007/1-4020-4159-4_11},
volume = {78},
year = {2006},
}
@article{2894,
abstract = {IL-10 is a potent anti-inflammatory and immunomodulatory cytokine, exerting major effects in the degree and quality of the immune response. Using a newly generated IL-10 reporter mouse model, which easily allows the study of IL-10 expression from each allele in a single cell, we report here for the first time that IL-10 is predominantly monoallelic expressed in CD4+ T cells. Furthermore, we have compelling evidence that this expression pattern is not due to parental imprinting, allelic exclusion, or strong allelic bias. Instead, our results support a stochastic regulation mechanism, in which the probability to initiate allelic transcription depends on the strength of TCR signaling and subsequent capacity to overcome restrictions imposed by chromatin hypoacetylation. In vivo Ag-experienced T cells show a higher basal probability to transcribe IL-10 when compared with naive cells, yet still show mostly monoallelic IL-10 expression. Finally, statistical analysis on allelic expression data shows transcriptional independence between both alleles. We conclude that CD4+ T cells have a low probability for IL-10 allelic activation resulting in a predominantly monoallelic expression pattern, and that IL-10 expression appears to be stochastically regulated by controlling the frequency of expressing cells, rather than absolute protein levels per cell.},
author = {Calado, Dinis P and Tiago Paixao and Holmberg, Dan and Haury, Matthias},
journal = {Journal of Immunology},
number = {8},
pages = {5358 -- 5364},
publisher = {American Association of Immunologists},
title = {{Stochastic Monoallelic Expression of IL-10 in T Cells}},
doi = {10.4049/jimmunol.177.8.5358},
volume = {177},
year = {2006},
}
@inbook{2921,
abstract = {Most binocular stereo algorithms assume that all scene elements are visible from both cameras. Scene elements that are visible from only one camera, known as occlusions, pose an important challenge for stereo. Occlusions are important for segmentation, because they appear near discontinuities. However, stereo algorithms tend to ignore occlusions because of their difficulty. One reason is that occlusions require the input images to be treated symmetrically, which complicates the problem formulation. Worse, certain depth maps imply physically impossible scene configurations, and must be excluded from the output. In this chapter we approach the problem of binocular stereo with occlusions from an energy minimization viewpoint. We begin by reviewing traditional stereo methods that do not handle occlusions. If occlusions are ignored, it is easy to formulate the stereo problem as a pixel labeling problem, which leads to an energy function that is common in early vision. This kind of energy function can he minimized using graph cuts, which is a combinatorial optimization technique that has proven to be very effective for low-level vision problems. Motivated by this, we have designed two graph cut stereo algorithms that are designed to handle occlusions. These algorithms produce promising experimental results on real data with ground truth.},
author = {Vladimir Kolmogorov and Zabih, Ramin},
booktitle = {Handbook of Mathematical Models in Computer Vision},
pages = {423 -- 427},
publisher = {Springer},
title = {{Graph cut algorithms for binocular stereo with occlusions}},
doi = {10.1007/0-387-28831-7_26},
year = {2006},
}